first public release
author Intel <intel.com>
Tue, 4 Sep 2012 12:54:00 +0000 (13:54 +0100)
committer Thomas Monjalon <thomas.monjalon@6wind.com>
Mon, 11 Mar 2013 16:19:20 +0000 (17:19 +0100)
version 1.2.3

Signed-off-by: Intel
435 files changed:
Makefile [new file with mode: 0644]
app/Makefile [new file with mode: 0644]
app/chkincs/Makefile [new file with mode: 0644]
app/chkincs/test.c [new file with mode: 0644]
app/chkincs/test.h [new file with mode: 0644]
app/chkincs/test_alarm.c [new file with mode: 0644]
app/chkincs/test_atomic.c [new file with mode: 0644]
app/chkincs/test_branch_prediction.c [new file with mode: 0644]
app/chkincs/test_byteorder.c [new file with mode: 0644]
app/chkincs/test_common.c [new file with mode: 0644]
app/chkincs/test_cpuflags.c [new file with mode: 0644]
app/chkincs/test_cycles.c [new file with mode: 0644]
app/chkincs/test_debug.c [new file with mode: 0644]
app/chkincs/test_eal.c [new file with mode: 0644]
app/chkincs/test_errno.c [new file with mode: 0644]
app/chkincs/test_ethdev.c [new file with mode: 0644]
app/chkincs/test_ether.c [new file with mode: 0644]
app/chkincs/test_fbk_hash.c [new file with mode: 0644]
app/chkincs/test_hash.c [new file with mode: 0644]
app/chkincs/test_hash_crc.c [new file with mode: 0644]
app/chkincs/test_interrupts.c [new file with mode: 0644]
app/chkincs/test_ip.c [new file with mode: 0644]
app/chkincs/test_jhash.c [new file with mode: 0644]
app/chkincs/test_launch.c [new file with mode: 0644]
app/chkincs/test_lcore.c [new file with mode: 0644]
app/chkincs/test_log.c [new file with mode: 0644]
app/chkincs/test_lpm.c [new file with mode: 0644]
app/chkincs/test_malloc.c [new file with mode: 0644]
app/chkincs/test_mbuf.c [new file with mode: 0644]
app/chkincs/test_memcpy.c [new file with mode: 0644]
app/chkincs/test_memory.c [new file with mode: 0644]
app/chkincs/test_mempool.c [new file with mode: 0644]
app/chkincs/test_memzone.c [new file with mode: 0644]
app/chkincs/test_pci.c [new file with mode: 0644]
app/chkincs/test_pci_dev_ids.c [new file with mode: 0644]
app/chkincs/test_per_lcore.c [new file with mode: 0644]
app/chkincs/test_prefetch.c [new file with mode: 0644]
app/chkincs/test_random.c [new file with mode: 0644]
app/chkincs/test_ring.c [new file with mode: 0644]
app/chkincs/test_rwlock.c [new file with mode: 0644]
app/chkincs/test_sctp.c [new file with mode: 0644]
app/chkincs/test_spinlock.c [new file with mode: 0644]
app/chkincs/test_string_fns.c [new file with mode: 0644]
app/chkincs/test_tailq.c [new file with mode: 0644]
app/chkincs/test_tcp.c [new file with mode: 0644]
app/chkincs/test_timer.c [new file with mode: 0644]
app/chkincs/test_udp.c [new file with mode: 0644]
app/chkincs/test_version.c [new file with mode: 0644]
app/dump_cfg/Makefile [new file with mode: 0644]
app/dump_cfg/dump_cfg_main.c [new file with mode: 0644]
app/test-pmd/Makefile [new file with mode: 0644]
app/test-pmd/cmdline.c [new file with mode: 0644]
app/test-pmd/config.c [new file with mode: 0644]
app/test-pmd/csumonly.c [new file with mode: 0644]
app/test-pmd/ieee1588fwd.c [new file with mode: 0644]
app/test-pmd/iofwd.c [new file with mode: 0644]
app/test-pmd/macfwd.c [new file with mode: 0644]
app/test-pmd/parameters.c [new file with mode: 0644]
app/test-pmd/rxonly.c [new file with mode: 0644]
app/test-pmd/testpmd.c [new file with mode: 0644]
app/test-pmd/testpmd.h [new file with mode: 0644]
app/test-pmd/txonly.c [new file with mode: 0644]
app/test/Makefile [new file with mode: 0644]
app/test/autotest.py [new file with mode: 0755]
app/test/commands.c [new file with mode: 0644]
app/test/graph_mempool.py [new file with mode: 0755]
app/test/graph_ring.py [new file with mode: 0755]
app/test/process.h [new file with mode: 0644]
app/test/test.c [new file with mode: 0644]
app/test/test.h [new file with mode: 0644]
app/test/test_alarm.c [new file with mode: 0644]
app/test/test_atomic.c [new file with mode: 0644]
app/test/test_byteorder.c [new file with mode: 0644]
app/test/test_cpuflags.c [new file with mode: 0644]
app/test/test_cycles.c [new file with mode: 0644]
app/test/test_debug.c [new file with mode: 0644]
app/test/test_eal_flags.c [new file with mode: 0644]
app/test/test_errno.c [new file with mode: 0644]
app/test/test_hash.c [new file with mode: 0644]
app/test/test_interrupts.c [new file with mode: 0644]
app/test/test_logs.c [new file with mode: 0644]
app/test/test_lpm.c [new file with mode: 0644]
app/test/test_lpm_routes.h [new file with mode: 0644]
app/test/test_malloc.c [new file with mode: 0644]
app/test/test_mbuf.c [new file with mode: 0644]
app/test/test_memcpy.c [new file with mode: 0644]
app/test/test_memory.c [new file with mode: 0644]
app/test/test_mempool.c [new file with mode: 0644]
app/test/test_memzone.c [new file with mode: 0644]
app/test/test_mp_secondary.c [new file with mode: 0644]
app/test/test_pci.c [new file with mode: 0644]
app/test/test_per_lcore.c [new file with mode: 0644]
app/test/test_prefetch.c [new file with mode: 0644]
app/test/test_ring.c [new file with mode: 0644]
app/test/test_rwlock.c [new file with mode: 0644]
app/test/test_spinlock.c [new file with mode: 0644]
app/test/test_string_fns.c [new file with mode: 0644]
app/test/test_tailq.c [new file with mode: 0644]
app/test/test_timer.c [new file with mode: 0644]
app/test/test_version.c [new file with mode: 0644]
config/defconfig_i686-default-linuxapp-gcc [new file with mode: 0644]
config/defconfig_i686-default-linuxapp-icc [new file with mode: 0644]
config/defconfig_x86_64-default-linuxapp-gcc [new file with mode: 0644]
config/defconfig_x86_64-default-linuxapp-icc [new file with mode: 0644]
examples/cmdline/482246_CmdLine_Sample_App_Guide_Rev1.1.pdf [new file with mode: 0644]
examples/cmdline/Makefile [new file with mode: 0644]
examples/cmdline/commands.c [new file with mode: 0644]
examples/cmdline/commands.h [new file with mode: 0644]
examples/cmdline/main.c [new file with mode: 0644]
examples/cmdline/main.h [new file with mode: 0644]
examples/cmdline/parse_obj_list.c [new file with mode: 0644]
examples/cmdline/parse_obj_list.h [new file with mode: 0644]
examples/dpdk_qat/497691_QuickAssist_in_DPDK_Env_Sample_App_Guide_Rev1.0.pdf [new file with mode: 0644]
examples/dpdk_qat/Makefile [new file with mode: 0644]
examples/dpdk_qat/config_files/shumway_B0/dh89xxcc_qa_dev0.conf [new file with mode: 0644]
examples/dpdk_qat/config_files/shumway_B0/dh89xxcc_qa_dev1.conf [new file with mode: 0644]
examples/dpdk_qat/config_files/stargo_B0/dh89xxcc_qa_dev0.conf [new file with mode: 0644]
examples/dpdk_qat/crypto.c [new file with mode: 0644]
examples/dpdk_qat/crypto.h [new file with mode: 0644]
examples/dpdk_qat/main.c [new file with mode: 0644]
examples/dpdk_qat/main.h [new file with mode: 0644]
examples/exception_path/482248_ExceptionPath_Sample_App_Guide_Rev1.1.pdf [new file with mode: 0644]
examples/exception_path/Makefile [new file with mode: 0644]
examples/exception_path/main.c [new file with mode: 0644]
examples/helloworld/482249_HelloWorld_Sample_App_Guide_Rev1.1.pdf [new file with mode: 0644]
examples/helloworld/Makefile [new file with mode: 0644]
examples/helloworld/main.c [new file with mode: 0644]
examples/helloworld/main.h [new file with mode: 0644]
examples/ipv4_frag/490761_IPv4_Frag_Sample_App_Guide_Rev1.0.pdf [new file with mode: 0644]
examples/ipv4_frag/Makefile [new file with mode: 0644]
examples/ipv4_frag/main.c [new file with mode: 0644]
examples/ipv4_frag/main.h [new file with mode: 0644]
examples/ipv4_frag/rte_ipv4_frag.h [new file with mode: 0644]
examples/ipv4_multicast/496632_IPv4_Multicast_Sample_App_Guide_Rev1.0.pdf [new file with mode: 0644]
examples/ipv4_multicast/Makefile [new file with mode: 0644]
examples/ipv4_multicast/main.c [new file with mode: 0644]
examples/ipv4_multicast/main.h [new file with mode: 0644]
examples/l2fwd-vf/496039_L2Forwarding_VirtEnv_Sample_App_Guide_Rev1.0.pdf [new file with mode: 0644]
examples/l2fwd-vf/Makefile [new file with mode: 0644]
examples/l2fwd-vf/main.c [new file with mode: 0644]
examples/l2fwd-vf/main.h [new file with mode: 0644]
examples/l2fwd/482250_L2Forwarding_Sample_App_Guide_Rev1.1.pdf [new file with mode: 0644]
examples/l2fwd/Makefile [new file with mode: 0644]
examples/l2fwd/main.c [new file with mode: 0644]
examples/l2fwd/main.h [new file with mode: 0644]
examples/l3fwd-vf/496040_L3Forwarding_VirtEnv_Sample_App_Guide_Rev1.0.pdf [new file with mode: 0644]
examples/l3fwd-vf/Makefile [new file with mode: 0644]
examples/l3fwd-vf/main.c [new file with mode: 0644]
examples/l3fwd-vf/main.h [new file with mode: 0644]
examples/l3fwd/482251_L3Forwarding_Sample_App_Guide_Rev1.2.pdf [new file with mode: 0644]
examples/l3fwd/Makefile [new file with mode: 0644]
examples/l3fwd/main.c [new file with mode: 0644]
examples/l3fwd/main.h [new file with mode: 0644]
examples/link_status_interrupt/495672_Link_Status_Interrupt_Sample_App_Guide_Rev1.0.pdf [new file with mode: 0644]
examples/link_status_interrupt/Makefile [new file with mode: 0644]
examples/link_status_interrupt/main.c [new file with mode: 0644]
examples/link_status_interrupt/main.h [new file with mode: 0644]
examples/load_balancer/482252_LoadBalancer_Sample_App_Guide_Rev1.1.pdf [new file with mode: 0644]
examples/load_balancer/Makefile [new file with mode: 0644]
examples/load_balancer/config.c [new file with mode: 0644]
examples/load_balancer/init.c [new file with mode: 0644]
examples/load_balancer/main.c [new file with mode: 0644]
examples/load_balancer/main.h [new file with mode: 0644]
examples/load_balancer/runtime.c [new file with mode: 0644]
examples/multi_process/482253_Multi_Process_Sample_App_Guide_Rev1.3.pdf [new file with mode: 0644]
examples/multi_process/Makefile [new file with mode: 0644]
examples/multi_process/client_server_mp/Makefile [new file with mode: 0644]
examples/multi_process/client_server_mp/mp_client/Makefile [new file with mode: 0644]
examples/multi_process/client_server_mp/mp_client/client.c [new file with mode: 0644]
examples/multi_process/client_server_mp/mp_server/Makefile [new file with mode: 0644]
examples/multi_process/client_server_mp/mp_server/args.c [new file with mode: 0644]
examples/multi_process/client_server_mp/mp_server/args.h [new file with mode: 0644]
examples/multi_process/client_server_mp/mp_server/init.c [new file with mode: 0644]
examples/multi_process/client_server_mp/mp_server/init.h [new file with mode: 0644]
examples/multi_process/client_server_mp/mp_server/main.c [new file with mode: 0644]
examples/multi_process/client_server_mp/mp_server/main.h [new file with mode: 0644]
examples/multi_process/client_server_mp/shared/common.h [new file with mode: 0644]
examples/multi_process/client_server_mp/shared/init_drivers.h [new file with mode: 0644]
examples/multi_process/simple_mp/Makefile [new file with mode: 0644]
examples/multi_process/simple_mp/main.c [new file with mode: 0644]
examples/multi_process/simple_mp/mp_commands.c [new file with mode: 0644]
examples/multi_process/simple_mp/mp_commands.h [new file with mode: 0644]
examples/multi_process/symmetric_mp/Makefile [new file with mode: 0644]
examples/multi_process/symmetric_mp/main.c [new file with mode: 0644]
examples/timer/482254_Timer_Sample_App_Guide_Rev1.1.pdf [new file with mode: 0644]
examples/timer/Makefile [new file with mode: 0644]
examples/timer/main.c [new file with mode: 0644]
examples/timer/main.h [new file with mode: 0644]
examples/vmdq_dcb/482255_VMDQ_DCB_L2Fwd_Sample_App_Guide_Rev1.1.pdf [new file with mode: 0644]
examples/vmdq_dcb/Makefile [new file with mode: 0644]
examples/vmdq_dcb/main.c [new file with mode: 0644]
examples/vmdq_dcb/main.h [new file with mode: 0644]
lib/Makefile [new file with mode: 0644]
lib/librte_cmdline/Makefile [new file with mode: 0644]
lib/librte_cmdline/cmdline.c [new file with mode: 0644]
lib/librte_cmdline/cmdline.h [new file with mode: 0644]
lib/librte_cmdline/cmdline_cirbuf.c [new file with mode: 0644]
lib/librte_cmdline/cmdline_cirbuf.h [new file with mode: 0644]
lib/librte_cmdline/cmdline_parse.c [new file with mode: 0644]
lib/librte_cmdline/cmdline_parse.h [new file with mode: 0644]
lib/librte_cmdline/cmdline_parse_etheraddr.c [new file with mode: 0644]
lib/librte_cmdline/cmdline_parse_etheraddr.h [new file with mode: 0644]
lib/librte_cmdline/cmdline_parse_ipaddr.c [new file with mode: 0644]
lib/librte_cmdline/cmdline_parse_ipaddr.h [new file with mode: 0644]
lib/librte_cmdline/cmdline_parse_num.c [new file with mode: 0644]
lib/librte_cmdline/cmdline_parse_num.h [new file with mode: 0644]
lib/librte_cmdline/cmdline_parse_portlist.c [new file with mode: 0644]
lib/librte_cmdline/cmdline_parse_portlist.h [new file with mode: 0644]
lib/librte_cmdline/cmdline_parse_string.c [new file with mode: 0644]
lib/librte_cmdline/cmdline_parse_string.h [new file with mode: 0644]
lib/librte_cmdline/cmdline_rdline.c [new file with mode: 0644]
lib/librte_cmdline/cmdline_rdline.h [new file with mode: 0644]
lib/librte_cmdline/cmdline_socket.c [new file with mode: 0644]
lib/librte_cmdline/cmdline_socket.h [new file with mode: 0644]
lib/librte_cmdline/cmdline_vt100.c [new file with mode: 0644]
lib/librte_cmdline/cmdline_vt100.h [new file with mode: 0644]
lib/librte_eal/Makefile [new file with mode: 0644]
lib/librte_eal/common/Makefile [new file with mode: 0644]
lib/librte_eal/common/eal_common_cpuflags.c [new file with mode: 0644]
lib/librte_eal/common/eal_common_errno.c [new file with mode: 0644]
lib/librte_eal/common/eal_common_launch.c [new file with mode: 0644]
lib/librte_eal/common/eal_common_log.c [new file with mode: 0644]
lib/librte_eal/common/eal_common_memory.c [new file with mode: 0644]
lib/librte_eal/common/eal_common_memzone.c [new file with mode: 0644]
lib/librte_eal/common/eal_common_pci.c [new file with mode: 0644]
lib/librte_eal/common/eal_common_tailqs.c [new file with mode: 0644]
lib/librte_eal/common/include/eal_private.h [new file with mode: 0644]
lib/librte_eal/common/include/i686/arch/rte_atomic.h [new file with mode: 0644]
lib/librte_eal/common/include/rte_alarm.h [new file with mode: 0644]
lib/librte_eal/common/include/rte_atomic.h [new file with mode: 0644]
lib/librte_eal/common/include/rte_branch_prediction.h [new file with mode: 0644]
lib/librte_eal/common/include/rte_byteorder.h [new file with mode: 0644]
lib/librte_eal/common/include/rte_common.h [new file with mode: 0644]
lib/librte_eal/common/include/rte_cpuflags.h [new file with mode: 0644]
lib/librte_eal/common/include/rte_cycles.h [new file with mode: 0644]
lib/librte_eal/common/include/rte_debug.h [new file with mode: 0644]
lib/librte_eal/common/include/rte_eal.h [new file with mode: 0644]
lib/librte_eal/common/include/rte_errno.h [new file with mode: 0644]
lib/librte_eal/common/include/rte_interrupts.h [new file with mode: 0644]
lib/librte_eal/common/include/rte_launch.h [new file with mode: 0644]
lib/librte_eal/common/include/rte_lcore.h [new file with mode: 0644]
lib/librte_eal/common/include/rte_log.h [new file with mode: 0644]
lib/librte_eal/common/include/rte_memcpy.h [new file with mode: 0644]
lib/librte_eal/common/include/rte_memory.h [new file with mode: 0644]
lib/librte_eal/common/include/rte_memzone.h [new file with mode: 0644]
lib/librte_eal/common/include/rte_pci.h [new file with mode: 0644]
lib/librte_eal/common/include/rte_pci_dev_ids.h [new file with mode: 0644]
lib/librte_eal/common/include/rte_per_lcore.h [new file with mode: 0644]
lib/librte_eal/common/include/rte_prefetch.h [new file with mode: 0644]
lib/librte_eal/common/include/rte_random.h [new file with mode: 0644]
lib/librte_eal/common/include/rte_rwlock.h [new file with mode: 0644]
lib/librte_eal/common/include/rte_spinlock.h [new file with mode: 0644]
lib/librte_eal/common/include/rte_string_fns.h [new file with mode: 0644]
lib/librte_eal/common/include/rte_tailq.h [new file with mode: 0644]
lib/librte_eal/common/include/rte_version.h [new file with mode: 0644]
lib/librte_eal/common/include/rte_warnings.h [new file with mode: 0644]
lib/librte_eal/common/include/x86_64/arch/rte_atomic.h [new file with mode: 0644]
lib/librte_eal/linuxapp/Makefile [new file with mode: 0644]
lib/librte_eal/linuxapp/eal/Makefile [new file with mode: 0644]
lib/librte_eal/linuxapp/eal/eal.c [new file with mode: 0644]
lib/librte_eal/linuxapp/eal/eal_alarm.c [new file with mode: 0644]
lib/librte_eal/linuxapp/eal/eal_debug.c [new file with mode: 0644]
lib/librte_eal/linuxapp/eal/eal_hpet.c [new file with mode: 0644]
lib/librte_eal/linuxapp/eal/eal_hugepage_info.c [new file with mode: 0644]
lib/librte_eal/linuxapp/eal/eal_interrupts.c [new file with mode: 0644]
lib/librte_eal/linuxapp/eal/eal_lcore.c [new file with mode: 0644]
lib/librte_eal/linuxapp/eal/eal_log.c [new file with mode: 0644]
lib/librte_eal/linuxapp/eal/eal_memory.c [new file with mode: 0644]
lib/librte_eal/linuxapp/eal/eal_pci.c [new file with mode: 0644]
lib/librte_eal/linuxapp/eal/eal_thread.c [new file with mode: 0644]
lib/librte_eal/linuxapp/eal/include/eal_fs_paths.h [new file with mode: 0644]
lib/librte_eal/linuxapp/eal/include/eal_hugepages.h [new file with mode: 0644]
lib/librte_eal/linuxapp/eal/include/eal_internal_cfg.h [new file with mode: 0644]
lib/librte_eal/linuxapp/eal/include/eal_thread.h [new file with mode: 0644]
lib/librte_eal/linuxapp/eal/include/exec-env/rte_interrupts.h [new file with mode: 0644]
lib/librte_eal/linuxapp/eal/include/exec-env/rte_lcore.h [new file with mode: 0644]
lib/librte_eal/linuxapp/eal/include/exec-env/rte_per_lcore.h [new file with mode: 0644]
lib/librte_eal/linuxapp/igb_uio/Makefile [new file with mode: 0644]
lib/librte_eal/linuxapp/igb_uio/igb_uio.c [new file with mode: 0644]
lib/librte_ether/Makefile [new file with mode: 0644]
lib/librte_ether/rte_ethdev.c [new file with mode: 0644]
lib/librte_ether/rte_ethdev.h [new file with mode: 0644]
lib/librte_ether/rte_ether.h [new file with mode: 0644]
lib/librte_hash/Makefile [new file with mode: 0644]
lib/librte_hash/rte_fbk_hash.c [new file with mode: 0644]
lib/librte_hash/rte_fbk_hash.h [new file with mode: 0644]
lib/librte_hash/rte_hash.c [new file with mode: 0644]
lib/librte_hash/rte_hash.h [new file with mode: 0644]
lib/librte_hash/rte_hash_crc.h [new file with mode: 0644]
lib/librte_hash/rte_jhash.h [new file with mode: 0644]
lib/librte_lpm/Makefile [new file with mode: 0644]
lib/librte_lpm/rte_lpm.c [new file with mode: 0644]
lib/librte_lpm/rte_lpm.h [new file with mode: 0644]
lib/librte_malloc/Makefile [new file with mode: 0644]
lib/librte_malloc/malloc_elem.c [new file with mode: 0644]
lib/librte_malloc/malloc_elem.h [new file with mode: 0644]
lib/librte_malloc/malloc_heap.c [new file with mode: 0644]
lib/librte_malloc/malloc_heap.h [new file with mode: 0644]
lib/librte_malloc/rte_malloc.c [new file with mode: 0644]
lib/librte_malloc/rte_malloc.h [new file with mode: 0644]
lib/librte_mbuf/Makefile [new file with mode: 0644]
lib/librte_mbuf/rte_mbuf.c [new file with mode: 0644]
lib/librte_mbuf/rte_mbuf.h [new file with mode: 0644]
lib/librte_mempool/Makefile [new file with mode: 0644]
lib/librte_mempool/rte_mempool.c [new file with mode: 0644]
lib/librte_mempool/rte_mempool.h [new file with mode: 0644]
lib/librte_net/Makefile [new file with mode: 0644]
lib/librte_net/rte_ip.h [new file with mode: 0644]
lib/librte_net/rte_sctp.h [new file with mode: 0644]
lib/librte_net/rte_tcp.h [new file with mode: 0644]
lib/librte_net/rte_udp.h [new file with mode: 0644]
lib/librte_pmd_igb/Makefile [new file with mode: 0644]
lib/librte_pmd_igb/e1000_ethdev.c [new file with mode: 0644]
lib/librte_pmd_igb/e1000_ethdev.h [new file with mode: 0644]
lib/librte_pmd_igb/e1000_logs.h [new file with mode: 0644]
lib/librte_pmd_igb/e1000_rxtx.c [new file with mode: 0644]
lib/librte_pmd_igb/igb/README [new file with mode: 0644]
lib/librte_pmd_igb/igb/e1000_82575.c [new file with mode: 0644]
lib/librte_pmd_igb/igb/e1000_82575.h [new file with mode: 0644]
lib/librte_pmd_igb/igb/e1000_api.c [new file with mode: 0644]
lib/librte_pmd_igb/igb/e1000_api.h [new file with mode: 0644]
lib/librte_pmd_igb/igb/e1000_defines.h [new file with mode: 0644]
lib/librte_pmd_igb/igb/e1000_hw.h [new file with mode: 0644]
lib/librte_pmd_igb/igb/e1000_mac.c [new file with mode: 0644]
lib/librte_pmd_igb/igb/e1000_mac.h [new file with mode: 0644]
lib/librte_pmd_igb/igb/e1000_manage.c [new file with mode: 0644]
lib/librte_pmd_igb/igb/e1000_manage.h [new file with mode: 0644]
lib/librte_pmd_igb/igb/e1000_mbx.c [new file with mode: 0644]
lib/librte_pmd_igb/igb/e1000_mbx.h [new file with mode: 0644]
lib/librte_pmd_igb/igb/e1000_nvm.c [new file with mode: 0644]
lib/librte_pmd_igb/igb/e1000_nvm.h [new file with mode: 0644]
lib/librte_pmd_igb/igb/e1000_osdep.c [new file with mode: 0644]
lib/librte_pmd_igb/igb/e1000_osdep.h [new file with mode: 0644]
lib/librte_pmd_igb/igb/e1000_phy.c [new file with mode: 0644]
lib/librte_pmd_igb/igb/e1000_phy.h [new file with mode: 0644]
lib/librte_pmd_igb/igb/e1000_regs.h [new file with mode: 0644]
lib/librte_pmd_igb/igb/e1000_vf.c [new file with mode: 0644]
lib/librte_pmd_igb/igb/e1000_vf.h [new file with mode: 0644]
lib/librte_pmd_igb/igb/if_igb.c [new file with mode: 0644]
lib/librte_pmd_igb/igb/if_igb.h [new file with mode: 0644]
lib/librte_pmd_ixgbe/Makefile [new file with mode: 0644]
lib/librte_pmd_ixgbe/ixgbe/README [new file with mode: 0644]
lib/librte_pmd_ixgbe/ixgbe/ixgbe.c [new file with mode: 0644]
lib/librte_pmd_ixgbe/ixgbe/ixgbe.h [new file with mode: 0644]
lib/librte_pmd_ixgbe/ixgbe/ixgbe_82598.c [new file with mode: 0644]
lib/librte_pmd_ixgbe/ixgbe/ixgbe_82599.c [new file with mode: 0644]
lib/librte_pmd_ixgbe/ixgbe/ixgbe_api.c [new file with mode: 0644]
lib/librte_pmd_ixgbe/ixgbe/ixgbe_api.h [new file with mode: 0644]
lib/librte_pmd_ixgbe/ixgbe/ixgbe_common.c [new file with mode: 0644]
lib/librte_pmd_ixgbe/ixgbe/ixgbe_common.h [new file with mode: 0644]
lib/librte_pmd_ixgbe/ixgbe/ixgbe_mbx.c [new file with mode: 0644]
lib/librte_pmd_ixgbe/ixgbe/ixgbe_mbx.h [new file with mode: 0644]
lib/librte_pmd_ixgbe/ixgbe/ixgbe_osdep.h [new file with mode: 0644]
lib/librte_pmd_ixgbe/ixgbe/ixgbe_phy.c [new file with mode: 0644]
lib/librte_pmd_ixgbe/ixgbe/ixgbe_phy.h [new file with mode: 0644]
lib/librte_pmd_ixgbe/ixgbe/ixgbe_type.h [new file with mode: 0644]
lib/librte_pmd_ixgbe/ixgbe/ixgbe_vf.c [new file with mode: 0644]
lib/librte_pmd_ixgbe/ixgbe/ixgbe_vf.h [new file with mode: 0644]
lib/librte_pmd_ixgbe/ixgbe/ixgbe_x540.c [new file with mode: 0644]
lib/librte_pmd_ixgbe/ixgbe/ixgbe_x540.h [new file with mode: 0644]
lib/librte_pmd_ixgbe/ixgbe/ixv.c [new file with mode: 0644]
lib/librte_pmd_ixgbe/ixgbe/ixv.h [new file with mode: 0644]
lib/librte_pmd_ixgbe/ixgbe_ethdev.c [new file with mode: 0644]
lib/librte_pmd_ixgbe/ixgbe_ethdev.h [new file with mode: 0644]
lib/librte_pmd_ixgbe/ixgbe_fdir.c [new file with mode: 0644]
lib/librte_pmd_ixgbe/ixgbe_logs.h [new file with mode: 0644]
lib/librte_pmd_ixgbe/ixgbe_rxtx.c [new file with mode: 0644]
lib/librte_ring/Makefile [new file with mode: 0644]
lib/librte_ring/rte_ring.c [new file with mode: 0644]
lib/librte_ring/rte_ring.h [new file with mode: 0644]
lib/librte_timer/Makefile [new file with mode: 0644]
lib/librte_timer/rte_timer.c [new file with mode: 0644]
lib/librte_timer/rte_timer.h [new file with mode: 0644]
mk/arch/i686/rte.vars.mk [new file with mode: 0644]
mk/arch/x86_64/rte.vars.mk [new file with mode: 0644]
mk/exec-env/linuxapp/rte.app.mk [new file with mode: 0644]
mk/exec-env/linuxapp/rte.vars.mk [new file with mode: 0644]
mk/internal/rte.build-post.mk [new file with mode: 0644]
mk/internal/rte.build-pre.mk [new file with mode: 0644]
mk/internal/rte.clean-post.mk [new file with mode: 0644]
mk/internal/rte.clean-pre.mk [new file with mode: 0644]
mk/internal/rte.compile-post.mk [new file with mode: 0644]
mk/internal/rte.compile-pre.mk [new file with mode: 0644]
mk/internal/rte.depdirs-post.mk [new file with mode: 0644]
mk/internal/rte.depdirs-pre.mk [new file with mode: 0644]
mk/internal/rte.exthelp-post.mk [new file with mode: 0644]
mk/internal/rte.install-post.mk [new file with mode: 0644]
mk/internal/rte.install-pre.mk [new file with mode: 0644]
mk/machine/atm/rte.vars.mk [new file with mode: 0644]
mk/machine/default/rte.vars.mk [new file with mode: 0644]
mk/machine/ivb/rte.vars.mk [new file with mode: 0644]
mk/machine/native/rte.vars.mk [new file with mode: 0644]
mk/machine/nhm/rte.vars.mk [new file with mode: 0644]
mk/machine/snb/rte.vars.mk [new file with mode: 0644]
mk/machine/wsm/rte.vars.mk [new file with mode: 0644]
mk/rte.app.mk [new file with mode: 0644]
mk/rte.doc.mk [new file with mode: 0644]
mk/rte.extapp.mk [new file with mode: 0644]
mk/rte.extlib.mk [new file with mode: 0644]
mk/rte.extobj.mk [new file with mode: 0644]
mk/rte.extvars.mk [new file with mode: 0644]
mk/rte.gnuconfigure.mk [new file with mode: 0644]
mk/rte.hostapp.mk [new file with mode: 0644]
mk/rte.hostlib.mk [new file with mode: 0644]
mk/rte.install.mk [new file with mode: 0644]
mk/rte.lib.mk [new file with mode: 0644]
mk/rte.module.mk [new file with mode: 0644]
mk/rte.obj.mk [new file with mode: 0644]
mk/rte.sdkbuild.mk [new file with mode: 0644]
mk/rte.sdkconfig.mk [new file with mode: 0644]
mk/rte.sdkdepdirs.mk [new file with mode: 0644]
mk/rte.sdkdoc.mk [new file with mode: 0644]
mk/rte.sdkgcov.mk [new file with mode: 0644]
mk/rte.sdkinstall.mk [new file with mode: 0644]
mk/rte.sdkroot.mk [new file with mode: 0644]
mk/rte.sdktest.mk [new file with mode: 0644]
mk/rte.sdktestall.mk [new file with mode: 0644]
mk/rte.subdir.mk [new file with mode: 0644]
mk/rte.vars.mk [new file with mode: 0644]
mk/target/generic/rte.app.mk [new file with mode: 0644]
mk/target/generic/rte.vars.mk [new file with mode: 0644]
mk/toolchain/gcc/rte.toolchain-compat.mk [new file with mode: 0644]
mk/toolchain/gcc/rte.vars.mk [new file with mode: 0644]
mk/toolchain/icc/rte.toolchain-compat.mk [new file with mode: 0644]
mk/toolchain/icc/rte.vars.mk [new file with mode: 0644]
scripts/Makefile [new file with mode: 0644]
scripts/depdirs-rule.sh [new file with mode: 0755]
scripts/gen-build-mk.sh [new file with mode: 0755]
scripts/gen-config-h.sh [new file with mode: 0755]
scripts/import_autotest.sh [new file with mode: 0755]
scripts/relpath.sh [new file with mode: 0755]
scripts/test-framework.sh [new file with mode: 0755]
scripts/testhost/Makefile [new file with mode: 0644]
scripts/testhost/testhost.c [new file with mode: 0644]
tools/setup.sh [new file with mode: 0755]

diff --git a/Makefile b/Makefile
new file mode 100644 (file)
index 0000000..347f7e1
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,47 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+#
+# Head Makefile for compiling rte SDK
+#
+
+RTE_SDK := $(CURDIR)
+export RTE_SDK
+
+#
+# directory list
+#
+
+ROOTDIRS-y := scripts lib app
+
+include $(RTE_SDK)/mk/rte.sdkroot.mk
diff --git a/app/Makefile b/app/Makefile
new file mode 100644 (file)
index 0000000..7a206e3
--- /dev/null
@@ -0,0 +1,42 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+DIRS-$(CONFIG_RTE_APP_TEST) += test
+DIRS-$(CONFIG_RTE_TEST_PMD) += test-pmd
+DIRS-$(CONFIG_RTE_APP_CHKINCS) += chkincs
+
+DIRS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += dump_cfg
+
+include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/app/chkincs/Makefile b/app/chkincs/Makefile
new file mode 100644 (file)
index 0000000..1ec3337
--- /dev/null
@@ -0,0 +1,96 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+APP = chkincs
+
+#
+# all source are stored in SRCS-y
+#
+
+SRCS-$(CONFIG_RTE_APP_CHKINCS) +=  test.c \
+       test_alarm.c \
+       test_atomic.c \
+       test_branch_prediction.c \
+       test_byteorder.c \
+       test_common.c \
+       test_cpuflags.c \
+       test_cycles.c \
+       test_debug.c \
+       test_eal.c \
+       test_errno.c \
+       test_ethdev.c \
+       test_ether.c \
+       test_fbk_hash.c \
+       test_hash_crc.c \
+       test_hash.c \
+       test_interrupts.c \
+       test_ip.c \
+       test_jhash.c \
+       test_launch.c \
+       test_lcore.c \
+       test_log.c \
+       test_lpm.c \
+       test_malloc.c \
+       test_mbuf.c \
+       test_memcpy.c \
+       test_memory.c \
+       test_mempool.c \
+       test_memzone.c \
+       test_pci_dev_ids.c \
+       test_pci.c \
+       test_per_lcore.c \
+       test_prefetch.c \
+       test_random.c \
+       test_ring.c \
+       test_rwlock.c \
+       test_sctp.c \
+       test_spinlock.c \
+       test_string_fns.c \
+       test_tailq.c \
+       test_tcp.c \
+       test_timer.c \
+       test_udp.c \
+       test_version.c
+
+CFLAGS += -O0 -fno-inline
+CFLAGS += $(WERROR_FLAGS)
+
+# this application needs libraries first
+DEPDIRS-$(CONFIG_RTE_APP_CHKINCS) += lib
+
+include $(RTE_SDK)/mk/rte.app.mk
diff --git a/app/chkincs/test.c b/app/chkincs/test.c
new file mode 100644 (file)
index 0000000..cd44fc2
--- /dev/null
@@ -0,0 +1,50 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include "test.h"
+
+/*
+ *      ^
+ *     / \
+ *    / | \     WARNING: this test program does *not* show how to use the
+ *   /  .  \    API. Its only goal is to check dependencies of include files.
+ *  /_______\
+ */
+
+int
+main(__attribute__((unused)) int argc, __attribute__((unused)) char **argv)
+{
+       return 0;
+}
diff --git a/app/chkincs/test.h b/app/chkincs/test.h
new file mode 100644 (file)
index 0000000..4d6ec6e
--- /dev/null
@@ -0,0 +1,90 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#ifndef _TEST_H_
+#define _TEST_H_
+
+/* icc on baremetal gives us troubles with function named 'main' */
+#ifdef RTE_EXEC_ENV_BAREMETAL
+#define main _main
+#endif
+
+int main(int argc, char **argv);
+
+int test_alarm(void);
+int test_atomic(void);
+int test_branch_prediction(void);
+int test_byteorder(void);
+int test_common(void);
+int test_cpuflags(void);
+int test_cycles(void);
+int test_debug(void);
+int test_eal(void);
+int test_errno(void);
+int test_ethdev(void);
+int test_ether(void);
+int test_fbk_hash(void);
+int test_hash_crc(void);
+int test_hash(void);
+int test_interrupts(void);
+int test_ip(void);
+int test_jhash(void);
+int test_launch(void);
+int test_lcore(void);
+int test_log(void);
+int test_lpm(void);
+int test_malloc(void);
+int test_mbuf(void);
+int test_memcpy(void);
+int test_memory(void);
+int test_mempool(void);
+int test_memzone(void);
+int test_pci_dev_ids(void);
+int test_pci(void);
+int test_per_lcore(void);
+int test_prefetch(void);
+int test_random(void);
+int test_ring(void);
+int test_rwlock(void);
+int test_sctp(void);
+int test_spinlock(void);
+int test_string_fns(void);
+int test_tailq(void);
+int test_tcp(void);
+int test_timer(void);
+int test_udp(void);
+int test_version(void);
+
+#endif
diff --git a/app/chkincs/test_alarm.c b/app/chkincs/test_alarm.c
new file mode 100644 (file)
index 0000000..233d4f6
--- /dev/null
@@ -0,0 +1,53 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <rte_alarm.h>
+
+#include "test.h"
+
+/*
+ *      ^
+ *     / \
+ *    / | \     WARNING: this test program does *not* show how to use the
+ *   /  .  \    API. Its only goal is to check dependencies of include files.
+ *  /_______\
+ */
+
+int
+test_alarm(void)
+{
+       rte_eal_alarm_set(10, 0, 0);
+       return 1;
+}
diff --git a/app/chkincs/test_atomic.c b/app/chkincs/test_atomic.c
new file mode 100644 (file)
index 0000000..f490639
--- /dev/null
@@ -0,0 +1,93 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <rte_atomic.h>
+
+#include "test.h"
+
+/*
+ *      ^
+ *     / \
+ *    / | \     WARNING: this test program does *not* show how to use the
+ *   /  .  \    API. Its only goal is to check dependencies of include files.
+ *  /_______\
+ */
+
+int
+test_atomic(void)
+{
+       rte_atomic16_t a16 = RTE_ATOMIC16_INIT(1);
+       rte_atomic32_t a32 = RTE_ATOMIC32_INIT(1);
+       rte_atomic64_t a64 = RTE_ATOMIC64_INIT(1);
+       int x;
+
+       rte_mb();
+       rte_wmb();
+       rte_rmb();
+
+       rte_atomic16_init(&a16);
+       rte_atomic16_set(&a16, 1);
+       x = rte_atomic16_read(&a16);
+       rte_atomic16_inc(&a16);
+       rte_atomic16_dec(&a16);
+       rte_atomic16_add(&a16, 5);
+       rte_atomic16_sub(&a16, 5);
+       x = rte_atomic16_test_and_set(&a16);
+       x = rte_atomic16_add_return(&a16, 10);
+
+       rte_atomic32_init(&a32);
+       rte_atomic32_set(&a32, 1);
+       x = rte_atomic32_read(&a32);
+       rte_atomic32_inc(&a32);
+       rte_atomic32_dec(&a32);
+       rte_atomic32_add(&a32, 5);
+       rte_atomic32_sub(&a32, 5);
+       x = rte_atomic32_test_and_set(&a32);
+       x = rte_atomic32_add_return(&a32, 10);
+
+       rte_atomic64_init(&a64);
+       rte_atomic64_set(&a64, 1);
+       x = rte_atomic64_read(&a64);
+       rte_atomic64_inc(&a64);
+       rte_atomic64_dec(&a64);
+       rte_atomic64_add(&a64, 5);
+       rte_atomic64_sub(&a64, 5);
+       x = rte_atomic64_test_and_set(&a64);
+       x = rte_atomic64_add_return(&a64, 10);
+       (void)x;
+
+       return 1;
+}
+
diff --git a/app/chkincs/test_branch_prediction.c b/app/chkincs/test_branch_prediction.c
new file mode 100644 (file)
index 0000000..219ddf1
--- /dev/null
@@ -0,0 +1,58 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <rte_branch_prediction.h>
+
+#include "test.h"
+
+/*
+ *      ^
+ *     / \
+ *    / | \     WARNING: this test program does *not* show how to use the
+ *   /  .  \    API. Its only goal is to check dependencies of include files.
+ *  /_______\
+ */
+
+int test_branch_prediction(void)
+{
+       int a = 1;
+       int b = 2;
+
+       if (likely(a < b))
+               return 0;
+       else if (unlikely(a < b))
+               return 1;
+       else return 2;
+}
diff --git a/app/chkincs/test_byteorder.c b/app/chkincs/test_byteorder.c
new file mode 100644 (file)
index 0000000..91b0d6e
--- /dev/null
@@ -0,0 +1,84 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <rte_byteorder.h>
+
+#include "test.h"
+
+static volatile uint16_t u16 = 0x1337;
+static volatile uint32_t u32 = 0xdeadbeefUL;
+static volatile uint64_t u64 = 0xdeadcafebabefaceULL;
+
+/*
+ *      ^
+ *     / \
+ *    / | \     WARNING: this test program does *not* show how to use the
+ *   /  .  \    API. Its only goal is to check dependencies of include files.
+ *  /_______\
+ */
+
+int
+test_byteorder(void)
+{
+       uint16_t res_u16;
+       uint32_t res_u32;
+       uint64_t res_u64;
+
+       res_u16 = rte_bswap16(u16);
+       res_u32 = rte_bswap32(u32);
+       res_u64 = rte_bswap64(u64);
+
+       res_u16 = rte_cpu_to_le_16(u16);
+       res_u32 = rte_cpu_to_le_32(u32);
+       res_u64 = rte_cpu_to_le_64(u64);
+
+       res_u16 = rte_cpu_to_be_16(u16);
+       res_u32 = rte_cpu_to_be_32(u32);
+       res_u64 = rte_cpu_to_be_64(u64);
+
+       res_u16 = rte_le_to_cpu_16(u16);
+       res_u32 = rte_le_to_cpu_32(u32);
+       res_u64 = rte_le_to_cpu_64(u64);
+
+       res_u16 = rte_be_to_cpu_16(u16);
+       res_u32 = rte_be_to_cpu_32(u32);
+       res_u64 = rte_be_to_cpu_64(u64);
+
+       (void)res_u16;
+       (void)res_u32;
+       (void)res_u64;
+
+       return 1;
+}
diff --git a/app/chkincs/test_common.c b/app/chkincs/test_common.c
new file mode 100644 (file)
index 0000000..3f86d5c
--- /dev/null
@@ -0,0 +1,76 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <rte_common.h>
+
+#include "test.h"
+
+/*
+ *      ^
+ *     / \
+ *    / | \     WARNING: this test program does *not* show how to use the
+ *   /  .  \    API. Its only goal is to check dependencies of include files.
+ *  /_______\
+ */
+
+static int
+test_func(__rte_unused int var1, int var2)
+{
+       RTE_SET_USED(var2);
+       return 1;
+}
+
+static int static_var1 = 3;
+static int static_var2 = 6;
+
+int
+test_common(void)
+{
+       int *ptr1 = &static_var1, *ptr2 = &static_var2;
+       int var;
+
+       ptr2 = RTE_PTR_ADD(ptr1, 10);
+       ptr2 = RTE_PTR_SUB(ptr1, 5);
+       var = RTE_PTR_DIFF(ptr1, ptr2);
+
+       var = RTE_ALIGN(var, 16);
+
+       RTE_BUILD_BUG_ON(0);
+
+       var = RTE_MIN(10, 5);
+       var = RTE_MAX(10, 5);
+
+       return test_func(10, 5);
+}
diff --git a/app/chkincs/test_cpuflags.c b/app/chkincs/test_cpuflags.c
new file mode 100644 (file)
index 0000000..017bb66
--- /dev/null
@@ -0,0 +1,53 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <rte_cpuflags.h>
+
+#include "test.h"
+
+/*
+ *      ^
+ *     / \
+ *    / | \     WARNING: this test program does *not* show how to use the
+ *   /  .  \    API. Its only goal is to check dependencies of include files.
+ *  /_______\
+ */
+
+int
+test_cpuflags(void)
+{
+       rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE3);
+       return 1;
+}
diff --git a/app/chkincs/test_cycles.c b/app/chkincs/test_cycles.c
new file mode 100644 (file)
index 0000000..c85a35a
--- /dev/null
@@ -0,0 +1,63 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <rte_cycles.h>
+
+#include "test.h"
+
+/*
+ *      ^
+ *     / \
+ *    / | \     WARNING: this test program does *not* show how to use the
+ *   /  .  \    API. Its only goal is to check dependencies of include files.
+ *  /_______\
+ */
+
+int
+test_cycles(void)
+{
+       uint64_t hz, c;
+
+       hz = rte_get_hpet_hz();
+       c = rte_get_hpet_cycles();
+       rte_delay_us(10);
+       rte_delay_ms(10);
+       c = rte_rdtsc();
+
+       (void)hz;
+       (void)c;
+
+       return 1;
+}
diff --git a/app/chkincs/test_debug.c b/app/chkincs/test_debug.c
new file mode 100644 (file)
index 0000000..58ecdad
--- /dev/null
@@ -0,0 +1,55 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <rte_debug.h>
+
+#include "test.h"
+
+/*
+ *      ^
+ *     / \
+ *    / | \     WARNING: this test program does *not* show how to use the
+ *   /  .  \    API. Its only goal is to check dependencies of include files.
+ *  /_______\
+ */
+
+int
+test_debug(void)
+{
+       rte_dump_stack();
+       rte_dump_registers();
+       rte_panic("oops %d", 10);
+       return 1;
+}
diff --git a/app/chkincs/test_eal.c b/app/chkincs/test_eal.c
new file mode 100644 (file)
index 0000000..2b77e62
--- /dev/null
@@ -0,0 +1,52 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <rte_eal.h>
+
+#include "test.h"
+
+/*
+ *      ^
+ *     / \
+ *    / | \     WARNING: this test program does *not* show how to use the
+ *   /  .  \    API. Its only goal is to check dependencies of include files.
+ *  /_______\
+ */
+
+int
+test_eal(void)
+{
+       return 1;
+}
diff --git a/app/chkincs/test_errno.c b/app/chkincs/test_errno.c
new file mode 100644 (file)
index 0000000..d02ec94
--- /dev/null
@@ -0,0 +1,54 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <rte_errno.h>
+
+#include "test.h"
+
+/*
+ *      ^
+ *     / \
+ *    / | \     WARNING: this test program does *not* show how to use the
+ *   /  .  \    API. Its only goal is to check dependencies of include files.
+ *  /_______\
+ */
+
+/*
+ * Include-dependency stub for <rte_errno.h>.  Also reads the rte_errno
+ * symbol so the linker must resolve it; reports -1 if rte_errno is
+ * already set when the test runs, 1 otherwise.
+ */
+int
+test_errno(void)
+{
+       if (rte_errno != 0)
+               return -1;
+       return 1;
+}
diff --git a/app/chkincs/test_ethdev.c b/app/chkincs/test_ethdev.c
new file mode 100644 (file)
index 0000000..180a796
--- /dev/null
@@ -0,0 +1,72 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+
+#include <rte_ethdev.h>
+
+#include "test.h"
+
+/*
+ *      ^
+ *     / \
+ *    / | \     WARNING: this test program does *not* show how to use the
+ *   /  .  \    API. Its only goal is to check dependencies of include files.
+ *  /_______\
+ */
+
+/*
+ * Zero-initialized (static storage) configuration objects.  They exist so
+ * that the struct types exposed by <rte_ethdev.h> are instantiated here,
+ * which proves the header declares them completely.
+ */
+static struct rte_eth_conf port_conf;
+static struct rte_eth_rxconf rx_conf;
+static struct rte_eth_txconf tx_conf;
+static struct rte_mempool *mp; /* never allocated; passed as-is below */
+
+/*
+ * Exercises the main ethdev entry points (count, configure, MAC query,
+ * RX/TX queue setup, link query, start) purely for compile/link
+ * coverage.  All return codes are collected into 'x' and deliberately
+ * discarded via the (void) cast; nothing is validated.
+ */
+int
+test_ethdev(void)
+{
+       struct rte_eth_link link;
+       int x;
+       struct ether_addr ea;
+
+       x = rte_eth_dev_count();
+       x = rte_eth_dev_configure(0, 1, 1, &port_conf);
+       rte_eth_macaddr_get(0, &ea);
+       x = rte_eth_rx_queue_setup(0, 0, 128, 0, &rx_conf, mp);
+       x = rte_eth_tx_queue_setup(0, 0, 128, 0, &tx_conf);
+       rte_eth_link_get(0, &link);
+       x = rte_eth_dev_start(0);
+
+       (void)x;
+
+       return 1;
+}
diff --git a/app/chkincs/test_ether.c b/app/chkincs/test_ether.c
new file mode 100644 (file)
index 0000000..b089aaf
--- /dev/null
@@ -0,0 +1,52 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <rte_ether.h>
+
+#include "test.h"
+
+/*
+ *      ^
+ *     / \
+ *    / | \     WARNING: this test program does *not* show how to use the
+ *   /  .  \    API. Its only goal is to check dependencies of include files.
+ *  /_______\
+ */
+
+/*
+ * Include-dependency stub for <rte_ether.h>.  No API is called; the
+ * check is that the header compiles stand-alone.  The return value is
+ * not meaningful.
+ */
+int
+test_ether(void)
+{
+       return 1;
+}
diff --git a/app/chkincs/test_fbk_hash.c b/app/chkincs/test_fbk_hash.c
new file mode 100644 (file)
index 0000000..e1e62a0
--- /dev/null
@@ -0,0 +1,53 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <rte_fbk_hash.h>
+
+#include "test.h"
+
+/*
+ *      ^
+ *     / \
+ *    / | \     WARNING: this test program does *not* show how to use the
+ *   /  .  \    API. Its only goal is to check dependencies of include files.
+ *  /_______\
+ */
+
+/*
+ * Include-dependency stub for <rte_fbk_hash.h>.  Casting
+ * RTE_FBK_HASH_FUNC_DEFAULT to void * forces that macro to be evaluated;
+ * 'ptr == ptr' is tautologically true (always returns 1) and only keeps
+ * the variable used so the compiler emits no unused-variable warning.
+ */
+int
+test_fbk_hash(void)
+{
+       void * ptr = (void *)RTE_FBK_HASH_FUNC_DEFAULT;
+       return ptr == ptr;
+}
diff --git a/app/chkincs/test_hash.c b/app/chkincs/test_hash.c
new file mode 100644 (file)
index 0000000..c989070
--- /dev/null
@@ -0,0 +1,85 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <rte_hash.h>
+
+#include "test.h"
+
+/*
+ *      ^
+ *     / \
+ *    / | \     WARNING: this test program does *not* show how to use the
+ *   /  .  \    API. Its only goal is to check dependencies of include files.
+ *  /_______\
+ */
+
+/* Parameters used for hash table in unit test functions. */
+/* NOTE(review): positional initializer -- the trailing per-field comments
+ * must track the declaration order of struct rte_hash_parameters.
+ * hash_func == 0 presumably selects the library default -- confirm
+ * against rte_hash_create(). */
+static struct rte_hash_parameters ut_params = {
+       "name", /* name */
+       64,  /* entries */
+       4, /* bucket_entries */
+       8, /* key_len */
+       0, /* hash_func */
+       0, /* hash_func_init_val */
+       0, /* socket_id */
+};
+
+/* Fixed 8-byte key type, sized to match the key_len (8) used by the
+ * hash parameters in this file. */
+struct key {
+       char key[8];
+};
+
+/* Keys used by unit test functions */
+static struct key keys[1] = {
+       {
+               { 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, 0x01, 0x02, },
+       }
+};
+
+/*
+ * Smoke-tests the hash API life cycle: create, add a key, look it up,
+ * delete it, free the table.  The positions returned by add/lookup/del
+ * are not validated -- pos0 is read only to silence the compiler.
+ * Returns -1 if table creation fails, 0 otherwise.
+ */
+int test_hash(void)
+{
+       struct rte_hash *handle;
+       int32_t pos0;
+
+       handle = rte_hash_create(&ut_params);
+       if (handle == 0) {
+               return -1;
+       }
+       pos0 = rte_hash_add_key(handle, &keys[0]);
+       pos0 = rte_hash_lookup(handle, &keys[0]);
+       pos0 = rte_hash_del_key(handle, &keys[0]);
+       rte_hash_free(handle);
+       (void)pos0; /* remove compiler warning */
+       return 0;
+}
diff --git a/app/chkincs/test_hash_crc.c b/app/chkincs/test_hash_crc.c
new file mode 100644 (file)
index 0000000..f996a09
--- /dev/null
@@ -0,0 +1,52 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <rte_hash_crc.h>
+
+#include "test.h"
+
+/*
+ *      ^
+ *     / \
+ *    / | \     WARNING: this test program does *not* show how to use the
+ *   /  .  \    API. Its only goal is to check dependencies of include files.
+ *  /_______\
+ */
+
+/*
+ * Include-dependency stub for <rte_hash_crc.h>.  No API is called; the
+ * check is that the header compiles stand-alone.  The return value is
+ * not meaningful.
+ */
+int
+test_hash_crc(void)
+{
+       return 1;
+}
diff --git a/app/chkincs/test_interrupts.c b/app/chkincs/test_interrupts.c
new file mode 100644 (file)
index 0000000..9d55160
--- /dev/null
@@ -0,0 +1,53 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <rte_interrupts.h>
+
+#include "test.h"
+
+/*
+ *      ^
+ *     / \
+ *    / | \     WARNING: this test program does *not* show how to use the
+ *   /  .  \    API. Its only goal is to check dependencies of include files.
+ *  /_______\
+ */
+
+/*
+ * Include-dependency stub for <rte_interrupts.h>.  Calls
+ * rte_intr_callback_register() with all-null arguments only to force the
+ * symbol to be referenced at link time; the return value is ignored.
+ */
+int
+test_interrupts(void)
+{
+       rte_intr_callback_register(0, 0, 0);
+       return 1;
+}
diff --git a/app/chkincs/test_ip.c b/app/chkincs/test_ip.c
new file mode 100644 (file)
index 0000000..4da405c
--- /dev/null
@@ -0,0 +1,53 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <rte_ip.h>
+
+#include "test.h"
+
+/*
+ *      ^
+ *     / \
+ *    / | \     WARNING: this test program does *not* show how to use the
+ *   /  .  \    API. Its only goal is to check dependencies of include files.
+ *  /_______\
+ */
+
+/*
+ * Exercises the IPv4() address-building macro and the IS_IPV4_MCAST()
+ * classification macro from <rte_ip.h>.  1.1.1.1 is not a multicast
+ * address, so this presumably evaluates to 0 -- the value is irrelevant,
+ * only that both macros expand and compile.
+ */
+int
+test_ip(void)
+{
+       uint64_t var = IPv4(1,1,1,1);
+       return IS_IPV4_MCAST(var);
+}
diff --git a/app/chkincs/test_jhash.c b/app/chkincs/test_jhash.c
new file mode 100644 (file)
index 0000000..f63a68d
--- /dev/null
@@ -0,0 +1,54 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <rte_jhash.h>
+
+#include "test.h"
+
+/*
+ *      ^
+ *     / \
+ *    / | \     WARNING: this test program does *not* show how to use the
+ *   /  .  \    API. Its only goal is to check dependencies of include files.
+ *  /_______\
+ */
+
+/*
+ * Include-dependency stub for <rte_jhash.h>: expands the
+ * __rte_jhash_mix() macro on three locals.  The mixed values are
+ * discarded; only compilation matters.
+ */
+int
+test_jhash(void)
+{
+       uint32_t a = 1, b = 2, c = 3;
+       __rte_jhash_mix(a,b,c);
+       return 1;
+}
diff --git a/app/chkincs/test_launch.c b/app/chkincs/test_launch.c
new file mode 100644 (file)
index 0000000..6395147
--- /dev/null
@@ -0,0 +1,68 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <rte_launch.h>
+
+#include "test.h"
+
+/*
+ *      ^
+ *     / \
+ *    / | \     WARNING: this test program does *not* show how to use the
+ *   /  .  \    API. Its only goal is to check dependencies of include files.
+ *  /_______\
+ */
+
+/* Trivial worker launched on remote lcores below; always returns 0. */
+static int
+test_launch_per_core(__attribute__((unused)) void *arg)
+{
+       return 0;
+}
+
+/*
+ * Exercises the lcore launch API: a single remote launch plus wait, an
+ * MP launch with CALL_MASTER plus the mp wait barrier, then a state
+ * query.  The state is read into 's' and discarded; nothing is checked.
+ */
+int
+test_launch(void)
+{
+       enum rte_lcore_state_t s;
+
+       rte_eal_remote_launch(test_launch_per_core, (void *)0, 0);
+       rte_eal_wait_lcore(0);
+       rte_eal_mp_remote_launch(test_launch_per_core, (void *)0, CALL_MASTER);
+       rte_eal_mp_wait_lcore();
+       s = rte_eal_get_lcore_state(0);
+
+       (void)s;
+
+       return 0;
+}
diff --git a/app/chkincs/test_lcore.c b/app/chkincs/test_lcore.c
new file mode 100644 (file)
index 0000000..221b122
--- /dev/null
@@ -0,0 +1,66 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <rte_lcore.h>
+
+#include "test.h"
+
+/*
+ *      ^
+ *     / \
+ *    / | \     WARNING: this test program does *not* show how to use the
+ *   /  .  \    API. Its only goal is to check dependencies of include files.
+ *  /_______\
+ */
+
+/*
+ * Touches every lcore/socket query helper and both iteration macros
+ * from <rte_lcore.h>.  Results are assigned to 'x' and discarded; only
+ * the compile-time dependencies of the header are being checked.
+ */
+int
+test_lcore(void)
+{
+       unsigned x;
+
+       x = rte_socket_id();
+       x = rte_lcore_id();
+       x = rte_lcore_to_socket_id(x);
+       x = rte_lcore_count();
+       x = rte_lcore_is_enabled(x);
+
+       RTE_LCORE_FOREACH(x)
+               (void)x;
+
+       RTE_LCORE_FOREACH_SLAVE(x)
+               (void)x;
+
+       return 0;
+}
diff --git a/app/chkincs/test_log.c b/app/chkincs/test_log.c
new file mode 100644 (file)
index 0000000..c640966
--- /dev/null
@@ -0,0 +1,58 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <rte_log.h>
+
+#include "test.h"
+
+#define RTE_LOGTYPE_TESTAPP1 RTE_LOGTYPE_USER1
+
+/*
+ *      ^
+ *     / \
+ *    / | \     WARNING: this test program does *not* show how to use the
+ *   /  .  \    API. Its only goal is to check dependencies of include files.
+ *  /_______\
+ */
+
+/*
+ * Exercises the logging API: configures the TESTAPP1 log type (aliased
+ * to RTE_LOGTYPE_USER1 above), raises the level to DEBUG, emits one
+ * message through RTE_LOG() and dumps the log history.  Always returns
+ * 0; the log output itself is not inspected.
+ */
+int
+test_log(void)
+{
+       rte_set_log_type(RTE_LOGTYPE_TESTAPP1, 1);
+       rte_set_log_level(RTE_LOG_DEBUG);
+       RTE_LOG(DEBUG, TESTAPP1, "this is a debug level message %d\n", 1);
+       rte_log_dump_history();
+       return 0;
+}
diff --git a/app/chkincs/test_lpm.c b/app/chkincs/test_lpm.c
new file mode 100644 (file)
index 0000000..989676e
--- /dev/null
@@ -0,0 +1,64 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <rte_lpm.h>
+
+#include "test.h"
+
+/*
+ *      ^
+ *     / \
+ *    / | \     WARNING: this test program does *not* show how to use the
+ *   /  .  \    API. Its only goal is to check dependencies of include files.
+ *  /_______\
+ */
+
+/*
+ * Smoke-tests the LPM API: creates a 256-rule heap-backed table named
+ * after this function (socket id -1 -- presumably "any socket", TODO
+ * confirm), adds a /24 route, looks it up, deletes it, looks it up
+ * again, then frees the table.  The return codes of add/lookup/delete
+ * are deliberately ignored; returns -1 only if creation fails, else 0.
+ */
+int
+test_lpm(void)
+{
+       struct rte_lpm *lpm = 0;
+       uint32_t ip = 0;
+       uint8_t depth = 24, next_hop_add = 100, next_hop_return = 0;
+
+       lpm = rte_lpm_create(__func__, -1, 256, RTE_LPM_HEAP);
+       if (lpm == 0)
+               return -1;
+       rte_lpm_add(lpm, ip, depth, next_hop_add);
+       rte_lpm_lookup(lpm, ip, &next_hop_return);
+       rte_lpm_delete(lpm, ip, depth);
+       rte_lpm_lookup(lpm, ip, &next_hop_return);
+       rte_lpm_free(lpm);
+       return 0;
+}
diff --git a/app/chkincs/test_malloc.c b/app/chkincs/test_malloc.c
new file mode 100644 (file)
index 0000000..885b356
--- /dev/null
@@ -0,0 +1,57 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <rte_malloc.h>
+
+#include "test.h"
+
+/*
+ *      ^
+ *     / \
+ *    / | \     WARNING: this test program does *not* show how to use the
+ *   /  .  \    API. Its only goal is to check dependencies of include files.
+ *  /_______\
+ */
+
+int
+test_malloc(void)
+{
+       void *p1;
+
+       p1 = rte_malloc("dummy", 1000, 8);
+       rte_free(p1);
+
+       return 0;
+}
diff --git a/app/chkincs/test_mbuf.c b/app/chkincs/test_mbuf.c
new file mode 100644 (file)
index 0000000..1d3ff9c
--- /dev/null
@@ -0,0 +1,110 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+
+#include <rte_mbuf.h>
+
+#include "test.h"
+
+#define MBUF_SIZE 2048
+#define NB_MBUF   128
+
+/*
+ *      ^
+ *     / \
+ *    / | \     WARNING: this test program does *not* show how to use the
+ *   /  .  \    API. Its only goal is to check dependencies of include files.
+ *  /_______\
+ */
+
+int
+test_mbuf(void)
+{
+       struct rte_mempool *mbuf_pool;
+       struct rte_mbuf *m, *m1;
+       char *hdr;
+       int x;
+       int* ptr;
+
+       mbuf_pool = rte_mempool_create("test_mbuf_pool", NB_MBUF,
+                                      MBUF_SIZE, 32, 0,
+                                      (void (*)(struct rte_mempool*, void*)) 0, (void *)0,
+                                      rte_pktmbuf_init, (void *)0,
+                                      SOCKET_ID_ANY, 0);
+       if (mbuf_pool == NULL) {
+               return -1;
+       }
+
+       m = rte_pktmbuf_alloc(mbuf_pool);
+       if(m == NULL) {
+               return -1;
+       }
+
+       m1 = RTE_MBUF_FROM_BADDR(RTE_MBUF_TO_BADDR(m));
+       (void)m1;
+
+       x = rte_pktmbuf_pkt_len(m);
+       x = rte_pktmbuf_data_len(m);
+       x = rte_pktmbuf_headroom(m);
+       x = rte_pktmbuf_tailroom(m);
+       x = rte_pktmbuf_is_contiguous(m);
+
+       m = rte_pktmbuf_lastseg(m);
+
+       hdr = rte_pktmbuf_mtod(m, char *);
+       rte_pktmbuf_dump(m, 0);
+
+       hdr = rte_pktmbuf_append(m, 10);
+       x = rte_pktmbuf_trim(m, 10);
+       hdr = rte_pktmbuf_prepend(m, 10);
+       hdr = rte_pktmbuf_adj(m, 10);
+
+       ptr = (int*) rte_ctrlmbuf_data(m);
+       *ptr = rte_ctrlmbuf_len(m);
+       *ptr = rte_pktmbuf_pkt_len(m);
+       *ptr = rte_pktmbuf_data_len(m);
+
+       rte_pktmbuf_free_seg(m);
+       rte_pktmbuf_free(m);
+
+       RTE_MBUF_PREFETCH_TO_FREE(m);
+
+       rte_mbuf_sanity_check(m, RTE_MBUF_CTRL, 1);
+
+       (void)x;
+       (void)hdr;
+
+       return 0;
+}
diff --git a/app/chkincs/test_memcpy.c b/app/chkincs/test_memcpy.c
new file mode 100644 (file)
index 0000000..19db8d2
--- /dev/null
@@ -0,0 +1,58 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <rte_memcpy.h>
+
+#include "test.h"
+
+/*
+ *      ^
+ *     / \
+ *    / | \     WARNING: this test program does *not* show how to use the
+ *   /  .  \    API. Its only goal is to check dependencies of include files.
+ *  /_______\
+ */
+
+int
+test_memcpy(void)
+{
+       char buf[16];
+       const char s[] = "hello\n";
+       volatile int a = 10;
+
+       rte_memcpy(buf, s, sizeof(s));
+       rte_memcpy(buf, s, a);
+       return 0;
+}
diff --git a/app/chkincs/test_memory.c b/app/chkincs/test_memory.c
new file mode 100644 (file)
index 0000000..c17db89
--- /dev/null
@@ -0,0 +1,65 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <rte_memory.h>
+
+#include "test.h"
+
+/*
+ *      ^
+ *     / \
+ *    / | \     WARNING: this test program does *not* show how to use the
+ *   /  .  \    API. Its only goal is to check dependencies of include files.
+ *  /_______\
+ */
+
+static int a __rte_cache_aligned;
+
+int
+test_memory(void)
+{
+       const struct rte_memseg *mem;
+       int s = CACHE_LINE_ROUNDUP(10);
+
+       rte_dump_physmem_layout();
+       s = rte_eal_get_physmem_size();
+       mem = rte_eal_get_physmem_layout();
+
+       (void)a;
+       (void)s;
+       (void)mem;
+
+       return 0;
+}
diff --git a/app/chkincs/test_mempool.c b/app/chkincs/test_mempool.c
new file mode 100644 (file)
index 0000000..9c669d6
--- /dev/null
@@ -0,0 +1,111 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <rte_mempool.h>
+
+#include "test.h"
+
+#define MAX_BULK 16
+#define MEMPOOL_ELT_SIZE 2048
+#define MEMPOOL_SIZE 2047
+
+/*
+ *      ^
+ *     / \
+ *    / | \     WARNING: this test program does *not* show how to use the
+ *   /  .  \    API. Its only goal is to check dependencies of include files.
+ *  /_______\
+ */
+
+int
+test_mempool(void)
+{
+       struct rte_mempool *mp;
+       void *ptrs[MAX_BULK];
+       int x;
+       phys_addr_t addr;
+
+       mp = rte_mempool_create("test_nocache", MEMPOOL_SIZE,
+                               MEMPOOL_ELT_SIZE, 0, 0,
+                               (void (*)(struct rte_mempool*, void*)) 0,
+                               (void *)0,
+                               (void (*)(struct rte_mempool*, void*, void*, unsigned int)) 0,
+                               (void *)0,
+                               SOCKET_ID_ANY, 0);
+
+       if (mp == NULL) {
+               return -1;
+       }
+
+       rte_mempool_set_bulk_count(mp, MAX_BULK);
+       rte_mempool_dump(mp);
+
+       rte_mempool_mc_get_bulk(mp, ptrs, 1);
+       rte_mempool_mc_get_bulk(mp, ptrs, MAX_BULK);
+       rte_mempool_sc_get_bulk(mp, ptrs, 1);
+       rte_mempool_sc_get_bulk(mp, ptrs, MAX_BULK);
+       rte_mempool_get_bulk(mp, ptrs, 1);
+       rte_mempool_get_bulk(mp, ptrs, MAX_BULK);
+       rte_mempool_mc_get(mp, ptrs);
+       rte_mempool_sc_get(mp, ptrs);
+       rte_mempool_get(mp, ptrs);
+
+       rte_mempool_mp_put_bulk(mp, ptrs, 1);
+       rte_mempool_mp_put_bulk(mp, ptrs, MAX_BULK);
+       rte_mempool_sp_put_bulk(mp, ptrs, 1);
+       rte_mempool_sp_put_bulk(mp, ptrs, MAX_BULK);
+       rte_mempool_put_bulk(mp, ptrs, 1);
+       rte_mempool_put_bulk(mp, ptrs, MAX_BULK);
+       rte_mempool_mp_put(mp, ptrs);
+       rte_mempool_sp_put(mp, ptrs);
+       rte_mempool_put(mp, ptrs);
+
+       __MEMPOOL_STAT_ADD(mp, put, 1);
+       __mempool_check_cookies(mp, 0, 0, 0);
+
+       x = rte_mempool_count(mp);
+       x = rte_mempool_free_count(mp);
+       x = rte_mempool_full(mp);
+       x = rte_mempool_empty(mp);
+
+       addr = rte_mempool_virt2phy(mp, ptrs[0]);
+       rte_mempool_audit(mp);
+       ptrs[0] = rte_mempool_get_priv(mp);
+
+       (void)x;
+       (void)addr;
+
+       return 0;
+}
diff --git a/app/chkincs/test_memzone.c b/app/chkincs/test_memzone.c
new file mode 100644 (file)
index 0000000..31c9af6
--- /dev/null
@@ -0,0 +1,61 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <rte_memzone.h>
+
+#include "test.h"
+
+/*
+ *      ^
+ *     / \
+ *    / | \     WARNING: this test program does *not* show how to use the
+ *   /  .  \    API. Its only goal is to check dependencies of include files.
+ *  /_______\
+ */
+
+int
+test_memzone(void)
+{
+       const struct rte_memzone *memzone1;
+
+       memzone1 = rte_memzone_lookup("testzone1");
+       memzone1 = rte_memzone_reserve("testzone1", 100,
+                                      0, 0);
+       rte_memzone_dump();
+
+       (void)memzone1;
+
+       return 0;
+}
diff --git a/app/chkincs/test_pci.c b/app/chkincs/test_pci.c
new file mode 100644 (file)
index 0000000..7af0894
--- /dev/null
@@ -0,0 +1,86 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <rte_pci.h>
+
+#include "test.h"
+
+/*
+ *      ^
+ *     / \
+ *    / | \     WARNING: this test program does *not* show how to use the
+ *   /  .  \    API. Its only goal is to check dependencies of include files.
+ *  /_______\
+ */
+
+static int my_driver_init(struct rte_pci_driver *dr,
+                         struct rte_pci_device *dev);
+
+struct rte_pci_id my_driver_id[] = {
+       {
+               0x8086,
+               0x10E8,
+               PCI_ANY_ID,
+               PCI_ANY_ID,
+       },
+       {
+               0, 0, 0, 0 /* sentinel */
+       },
+};
+struct rte_pci_driver my_driver = {
+       {0, 0},
+       "test_driver",
+       my_driver_init,
+       my_driver_id,
+       RTE_PCI_DRV_NEED_IGB_UIO,
+};
+
+static int
+my_driver_init(__attribute__((unused)) struct rte_pci_driver *dr,
+              __attribute__((unused))  struct rte_pci_device *dev)
+{
+       return 0;
+}
+
+int
+test_pci(void)
+{
+       struct rte_pci_id id = {RTE_PCI_DEVICE(0, 0)};
+       rte_eal_pci_dump();
+       rte_eal_pci_register(&my_driver);
+       rte_eal_pci_probe();
+       (void)id;
+       return 0;
+}
diff --git a/app/chkincs/test_pci_dev_ids.c b/app/chkincs/test_pci_dev_ids.c
new file mode 100644 (file)
index 0000000..290105c
--- /dev/null
@@ -0,0 +1,60 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include "test.h"
+
+struct A {
+       int x;
+       int y;
+};
+
+static struct A a[] = {
+#define RTE_PCI_DEV_ID_DECL(vend, dev) {vend, dev},
+#include <rte_pci_dev_ids.h>
+};
+
+/*
+ *      ^
+ *     / \
+ *    / | \     WARNING: this test program does *not* show how to use the
+ *   /  .  \    API. Its only goal is to check dependencies of include files.
+ *  /_______\
+ */
+
+int
+test_pci_dev_ids(void)
+{
+       return a[0].x;
+}
diff --git a/app/chkincs/test_per_lcore.c b/app/chkincs/test_per_lcore.c
new file mode 100644 (file)
index 0000000..d2fc666
--- /dev/null
@@ -0,0 +1,57 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <rte_per_lcore.h>
+
+#include "test.h"
+
+/*
+ *      ^
+ *     / \
+ *    / | \     WARNING: this test program does *not* show how to use the
+ *   /  .  \    API. Its only goal is to check dependencies of include files.
+ *  /_______\
+ */
+
+static RTE_DEFINE_PER_LCORE(unsigned, test) = 0x12345678;
+
+int
+test_per_lcore(void)
+{
+       if (RTE_PER_LCORE(test) != 0x12345678)
+               return -1;
+
+       return 0;
+}
diff --git a/app/chkincs/test_prefetch.c b/app/chkincs/test_prefetch.c
new file mode 100644 (file)
index 0000000..df81f0e
--- /dev/null
@@ -0,0 +1,58 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <rte_prefetch.h>
+
+#include "test.h"
+
+/*
+ *      ^
+ *     / \
+ *    / | \     WARNING: this test program does *not* show how to use the
+ *   /  .  \    API. Its only goal is to check dependencies of include files.
+ *  /_______\
+ */
+
+int
+test_prefetch(void)
+{
+       int a;
+
+       rte_prefetch0(&a);
+       rte_prefetch1(&a);
+       rte_prefetch2(&a);
+
+       return 0;
+}
diff --git a/app/chkincs/test_random.c b/app/chkincs/test_random.c
new file mode 100644 (file)
index 0000000..9e10176
--- /dev/null
@@ -0,0 +1,54 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <rte_random.h>
+
+#include "test.h"
+
+/*
+ *      ^
+ *     / \
+ *    / | \     WARNING: this test program does *not* show how to use the
+ *   /  .  \    API. Its only goal is to check dependencies of include files.
+ *  /_______\
+ */
+
+int
+test_random(void)
+{
+       rte_srand(1);
+       rte_rand();
+       return 0;
+}
diff --git a/app/chkincs/test_ring.c b/app/chkincs/test_ring.c
new file mode 100644 (file)
index 0000000..5e37a6a
--- /dev/null
@@ -0,0 +1,97 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <rte_ring.h>
+
+#include "test.h"
+
+#define MAX_BULK 16
+#define RING_SIZE 4096
+
+/*
+ *      ^
+ *     / \
+ *    / | \     WARNING: this test program does *not* show how to use the
+ *   /  .  \    API. Its only goal is to check dependencies of include files.
+ *  /_______\
+ */
+
+int
+test_ring(void)
+{
+       struct rte_ring *r;
+       void *ptrs[MAX_BULK];
+       int x;
+
+       r = rte_ring_create("test", RING_SIZE, SOCKET_ID_ANY, 0);
+       if (r == 0) {
+               return -1;
+       }
+       rte_ring_dump(r);
+
+       rte_ring_set_bulk_count(r, MAX_BULK);
+       rte_ring_set_water_mark(r, 50);
+
+       rte_ring_sp_enqueue_bulk(r, &ptrs[0], 1);
+       rte_ring_mp_enqueue_bulk(r, &ptrs[0], 1);
+       rte_ring_sp_enqueue_bulk(r, &ptrs[0], MAX_BULK);
+       rte_ring_mp_enqueue_bulk(r, &ptrs[0], MAX_BULK);
+       rte_ring_enqueue_bulk(r, &ptrs[0], MAX_BULK);
+       rte_ring_enqueue_bulk(r, &ptrs[0], MAX_BULK);
+       rte_ring_sp_enqueue(r, &ptrs[0]);
+       rte_ring_mp_enqueue(r, &ptrs[0]);
+       rte_ring_enqueue(r, &ptrs[0]);
+
+       rte_ring_sc_dequeue_bulk(r, &ptrs[0], 1);
+       rte_ring_sc_dequeue_bulk(r, &ptrs[0], MAX_BULK);
+       rte_ring_mc_dequeue_bulk(r, &ptrs[0], 1);
+       rte_ring_mc_dequeue_bulk(r, &ptrs[0], MAX_BULK);
+       rte_ring_dequeue_bulk(r, &ptrs[0], 1);
+       rte_ring_dequeue_bulk(r, &ptrs[0], MAX_BULK);
+       rte_ring_sc_dequeue(r, &ptrs[0]);
+       rte_ring_mc_dequeue(r, &ptrs[0]);
+       rte_ring_dequeue(r, &ptrs[0]);
+
+       __RING_STAT_ADD(r, enq_fail, 10);
+
+       x = rte_ring_full(r);
+       x = rte_ring_empty(r);
+       x = rte_ring_count(r);
+       x = rte_ring_free_count(r);
+
+       (void)x;
+
+       return 0;
+}
diff --git a/app/chkincs/test_rwlock.c b/app/chkincs/test_rwlock.c
new file mode 100644 (file)
index 0000000..20ab519
--- /dev/null
@@ -0,0 +1,60 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <rte_rwlock.h>
+
+#include "test.h"
+
+/*
+ *      ^
+ *     / \
+ *    / | \     WARNING: this test program does *not* show how to use the
+ *   /  .  \    API. Its only goal is to check dependencies of include files.
+ *  /_______\
+ */
+
+int
+test_rwlock(void)
+{
+       rte_rwlock_t rwl = RTE_RWLOCK_INITIALIZER;
+
+       rte_rwlock_init(&rwl);
+       rte_rwlock_write_lock(&rwl);
+       rte_rwlock_write_unlock(&rwl);
+       rte_rwlock_read_lock(&rwl);
+       rte_rwlock_read_unlock(&rwl);
+
+       return 0;
+}
diff --git a/app/chkincs/test_sctp.c b/app/chkincs/test_sctp.c
new file mode 100644 (file)
index 0000000..11b6b78
--- /dev/null
@@ -0,0 +1,52 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <rte_sctp.h>
+
+#include "test.h"
+
+/*
+ *      ^
+ *     / \
+ *    / | \     WARNING: this test program does *not* show how to use the
+ *   /  .  \    API. Its only goal is to check dependencies of include files.
+ *  /_______\
+ */
+
+int
+test_sctp(void)
+{
+       return 0;
+}
diff --git a/app/chkincs/test_spinlock.c b/app/chkincs/test_spinlock.c
new file mode 100644 (file)
index 0000000..eb538df
--- /dev/null
@@ -0,0 +1,59 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <rte_spinlock.h>
+
+#include "test.h"
+
+static rte_spinlock_t sl = RTE_SPINLOCK_INITIALIZER;
+static rte_spinlock_recursive_t slr = RTE_SPINLOCK_RECURSIVE_INITIALIZER;
+
+/*
+ *      ^
+ *     / \
+ *    / | \     WARNING: this test program does *not* show how to use the
+ *   /  .  \    API. Its only goal is to check dependencies of include files.
+ *  /_______\
+ */
+
+int
+test_spinlock(void)
+{
+       rte_spinlock_init(&sl);
+       rte_spinlock_lock(&sl);
+       rte_spinlock_unlock(&sl);
+       rte_spinlock_recursive_lock(&slr);
+       return 0;
+}
diff --git a/app/chkincs/test_string_fns.c b/app/chkincs/test_string_fns.c
new file mode 100644 (file)
index 0000000..09a24de
--- /dev/null
@@ -0,0 +1,52 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <rte_string_fns.h>
+
+#include "test.h"
+
+/*
+ *      ^
+ *     / \
+ *    / | \     WARNING: this test program does *not* show how to use the
+ *   /  .  \    API. Its only goal is to check dependencies of include files.
+ *  /_______\
+ */
+
+int
+test_string_fns(void)
+{
+       return 0;
+}
diff --git a/app/chkincs/test_tailq.c b/app/chkincs/test_tailq.c
new file mode 100644 (file)
index 0000000..4730e1c
--- /dev/null
@@ -0,0 +1,55 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <rte_tailq.h>
+
+#include "test.h"
+
+/*
+ *      ^
+ *     / \
+ *    / | \     WARNING: this test program does *not* show how to use the
+ *   /  .  \    API. Its only goal is to check dependencies of include files.
+ *  /_______\
+ */
+
+int
+test_tailq(void)
+{
+       struct rte_dummy *t1, *t2;
+       t1 = RTE_TAILQ_RESERVE("dummy", rte_dummy);
+       t2 = RTE_TAILQ_LOOKUP("dummy", rte_dummy);
+       return (t1 == t2) ? 0 : -1;
+}
diff --git a/app/chkincs/test_tcp.c b/app/chkincs/test_tcp.c
new file mode 100644 (file)
index 0000000..96e54a6
--- /dev/null
@@ -0,0 +1,52 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <rte_tcp.h>
+
+#include "test.h"
+
+/*
+ *      ^
+ *     / \
+ *    / | \     WARNING: this test program does *not* show how to use the
+ *   /  .  \    API. Its only goal is to check dependencies of include files.
+ *  /_______\
+ */
+
+int
+test_tcp(void)
+{
+       return 0;
+}
diff --git a/app/chkincs/test_timer.c b/app/chkincs/test_timer.c
new file mode 100644 (file)
index 0000000..ea63b42
--- /dev/null
@@ -0,0 +1,74 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+
+#include <rte_timer.h>
+
+#include "test.h"
+
+/*
+ *      ^
+ *     / \
+ *    / | \     WARNING: this test program does *not* show how to use the
+ *   /  .  \    API. Its only goal is to check dependencies of include files.
+ *  /_______\
+ */
+
+/* timer callback for basic tests */
+static void
+timer_cb(__attribute__((unused)) struct rte_timer *tim,
+        __attribute__((unused)) void *arg)
+{
+       return;
+}
+
+int
+test_timer(void)
+{
+       int x;
+       struct rte_timer tim = RTE_TIMER_INITIALIZER;
+
+       rte_timer_subsystem_init();
+       rte_timer_init(&tim);
+       rte_timer_reset(&tim, 1234, SINGLE, 0, timer_cb, &x);
+       rte_timer_stop(&tim);
+       rte_timer_reset_sync(&tim, 1234, SINGLE, 0, timer_cb, &x);
+       rte_timer_stop_sync(&tim);
+       x = rte_timer_pending(&tim);
+       rte_timer_manage();
+       rte_timer_dump_stats();
+
+       return 0;
+}
diff --git a/app/chkincs/test_udp.c b/app/chkincs/test_udp.c
new file mode 100644 (file)
index 0000000..9ccb5ba
--- /dev/null
@@ -0,0 +1,52 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include "test.h"
+
+#include <rte_udp.h>
+
+/*
+ *      ^
+ *     / \
+ *    / | \     WARNING: this test program does *not* show how to use the
+ *   /  .  \    API. Its only goal is to check dependencies of include files.
+ *  /_______\
+ */
+
+int
+test_udp(void)
+{
+       return 0;
+}
diff --git a/app/chkincs/test_version.c b/app/chkincs/test_version.c
new file mode 100644 (file)
index 0000000..e518a67
--- /dev/null
@@ -0,0 +1,52 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <rte_version.h>
+
+#include "test.h"
+
+/*
+ *      ^
+ *     / \
+ *    / | \     WARNING: this test program does *not* show how to use the
+ *   /  .  \    API. Its only goal is to check dependencies of include files.
+ *  /_______\
+ */
+
+int
+test_version(void)
+{
+       return 1;
+}
diff --git a/app/dump_cfg/Makefile b/app/dump_cfg/Makefile
new file mode 100644 (file)
index 0000000..916166a
--- /dev/null
@@ -0,0 +1,49 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+#
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+APP = dump_cfg
+
+CFLAGS += $(WERROR_FLAGS)
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-y := dump_cfg_main.c
+
+# this application needs libraries first
+DEPDIRS-y += lib
+
+include $(RTE_SDK)/mk/rte.app.mk
diff --git a/app/dump_cfg/dump_cfg_main.c b/app/dump_cfg/dump_cfg_main.c
new file mode 100644 (file)
index 0000000..9227f35
--- /dev/null
@@ -0,0 +1,229 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <stdarg.h>
+#include <unistd.h>
+#include <getopt.h>
+#include <string.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <sys/mman.h>
+#include <sys/queue.h>
+#include <limits.h>
+
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_string_fns.h>
+
+
+/* some functions used for printing out the memory segments and memory zones information */
+
+#define PRINT_STR_FIELD(structname, field) do{ \
+       count+= rte_snprintf(buf + count, len-count, " %s='%s',", \
+                       #field, (const char *)structname->field);\
+} while(0)
+
+#define PRINT_PTR_FIELD(structname, field) do{ \
+       count+= rte_snprintf(buf + count, len-count, " %s=%p,", \
+                       #field, (void *)structname->field);\
+} while(0)
+
+#define PRINT_UINT_FIELD(structname, field) do{ \
+       count+= rte_snprintf(buf + count, len-count, " %s=%llu,", \
+                       #field, (unsigned long long)structname->field);\
+} while(0)
+
+#define PRINT_INT_FIELD(structname, field) do{ \
+       count+= rte_snprintf(buf + count, len-count, " %s=%lld,", \
+                       #field, (long long)structname->field);\
+} while(0)
+
+#define PRINT_CUSTOM_FIELD(structname, field, print_fn) do{ \
+       char buf2[1024]; \
+       count+= rte_snprintf(buf + count, len-count, " %s=%s,", \
+                       #field, print_fn(structname->field, buf2, sizeof(buf2)));\
+} while(0)
+
+static inline const char *
+memseg_to_str(const struct rte_memseg *seg, char *buf, size_t len)
+{
+       int count = 0;
+       count += rte_snprintf(buf + count, len - count, "{");
+       PRINT_UINT_FIELD(seg, phys_addr);
+       PRINT_PTR_FIELD(seg, addr);
+       PRINT_UINT_FIELD(seg, len);
+       PRINT_INT_FIELD(seg, socket_id);
+       PRINT_UINT_FIELD(seg, hugepage_sz);
+       PRINT_UINT_FIELD(seg, nchannel);
+       PRINT_UINT_FIELD(seg, nrank);
+       rte_snprintf(buf + count - 1, len - count + 1, " }");
+       return buf;
+}
+
+static inline const char *
+memzone_to_str(const struct rte_memzone *zone, char *buf, size_t len)
+{
+       int count = 0;
+       count += rte_snprintf(buf + count, len - count, "{");
+       PRINT_STR_FIELD(zone, name);
+       PRINT_UINT_FIELD(zone, phys_addr);
+       PRINT_PTR_FIELD(zone, addr);
+       PRINT_UINT_FIELD(zone, len);
+       PRINT_INT_FIELD(zone, socket_id);
+       PRINT_UINT_FIELD(zone, flags);
+       rte_snprintf(buf + count - 1, len - count + 1, " }");
+       return buf;
+}
+
+static inline const char *
+tailq_to_str(const struct rte_tailq_head *tailq, char *buf, size_t len)
+{
+       int count = 0;
+       count += rte_snprintf(buf + count, len - count, "{");
+       PRINT_STR_FIELD(tailq, qname);
+       const struct rte_dummy_head *head = &tailq->tailq_head;
+       PRINT_PTR_FIELD(head, tqh_first);
+       PRINT_PTR_FIELD(head, tqh_last);
+       rte_snprintf(buf + count - 1, len - count + 1, " }");
+       return buf;
+}
+
+#define PREFIX    "prefix"
+static const char *directory = "/var/run";
+static const char *pre = "rte";
+
+static void
+usage(const char *prgname)
+{
+       printf("%s --prefix <prefix>\n\n"
+                       "dump_config option list:\n"
+                       "\t--"PREFIX": filename prefix\n",
+                       prgname);
+}
+
+static int
+dmp_cfg_parse_args(int argc, char **argv)
+{
+       const char *prgname = argv[0];
+       const char *home_dir = getenv("HOME");
+       int opt;
+       int option_index;
+       static struct option lgopts[] = {
+                       {PREFIX, 1, 0, 0},
+                       {0, 0, 0, 0}
+       };
+
+       if (getuid() != 0 && home_dir != NULL)
+               directory = home_dir;
+
+       while ((opt = getopt_long(argc, argv, "",
+                       lgopts, &option_index)) != EOF) {
+               switch (opt) {
+               case 0:
+                       if (!strcmp(lgopts[option_index].name, PREFIX))
+                               pre = optarg;
+                       else{
+                               usage(prgname);
+                               return -1;
+                       }
+                       break;
+
+               default:
+                       usage(prgname);
+                       return -1;
+               }
+       }
+       return 0;
+}
+
+int
+main(int argc, char **argv)
+{
+       char buffer[1024];
+       char path[PATH_MAX];
+       int i;
+       int fd = 0;
+
+       dmp_cfg_parse_args(argc, argv);
+       rte_snprintf(path, sizeof(path), "%s/.%s_config",  directory, pre);
+       printf("Path to mem_config: %s\n\n", path);
+
+       fd = open(path, O_RDWR);
+       if (fd < 0){
+               printf("Error with config open\n");
+               return 1;
+       }
+       struct rte_mem_config *cfg = mmap(NULL, sizeof(*cfg), PROT_READ, \
+                       MAP_SHARED, fd, 0);
+       if (cfg == NULL){
+               printf("Error with config mmap\n");
+               close(fd);
+       return 1;
+       }
+       close(fd);
+
+       printf("----------- MEMORY_SEGMENTS -------------\n");
+       for (i = 0; i < RTE_MAX_MEMSEG; i++){
+               if (cfg->memseg[i].addr == NULL) break;
+               printf("Segment %d: ", i);
+               printf("%s\n", memseg_to_str(&cfg->memseg[i], buffer, sizeof(buffer)));
+       }
+       printf("--------- END_MEMORY_SEGMENTS -----------\n");
+
+       printf("------------ MEMORY_ZONES ---------------\n");
+       for (i = 0; i < RTE_MAX_MEMZONE; i++){
+               if (cfg->memzone[i].addr == NULL) break;
+               printf("Zone %d: ", i);
+               printf("%s\n", memzone_to_str(&cfg->memzone[i], buffer, sizeof(buffer)));
+
+       }
+       printf("---------- END_MEMORY_ZONES -------------\n");
+
+       printf("------------- TAIL_QUEUES ---------------\n");
+       for (i = 0; i < RTE_MAX_TAILQ; i++){
+               if (cfg->tailq_head[i].qname[0] == '\0') break;
+               printf("Tailq %d: ", i);
+               printf("%s\n", tailq_to_str(&cfg->tailq_head[i], buffer, sizeof(buffer)));
+
+       }
+       printf("----------- END_TAIL_QUEUES -------------\n");
+
+       return 0;
+}
+
diff --git a/app/test-pmd/Makefile b/app/test-pmd/Makefile
new file mode 100644 (file)
index 0000000..bad337a
--- /dev/null
@@ -0,0 +1,63 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# application name (this builds the testpmd binary, not a library)
+#
+APP = testpmd
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+#
+# all sources are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_TEST_PMD) := testpmd.c
+SRCS-$(CONFIG_RTE_TEST_PMD) += parameters.c
+SRCS-$(CONFIG_RTE_TEST_PMD) += cmdline.c
+SRCS-$(CONFIG_RTE_TEST_PMD) += config.c
+SRCS-$(CONFIG_RTE_TEST_PMD) += iofwd.c
+SRCS-$(CONFIG_RTE_TEST_PMD) += macfwd.c
+SRCS-$(CONFIG_RTE_TEST_PMD) += rxonly.c
+SRCS-$(CONFIG_RTE_TEST_PMD) += txonly.c
+SRCS-$(CONFIG_RTE_TEST_PMD) += csumonly.c
+# the IEEE1588 forwarding engine is only built when the library supports it
+ifeq ($(CONFIG_RTE_LIBRTE_IEEE1588),y)
+SRCS-$(CONFIG_RTE_TEST_PMD) += ieee1588fwd.c
+endif
+
+# this application needs libraries first
+DEPDIRS-$(CONFIG_RTE_TEST_PMD) += lib
+
+include $(RTE_SDK)/mk/rte.app.mk
diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
new file mode 100644 (file)
index 0000000..28233a6
--- /dev/null
@@ -0,0 +1,2180 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <stdarg.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <stdarg.h>
+#include <string.h>
+#include <termios.h>
+#include <unistd.h>
+#include <inttypes.h>
+#ifndef __linux__
+#include <net/socket.h>
+#endif
+#include <netinet/in.h>
+
+#include <sys/queue.h>
+
+#include <rte_common.h>
+#include <rte_byteorder.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_cycles.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_launch.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_ring.h>
+#include <rte_mempool.h>
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_string_fns.h>
+
+#include <cmdline_rdline.h>
+#include <cmdline_parse.h>
+#include <cmdline_parse_num.h>
+#include <cmdline_parse_string.h>
+#include <cmdline_parse_ipaddr.h>
+#include <cmdline_parse_etheraddr.h>
+#include <cmdline_socket.h>
+#include <cmdline.h>
+
+#include "testpmd.h"
+
+/* *** HELP *** */
+/* result layout for the one-token "help" command */
+struct cmd_help_result {
+       cmdline_fixed_string_t help;    /* literal "help" token */
+};
+
+static void cmd_help_parsed(__attribute__((unused)) void *parsed_result,
+                           struct cmdline *cl,
+                           __attribute__((unused)) void *data)
+{
+       cmdline_printf(cl,
+                      "\n"
+                      "TEST PMD\n"
+                      "--------\n"
+                      "\n"
+                      "This commandline can be used to configure forwarding\n"
+                      "\n");
+       cmdline_printf(cl,
+                      "Display informations:\n"
+                      "---------------------\n"
+                      "- show port info|stats|fdir X|all\n"
+                      "    Diplays information or stats on port X, or all\n"
+                      "- clear port stats X|all\n"
+                      "    Clear stats for port X, or all\n"
+                      "- show config rxtx|cores|fwd\n"
+                      "    Displays the given configuration\n"
+                      "- read reg port_id reg_off\n"
+                      "    Displays value of a port register\n"
+                      "- read regfield port_id reg_off bit_x bit_y\n"
+                      "    Displays value of a port register bit field\n"
+                      "- read regbit port_id reg_off bit_x\n"
+                      "    Displays value of a port register bit\n"
+                      "- read rxd port_id queue_id rxd_id\n"
+                      "    Displays a RX descriptor of a port RX queue\n"
+                      "- read txd port_id queue_id txd_id\n"
+                      "    Displays a TX descriptor of a port TX queue\n"
+                      "\n");
+       cmdline_printf(cl,
+                      "Configure:\n"
+                      "----------\n"
+                      "Modifications are taken into account once "
+                      "forwarding is restarted.\n"
+                      "- set default\n"
+                      "    Set forwarding to default configuration\n"
+                      "- set nbport|nbcore|burst|verbose X\n"
+                      "    Set number of ports, number of cores, number "
+                      "of packets per burst,\n    or verbose level to X\n"
+                      "- set txpkts x[,y]*\n"
+                      "    Set the length of each segment of TXONLY packets\n"
+                      "- set coremask|portmask X\n"
+                      "    Set the hexadecimal mask of forwarding cores / "
+                      "forwarding ports\n"
+                      "- set corelist|portlist x[,y]*\n"
+                      "    Set the list of forwarding cores / forwarding "
+                      "ports\n"
+                      "- rx_vlan add/rm vlan_id|all port_id\n"
+                      "    Add/remove vlan_id, or all identifiers, to/from "
+                      "the set of VLAN Identifiers\n    filtered by port_id\n"
+                      "- tx_vlan set vlan_id port_id\n"
+                      "    Enable hardware insertion of a VLAN header with "
+                      "the Tag Identifier vlan_id\n    in packets sent on"
+                      "port_id\n"
+                      "- tx_vlan reset port_id\n"
+                      "    Disable hardware insertion of a VLAN header in "
+                      "packets sent on port_id\n"
+                      "- tx_checksum set mask port_id\n"
+                      "    Enable hardware insertion of checksum offload with "
+                      "the 4-bit mask (0~0xf)\n    in packets sent on port_id\n"
+                      "    Please check the NIC datasheet for HW limits\n"
+                      "      bit 0 - insert ip checksum offload if set \n"
+                      "      bit 1 - insert udp checksum offload if set \n"
+                      "      bit 2 - insert tcp checksum offload if set\n"
+                      "      bit 3 - insert sctp checksum offload if set\n"
+#ifdef RTE_LIBRTE_IEEE1588
+                      "- set fwd io|mac|rxonly|txonly|csum|ieee1588\n"
+                      "    Set IO, MAC, RXONLY, TXONLY, CSUM or IEEE1588 "
+                      "packet forwarding mode\n"
+#else
+                      "- set fwd io|mac|rxonly|txonly|csum\n"
+                      "    Set IO, MAC, RXONLY, CSUM or TXONLY packet "
+                      "forwarding mode\n"
+#endif
+                      "- mac_addr add|remove X <xx:xx:xx:xx:xx:xx>\n"
+                      "    Add/Remove the MAC address <xx:xx:xx:xx:xx:xx> on port X\n"
+                      "- set promisc|allmulti [all|X] on|off\n"
+                      "    Set/unset promisc|allmulti mode on port X, or all\n"
+                      "- set flow_ctrl rx on|off tx on|off high_water low_water "
+                                               "pause_time send_xon port_id \n"
+                      "    Set the link flow control parameter on the port \n"
+                      "- write reg port_id reg_off value\n"
+                      "    Set value of a port register\n"
+                      "- write regfield port_id reg_off bit_x bit_y value\n"
+                      "    Set bit field value of a port register\n"
+                      "- write regbit port_id reg_off bit_x value\n"
+                      "    Set bit value of a port register\n"
+                      "\n");
+       cmdline_printf(cl,
+                      "Control forwarding:\n"
+                      "-------------------\n"
+                      "- start\n"
+                      "    Start packet forwarding with current config\n"
+                      "- start tx_first\n"
+                      "    Start packet forwarding with current config"
+                      " after sending one burst\n    of packets\n"
+                      "- stop\n"
+                      "    Stop packet forwarding, and displays accumulated"
+                      " stats\n"
+                      "\n");
+       cmdline_printf(cl,
+                      "Flow director mode:\n"
+                      "-------------------\n"
+                      "- add_signature_filter port_id ip|udp|tcp|sctp src\n"
+                      "    ip_src_address port_src dst ip_dst_address port_dst\n"
+                      "    flexbytes flexbytes_values vlan vlan_id queue queue_id\n"
+                      "- upd_signature_filter port_id ip|udp|tcp|sctp src \n"
+                      "    ip_src_address port_src dst ip_dst_address port_dst\n"
+                      "    flexbytes flexbytes_values vlan vlan_id queue queue_id\n"
+                      "- rm_signature_filter port_id ip|udp|tcp|sctp src\n"
+                      "    ip_src_address port_src dst ip_dst_address port_dst\n"
+                      "    flexbytes flexbytes_values vlan vlan_id\n"
+                      "- add_perfect_filter port_id ip|udp|tcp|sctp src\n"
+                      "    ip_src_address port_src dst ip_dst_address port_dst\n"
+                      "    flexbytes flexbytes_values vlan vlan_id queue \n"
+                      "    queue_id soft soft_id\n"
+                      "- upd_perfect_filter port_id ip|udp|tcp|sctp src\n"
+                      "    ip_src_address port_src dst ip_dst_address port_dst\n"
+                      "    flexbytes flexbytes_values vlan vlan_id queue queue_id\n"
+                      "- rm_perfect_filter port_id ip|udp|tcp|sctp src\n"
+                      "    ip_src_address port_src dst ip_dst_address port_dst\n"
+                      "    flexbytes flexbytes_values vlan vlan_id soft soft_id\n"
+                      "- set_masks_filter port_id only_ip_flow 0|1 src_mask\n"
+                      "    ip_src_mask  port_src_mask dst_mask ip_dst_mask\n"
+                      "    port_dst_mask flexbytes 0|1 vlan_id 0|1 vlan_prio 0|1\n"
+                      "\n");
+       cmdline_printf(cl,
+                      "Misc:\n"
+                      "-----\n"
+                      "- quit\n"
+                      "    Quit to prompt in linux, and reboot on baremetal\n"
+                      "\n");
+}
+
+/* token and command registration for "help" */
+cmdline_parse_token_string_t cmd_help_help =
+       TOKEN_STRING_INITIALIZER(struct cmd_help_result, help, "help");
+
+cmdline_parse_inst_t cmd_help = {
+       .f = cmd_help_parsed,
+       .data = NULL,
+       .help_str = "show help",
+       .tokens = {
+               (void *)&cmd_help_help,
+               NULL,
+       },
+};
+
+/* *** stop *** */
+/* result layout for the one-token "stop" command */
+struct cmd_stop_result {
+       cmdline_fixed_string_t stop;    /* literal "stop" token */
+};
+
+/* "stop": halt packet forwarding and print accumulated statistics */
+static void cmd_stop_parsed(__attribute__((unused)) void *parsed_result,
+                           __attribute__((unused)) struct cmdline *cl,
+                           __attribute__((unused)) void *data)
+{
+       stop_packet_forwarding();
+}
+
+cmdline_parse_token_string_t cmd_stop_stop =
+       TOKEN_STRING_INITIALIZER(struct cmd_stop_result, stop, "stop");
+
+cmdline_parse_inst_t cmd_stop = {
+       .f = cmd_stop_parsed,
+       .data = NULL,
+       .help_str = "stop - stop packet forwarding",
+       .tokens = {
+               (void *)&cmd_stop_stop,
+               NULL,
+       },
+};
+
+/* *** SET CORELIST and PORTLIST CONFIGURATION *** */
+
+/*
+ * Parse a comma-separated list of unsigned decimal values from <str>
+ * into <parsed_items> (at most <max_items> entries).  When
+ * <check_unique_values> is non-zero, reject lists containing duplicate
+ * values.  Returns the number of items parsed, or 0 on any error
+ * (non-digit character, empty element, too many items, duplicates).
+ */
+static unsigned int
+parse_item_list(char* str, const char* item_name, unsigned int max_items,
+               unsigned int *parsed_items, int check_unique_values)
+{
+       unsigned int str_len;
+       unsigned int nb_item;
+       unsigned int value;
+       unsigned int i;
+       unsigned int j;
+       int value_ok;
+       char c;
+
+       /*
+        * First parse all items in the list and store their value.
+        */
+       value = 0;
+       nb_item = 0;
+       value_ok = 0;
+       str_len = strnlen(str, STR_TOKEN_SIZE); /* hoisted: loop invariant */
+       for (i = 0; i < str_len; i++) {
+               c = str[i];
+               if ((c >= '0') && (c <= '9')) {
+                       value = (unsigned int) (value * 10 + (c - '0'));
+                       value_ok = 1;
+                       continue;
+               }
+               if (c != ',') {
+                       printf("character %c is not a decimal digit\n", c);
+                       return (0);
+               }
+               if (! value_ok) {
+                       printf("No valid value before comma\n");
+                       return (0);
+               }
+               if (nb_item < max_items) {
+                       parsed_items[nb_item] = value;
+                       value_ok = 0;
+                       value = 0;
+               }
+               nb_item++;
+       }
+       /* reject an empty list or a trailing comma with no value after it */
+       if (! value_ok) {
+               printf("No valid value at end of list\n");
+               return (0);
+       }
+       if (nb_item >= max_items) {
+               printf("Number of %s = %u > %u (maximum items)\n",
+                      item_name, nb_item + 1, max_items);
+               return (0);
+       }
+       parsed_items[nb_item++] = value;
+       if (! check_unique_values)
+               return (nb_item);
+
+       /*
+        * Then, check that all values in the list are different.
+        * No optimization here...
+        */
+       for (i = 0; i < nb_item; i++) {
+               for (j = i + 1; j < nb_item; j++) {
+                       if (parsed_items[j] == parsed_items[i]) {
+                               printf("duplicated %s %u at index %u and %u\n",
+                                      item_name, parsed_items[i], i, j);
+                               return (0);
+                       }
+               }
+       }
+       return (nb_item);
+}
+
+/* result layout for "set corelist|portlist <list>" */
+struct cmd_set_list_result {
+       cmdline_fixed_string_t cmd_keyword;
+       cmdline_fixed_string_t list_name;
+       cmdline_fixed_string_t list_of_items;
+};
+
+/*
+ * "set corelist|portlist x[,y]*": parse the comma-separated list and
+ * install it as the set of forwarding lcores or forwarding ports.
+ * Duplicate entries are rejected by parse_item_list().
+ */
+static void cmd_set_list_parsed(void *parsed_result,
+                               __attribute__((unused)) struct cmdline *cl,
+                               __attribute__((unused)) void *data)
+{
+       struct cmd_set_list_result *res;
+       /* only one of the two lists is used per call, so share storage */
+       union {
+               unsigned int lcorelist[RTE_MAX_LCORE];
+               unsigned int portlist[RTE_MAX_ETHPORTS];
+       } parsed_items;
+       unsigned int nb_item;
+
+       res = parsed_result;
+       if (!strcmp(res->list_name, "corelist")) {
+               nb_item = parse_item_list(res->list_of_items, "core",
+                                         RTE_MAX_LCORE,
+                                         parsed_items.lcorelist, 1);
+               if (nb_item > 0)
+                       set_fwd_lcores_list(parsed_items.lcorelist, nb_item);
+               return;
+       }
+       if (!strcmp(res->list_name, "portlist")) {
+               nb_item = parse_item_list(res->list_of_items, "port",
+                                         RTE_MAX_ETHPORTS,
+                                         parsed_items.portlist, 1);
+               if (nb_item > 0)
+                       set_fwd_ports_list(parsed_items.portlist, nb_item);
+       }
+}
+
+cmdline_parse_token_string_t cmd_set_list_keyword =
+       TOKEN_STRING_INITIALIZER(struct cmd_set_list_result, cmd_keyword,
+                                "set");
+cmdline_parse_token_string_t cmd_set_list_name =
+       TOKEN_STRING_INITIALIZER(struct cmd_set_list_result, list_name,
+                                "corelist#portlist");
+/* NULL pattern: accept any string as the list of items */
+cmdline_parse_token_string_t cmd_set_list_of_items =
+       TOKEN_STRING_INITIALIZER(struct cmd_set_list_result, list_of_items,
+                                NULL);
+
+cmdline_parse_inst_t cmd_set_fwd_list = {
+       .f = cmd_set_list_parsed,
+       .data = NULL,
+       .help_str = "set corelist|portlist x[,y]*",
+       .tokens = {
+               (void *)&cmd_set_list_keyword,
+               (void *)&cmd_set_list_name,
+               (void *)&cmd_set_list_of_items,
+               NULL,
+       },
+};
+
+/* *** SET COREMASK and PORTMASK CONFIGURATION *** */
+
+/* result layout for "set coremask|portmask <hex>" */
+struct cmd_setmask_result {
+       cmdline_fixed_string_t set;
+       cmdline_fixed_string_t mask;
+       uint64_t hexavalue;
+};
+
+/* "set coremask|portmask <hex>": select forwarding lcores/ports by bitmask */
+static void cmd_set_mask_parsed(void *parsed_result,
+                               __attribute__((unused)) struct cmdline *cl,
+                               __attribute__((unused)) void *data)
+{
+       struct cmd_setmask_result *res = parsed_result;
+
+       if (!strcmp(res->mask, "coremask"))
+               set_fwd_lcores_mask(res->hexavalue);
+       else if (!strcmp(res->mask, "portmask"))
+               set_fwd_ports_mask(res->hexavalue);
+}
+
+cmdline_parse_token_string_t cmd_setmask_set =
+       TOKEN_STRING_INITIALIZER(struct cmd_setmask_result, set, "set");
+cmdline_parse_token_string_t cmd_setmask_mask =
+       TOKEN_STRING_INITIALIZER(struct cmd_setmask_result, mask,
+                                "coremask#portmask");
+cmdline_parse_token_num_t cmd_setmask_value =
+       TOKEN_NUM_INITIALIZER(struct cmd_setmask_result, hexavalue, UINT64);
+
+cmdline_parse_inst_t cmd_set_fwd_mask = {
+       .f = cmd_set_mask_parsed,
+       .data = NULL,
+       .help_str = "set coremask|portmask hexadecimal value",
+       .tokens = {
+               (void *)&cmd_setmask_set,
+               (void *)&cmd_setmask_mask,
+               (void *)&cmd_setmask_value,
+               NULL,
+       },
+};
+
+/*
+ * SET NBPORT, NBCORE, PACKET BURST, and VERBOSE LEVEL CONFIGURATION
+ */
+/* result layout for "set nbport|nbcore|burst|verbose <value>" */
+struct cmd_set_result {
+       cmdline_fixed_string_t set;
+       cmdline_fixed_string_t what;
+       uint16_t value;
+};
+
+/* dispatch the scalar "set" commands to the matching testpmd setter */
+static void cmd_set_parsed(void *parsed_result,
+                          __attribute__((unused)) struct cmdline *cl,
+                          __attribute__((unused)) void *data)
+{
+       struct cmd_set_result *res = parsed_result;
+       if (!strcmp(res->what, "nbport"))
+               set_fwd_ports_number(res->value);
+       else if (!strcmp(res->what, "nbcore"))
+               set_fwd_lcores_number(res->value);
+       else if (!strcmp(res->what, "burst"))
+               set_nb_pkt_per_burst(res->value);
+       else if (!strcmp(res->what, "verbose"))
+               set_verbose_level(res->value);
+}
+
+cmdline_parse_token_string_t cmd_set_set =
+       TOKEN_STRING_INITIALIZER(struct cmd_set_result, set, "set");
+cmdline_parse_token_string_t cmd_set_what =
+       TOKEN_STRING_INITIALIZER(struct cmd_set_result, what,
+                                "nbport#nbcore#burst#verbose");
+cmdline_parse_token_num_t cmd_set_value =
+       TOKEN_NUM_INITIALIZER(struct cmd_set_result, value, UINT16);
+
+cmdline_parse_inst_t cmd_set_numbers = {
+       .f = cmd_set_parsed,
+       .data = NULL,
+       .help_str = "set nbport|nbcore|burst|verbose value",
+       .tokens = {
+               (void *)&cmd_set_set,
+               (void *)&cmd_set_what,
+               (void *)&cmd_set_value,
+               NULL,
+       },
+};
+
+/* *** SET SEGMENT LENGTHS OF TXONLY PACKETS *** */
+
+/* result layout for "set txpkts x[,y]*" */
+struct cmd_set_txpkts_result {
+       cmdline_fixed_string_t cmd_keyword;
+       cmdline_fixed_string_t txpkts;
+       cmdline_fixed_string_t seg_lengths;
+};
+
+/* "set txpkts x[,y]*": set the per-segment lengths of TXONLY packets */
+static void
+cmd_set_txpkts_parsed(void *parsed_result,
+                     __attribute__((unused)) struct cmdline *cl,
+                     __attribute__((unused)) void *data)
+{
+       struct cmd_set_txpkts_result *res;
+       unsigned seg_lengths[RTE_MAX_SEGS_PER_PKT];
+       unsigned int nb_segs;
+
+       res = parsed_result;
+       /* duplicate lengths are allowed here (check_unique_values == 0) */
+       nb_segs = parse_item_list(res->seg_lengths, "segment lengths",
+                                 RTE_MAX_SEGS_PER_PKT, seg_lengths, 0);
+       if (nb_segs > 0)
+               set_tx_pkt_segments(seg_lengths, nb_segs);
+}
+
+cmdline_parse_token_string_t cmd_set_txpkts_keyword =
+       TOKEN_STRING_INITIALIZER(struct cmd_set_txpkts_result,
+                                cmd_keyword, "set");
+cmdline_parse_token_string_t cmd_set_txpkts_name =
+       TOKEN_STRING_INITIALIZER(struct cmd_set_txpkts_result,
+                                txpkts, "txpkts");
+/* NULL pattern: accept any string as the list of segment lengths */
+cmdline_parse_token_string_t cmd_set_txpkts_lengths =
+       TOKEN_STRING_INITIALIZER(struct cmd_set_txpkts_result,
+                                seg_lengths, NULL);
+
+cmdline_parse_inst_t cmd_set_txpkts = {
+       .f = cmd_set_txpkts_parsed,
+       .data = NULL,
+       .help_str = "set txpkts x[,y]*",
+       .tokens = {
+               (void *)&cmd_set_txpkts_keyword,
+               (void *)&cmd_set_txpkts_name,
+               (void *)&cmd_set_txpkts_lengths,
+               NULL,
+       },
+};
+
+/* *** ADD/REMOVE ALL VLAN IDENTIFIERS TO/FROM A PORT VLAN RX FILTER *** */
+/* result layout for "rx_vlan add|rm all <port>" */
+struct cmd_rx_vlan_filter_all_result {
+       cmdline_fixed_string_t rx_vlan;
+       cmdline_fixed_string_t what;
+       cmdline_fixed_string_t all;
+       uint8_t port_id;
+};
+
+/* "rx_vlan add|rm all <port>": enable/disable filtering of every VLAN id */
+static void
+cmd_rx_vlan_filter_all_parsed(void *parsed_result,
+                             __attribute__((unused)) struct cmdline *cl,
+                             __attribute__((unused)) void *data)
+{
+       struct cmd_rx_vlan_filter_all_result *res = parsed_result;
+
+       if (!strcmp(res->what, "add"))
+               rx_vlan_all_filter_set(res->port_id, 1);
+       else
+               rx_vlan_all_filter_set(res->port_id, 0);
+}
+
+cmdline_parse_token_string_t cmd_rx_vlan_filter_all_rx_vlan =
+       TOKEN_STRING_INITIALIZER(struct cmd_rx_vlan_filter_all_result,
+                                rx_vlan, "rx_vlan");
+cmdline_parse_token_string_t cmd_rx_vlan_filter_all_what =
+       TOKEN_STRING_INITIALIZER(struct cmd_rx_vlan_filter_all_result,
+                                what, "add#rm");
+cmdline_parse_token_string_t cmd_rx_vlan_filter_all_all =
+       TOKEN_STRING_INITIALIZER(struct cmd_rx_vlan_filter_all_result,
+                                all, "all");
+cmdline_parse_token_num_t cmd_rx_vlan_filter_all_portid =
+       TOKEN_NUM_INITIALIZER(struct cmd_rx_vlan_filter_all_result,
+                             port_id, UINT8);
+
+cmdline_parse_inst_t cmd_rx_vlan_filter_all = {
+       .f = cmd_rx_vlan_filter_all_parsed,
+       .data = NULL,
+       .help_str = "add/remove all identifiers to/from the set of VLAN "
+       "Identifiers filtered by a port",
+       .tokens = {
+               (void *)&cmd_rx_vlan_filter_all_rx_vlan,
+               (void *)&cmd_rx_vlan_filter_all_what,
+               (void *)&cmd_rx_vlan_filter_all_all,
+               (void *)&cmd_rx_vlan_filter_all_portid,
+               NULL,
+       },
+};
+
+/* *** ADD/REMOVE A VLAN IDENTIFIER TO/FROM A PORT VLAN RX FILTER *** */
+/* result layout for "rx_vlan add|rm <vlan_id> <port>" */
+struct cmd_rx_vlan_filter_result {
+       cmdline_fixed_string_t rx_vlan;
+       cmdline_fixed_string_t what;
+       uint16_t vlan_id;
+       uint8_t port_id;
+};
+
+/* "rx_vlan add|rm <vlan_id> <port>": filter a single VLAN identifier */
+static void
+cmd_rx_vlan_filter_parsed(void *parsed_result,
+                         __attribute__((unused)) struct cmdline *cl,
+                         __attribute__((unused)) void *data)
+{
+       struct cmd_rx_vlan_filter_result *res = parsed_result;
+
+       if (!strcmp(res->what, "add"))
+               rx_vlan_filter_set(res->port_id, res->vlan_id, 1);
+       else
+               rx_vlan_filter_set(res->port_id, res->vlan_id, 0);
+}
+
+cmdline_parse_token_string_t cmd_rx_vlan_filter_rx_vlan =
+       TOKEN_STRING_INITIALIZER(struct cmd_rx_vlan_filter_result,
+                                rx_vlan, "rx_vlan");
+cmdline_parse_token_string_t cmd_rx_vlan_filter_what =
+       TOKEN_STRING_INITIALIZER(struct cmd_rx_vlan_filter_result,
+                                what, "add#rm");
+cmdline_parse_token_num_t cmd_rx_vlan_filter_vlanid =
+       TOKEN_NUM_INITIALIZER(struct cmd_rx_vlan_filter_result,
+                             vlan_id, UINT16);
+cmdline_parse_token_num_t cmd_rx_vlan_filter_portid =
+       TOKEN_NUM_INITIALIZER(struct cmd_rx_vlan_filter_result,
+                             port_id, UINT8);
+
+cmdline_parse_inst_t cmd_rx_vlan_filter = {
+       .f = cmd_rx_vlan_filter_parsed,
+       .data = NULL,
+       .help_str = "add/remove a VLAN identifier to/from the set of VLAN "
+       "Identifiers filtered by a port",
+       .tokens = {
+               (void *)&cmd_rx_vlan_filter_rx_vlan,
+               (void *)&cmd_rx_vlan_filter_what,
+               (void *)&cmd_rx_vlan_filter_vlanid,
+               (void *)&cmd_rx_vlan_filter_portid,
+               NULL,
+       },
+};
+
+/* *** ENABLE HARDWARE INSERTION OF VLAN HEADER IN TX PACKETS *** */
+/* result layout for "tx_vlan set <vlan_id> <port>" */
+struct cmd_tx_vlan_set_result {
+       cmdline_fixed_string_t tx_vlan;
+       cmdline_fixed_string_t set;
+       uint16_t vlan_id;
+       uint8_t port_id;
+};
+
+/* "tx_vlan set <vlan_id> <port>": enable HW VLAN-tag insertion on TX */
+static void
+cmd_tx_vlan_set_parsed(void *parsed_result,
+                      __attribute__((unused)) struct cmdline *cl,
+                      __attribute__((unused)) void *data)
+{
+       struct cmd_tx_vlan_set_result *res = parsed_result;
+
+       tx_vlan_set(res->port_id, res->vlan_id);
+}
+
+cmdline_parse_token_string_t cmd_tx_vlan_set_tx_vlan =
+       TOKEN_STRING_INITIALIZER(struct cmd_tx_vlan_set_result,
+                                tx_vlan, "tx_vlan");
+cmdline_parse_token_string_t cmd_tx_vlan_set_set =
+       TOKEN_STRING_INITIALIZER(struct cmd_tx_vlan_set_result,
+                                set, "set");
+cmdline_parse_token_num_t cmd_tx_vlan_set_vlanid =
+       TOKEN_NUM_INITIALIZER(struct cmd_tx_vlan_set_result,
+                             vlan_id, UINT16);
+cmdline_parse_token_num_t cmd_tx_vlan_set_portid =
+       TOKEN_NUM_INITIALIZER(struct cmd_tx_vlan_set_result,
+                             port_id, UINT8);
+
+cmdline_parse_inst_t cmd_tx_vlan_set = {
+       .f = cmd_tx_vlan_set_parsed,
+       .data = NULL,
+       .help_str = "enable hardware insertion of a VLAN header with a given "
+       "TAG Identifier in packets sent on a port",
+       .tokens = {
+               (void *)&cmd_tx_vlan_set_tx_vlan,
+               (void *)&cmd_tx_vlan_set_set,
+               (void *)&cmd_tx_vlan_set_vlanid,
+               (void *)&cmd_tx_vlan_set_portid,
+               NULL,
+       },
+};
+
+/* *** DISABLE HARDWARE INSERTION OF VLAN HEADER IN TX PACKETS *** */
+/* result layout for "tx_vlan reset <port>" */
+struct cmd_tx_vlan_reset_result {
+       cmdline_fixed_string_t tx_vlan;
+       cmdline_fixed_string_t reset;
+       uint8_t port_id;
+};
+
+/* "tx_vlan reset <port>": disable HW VLAN-tag insertion on TX */
+static void
+cmd_tx_vlan_reset_parsed(void *parsed_result,
+                        __attribute__((unused)) struct cmdline *cl,
+                        __attribute__((unused)) void *data)
+{
+       struct cmd_tx_vlan_reset_result *res = parsed_result;
+
+       tx_vlan_reset(res->port_id);
+}
+
+cmdline_parse_token_string_t cmd_tx_vlan_reset_tx_vlan =
+       TOKEN_STRING_INITIALIZER(struct cmd_tx_vlan_reset_result,
+                                tx_vlan, "tx_vlan");
+cmdline_parse_token_string_t cmd_tx_vlan_reset_reset =
+       TOKEN_STRING_INITIALIZER(struct cmd_tx_vlan_reset_result,
+                                reset, "reset");
+cmdline_parse_token_num_t cmd_tx_vlan_reset_portid =
+       TOKEN_NUM_INITIALIZER(struct cmd_tx_vlan_reset_result,
+                             port_id, UINT8);
+
+cmdline_parse_inst_t cmd_tx_vlan_reset = {
+       .f = cmd_tx_vlan_reset_parsed,
+       .data = NULL,
+       .help_str = "disable hardware insertion of a VLAN header in packets "
+       "sent on a port",
+       .tokens = {
+               (void *)&cmd_tx_vlan_reset_tx_vlan,
+               (void *)&cmd_tx_vlan_reset_reset,
+               (void *)&cmd_tx_vlan_reset_portid,
+               NULL,
+       },
+};
+
+
+/* *** ENABLE HARDWARE INSERTION OF CHECKSUM IN TX PACKETS *** */
+/* result layout for "tx_checksum set <mask> <port>" */
+struct cmd_tx_cksum_set_result {
+       cmdline_fixed_string_t tx_cksum;
+       cmdline_fixed_string_t set;
+       uint8_t cksum_mask;     /* bit 0: IP, 1: UDP, 2: TCP, 3: SCTP (per help text) */
+       uint8_t port_id;
+};
+
+/* "tx_checksum set <mask> <port>": enable HW checksum offload per mask */
+static void
+cmd_tx_cksum_set_parsed(void *parsed_result,
+                      __attribute__((unused)) struct cmdline *cl,
+                      __attribute__((unused)) void *data)
+{
+       struct cmd_tx_cksum_set_result *res = parsed_result;
+
+       tx_cksum_set(res->port_id, res->cksum_mask);
+}
+
+cmdline_parse_token_string_t cmd_tx_cksum_set_tx_cksum =
+       TOKEN_STRING_INITIALIZER(struct cmd_tx_cksum_set_result,
+                               tx_cksum, "tx_checksum");
+cmdline_parse_token_string_t cmd_tx_cksum_set_set =
+       TOKEN_STRING_INITIALIZER(struct cmd_tx_cksum_set_result,
+                               set, "set");
+cmdline_parse_token_num_t cmd_tx_cksum_set_cksum_mask =
+       TOKEN_NUM_INITIALIZER(struct cmd_tx_cksum_set_result,
+                               cksum_mask, UINT8);
+cmdline_parse_token_num_t cmd_tx_cksum_set_portid =
+       TOKEN_NUM_INITIALIZER(struct cmd_tx_cksum_set_result,
+                               port_id, UINT8);
+
+cmdline_parse_inst_t cmd_tx_cksum_set = {
+       .f = cmd_tx_cksum_set_parsed,
+       .data = NULL,
+       .help_str = "enable hardware insertion of L3/L4checksum with a given "
+       "mask in packets sent on a port, the bit mapping is given as, Bit 0 for ip"
+       "Bit 1 for UDP, Bit 2 for TCP, Bit 3 for SCTP",
+       .tokens = {
+               (void *)&cmd_tx_cksum_set_tx_cksum,
+               (void *)&cmd_tx_cksum_set_set,
+               (void *)&cmd_tx_cksum_set_cksum_mask,
+               (void *)&cmd_tx_cksum_set_portid,
+               NULL,
+       },
+};
+
+/* *** SET FORWARDING MODE *** */
+/* parse result for: "set fwd <mode>" */
+struct cmd_set_fwd_mode_result {
+       cmdline_fixed_string_t set;
+       cmdline_fixed_string_t fwd;
+       cmdline_fixed_string_t mode;
+};
+
+/* Callback: hand the selected mode string to set_pkt_forwarding_mode(). */
+static void cmd_set_fwd_mode_parsed(void *parsed_result,
+                                   __attribute__((unused)) struct cmdline *cl,
+                                   __attribute__((unused)) void *data)
+{
+       struct cmd_set_fwd_mode_result *res = parsed_result;
+
+       set_pkt_forwarding_mode(res->mode);
+}
+
+cmdline_parse_token_string_t cmd_setfwd_set =
+       TOKEN_STRING_INITIALIZER(struct cmd_set_fwd_mode_result, set, "set");
+cmdline_parse_token_string_t cmd_setfwd_fwd =
+       TOKEN_STRING_INITIALIZER(struct cmd_set_fwd_mode_result, fwd, "fwd");
+/* '#'-separated alternatives; "ieee1588" is only offered when the
+ * library is built with RTE_LIBRTE_IEEE1588 */
+cmdline_parse_token_string_t cmd_setfwd_mode =
+       TOKEN_STRING_INITIALIZER(struct cmd_set_fwd_mode_result, mode,
+#ifdef RTE_LIBRTE_IEEE1588
+                                "io#mac#rxonly#txonly#csum#ieee1588");
+#else
+                                "io#mac#rxonly#txonly#csum");
+#endif
+
+/* command instance: "set fwd <mode>".
+ * Fixed the non-IEEE1588 help text to list the modes in the same
+ * order as the token pattern (RXONLY, TXONLY, CSUM). */
+cmdline_parse_inst_t cmd_set_fwd_mode = {
+       .f = cmd_set_fwd_mode_parsed,
+       .data = NULL,
+#ifdef RTE_LIBRTE_IEEE1588
+       .help_str = "set fwd io|mac|rxonly|txonly|csum|ieee1588 - set IO, MAC,"
+       " RXONLY, TXONLY, CSUM or IEEE1588 packet forwarding mode",
+#else
+       .help_str = "set fwd io|mac|rxonly|txonly|csum - set IO, MAC,"
+       " RXONLY, TXONLY or CSUM packet forwarding mode",
+#endif
+       .tokens = {
+               (void *)&cmd_setfwd_set,
+               (void *)&cmd_setfwd_fwd,
+               (void *)&cmd_setfwd_mode,
+               NULL,
+       },
+};
+
+/* *** SET PROMISC MODE *** */
+/* parse result for "set promisc all|<port> on|off"; which of port_all /
+ * port_num is meaningful depends on the inst's "allports" data pointer */
+struct cmd_set_promisc_mode_result {
+       cmdline_fixed_string_t set;
+       cmdline_fixed_string_t promisc;
+       cmdline_fixed_string_t port_all; /* valid if "allports" argument == 1 */
+       uint8_t port_num;                /* valid if "allports" argument == 0 */
+       cmdline_fixed_string_t mode;     /* "on" or "off" */
+};
+
+/*
+ * Callback shared by the "all ports" and "single port" promisc commands.
+ * The inst's .data pointer (non-NULL => all ports) arrives as 'allports';
+ * res->mode is "on" to enable, anything else disables.
+ */
+static void cmd_set_promisc_mode_parsed(void *parsed_result,
+                                       __attribute__((unused)) struct cmdline *cl,
+                                       void *allports)
+{
+       struct cmd_set_promisc_mode_result *res = parsed_result;
+       int on = (strcmp(res->mode, "on") == 0);
+       portid_t port;
+
+       if (allports != NULL) {
+               /* apply the setting to every probed port */
+               for (port = 0; port < nb_ports; port++) {
+                       if (on)
+                               rte_eth_promiscuous_enable(port);
+                       else
+                               rte_eth_promiscuous_disable(port);
+               }
+               return;
+       }
+
+       /* single-port variant */
+       if (on)
+               rte_eth_promiscuous_enable(res->port_num);
+       else
+               rte_eth_promiscuous_disable(res->port_num);
+}
+
+cmdline_parse_token_string_t cmd_setpromisc_set =
+       TOKEN_STRING_INITIALIZER(struct cmd_set_promisc_mode_result, set, "set");
+cmdline_parse_token_string_t cmd_setpromisc_promisc =
+       TOKEN_STRING_INITIALIZER(struct cmd_set_promisc_mode_result, promisc,
+                                "promisc");
+cmdline_parse_token_string_t cmd_setpromisc_portall =
+       TOKEN_STRING_INITIALIZER(struct cmd_set_promisc_mode_result, port_all,
+                                "all");
+cmdline_parse_token_num_t cmd_setpromisc_portnum =
+       TOKEN_NUM_INITIALIZER(struct cmd_set_promisc_mode_result, port_num,
+                             UINT8);
+cmdline_parse_token_string_t cmd_setpromisc_mode =
+       TOKEN_STRING_INITIALIZER(struct cmd_set_promisc_mode_result, mode,
+                                "on#off");
+
+/* "set promisc all on|off" -- .data == 1 selects the all-ports path
+ * in cmd_set_promisc_mode_parsed() */
+cmdline_parse_inst_t cmd_set_promisc_mode_all = {
+       .f = cmd_set_promisc_mode_parsed,
+       .data = (void *)1,
+       .help_str = "set promisc all on|off: set promisc mode for all ports",
+       .tokens = {
+               (void *)&cmd_setpromisc_set,
+               (void *)&cmd_setpromisc_promisc,
+               (void *)&cmd_setpromisc_portall,
+               (void *)&cmd_setpromisc_mode,
+               NULL,
+       },
+};
+
+/* "set promisc <port> on|off" -- .data == 0 selects the single-port path */
+cmdline_parse_inst_t cmd_set_promisc_mode_one = {
+       .f = cmd_set_promisc_mode_parsed,
+       .data = (void *)0,
+       .help_str = "set promisc X on|off: set promisc mode on port X",
+       .tokens = {
+               (void *)&cmd_setpromisc_set,
+               (void *)&cmd_setpromisc_promisc,
+               (void *)&cmd_setpromisc_portnum,
+               (void *)&cmd_setpromisc_mode,
+               NULL,
+       },
+};
+
+/* *** SET ALLMULTI MODE *** */
+/* parse result for "set allmulti all|<port> on|off"; mirrors the
+ * promisc command's all-ports / single-port layout */
+struct cmd_set_allmulti_mode_result {
+       cmdline_fixed_string_t set;
+       cmdline_fixed_string_t allmulti;
+       cmdline_fixed_string_t port_all; /* valid if "allports" argument == 1 */
+       uint8_t port_num;                /* valid if "allports" argument == 0 */
+       cmdline_fixed_string_t mode;     /* "on" or "off" */
+};
+
+/*
+ * Callback shared by the "all ports" and "single port" allmulti commands.
+ * The inst's .data pointer (non-NULL => all ports) arrives as 'allports';
+ * res->mode is "on" to enable, anything else disables.
+ */
+static void cmd_set_allmulti_mode_parsed(void *parsed_result,
+                                       __attribute__((unused)) struct cmdline *cl,
+                                       void *allports)
+{
+       struct cmd_set_allmulti_mode_result *res = parsed_result;
+       int on = (strcmp(res->mode, "on") == 0);
+       portid_t port;
+
+       if (allports != NULL) {
+               /* apply the setting to every probed port */
+               for (port = 0; port < nb_ports; port++) {
+                       if (on)
+                               rte_eth_allmulticast_enable(port);
+                       else
+                               rte_eth_allmulticast_disable(port);
+               }
+               return;
+       }
+
+       /* single-port variant */
+       if (on)
+               rte_eth_allmulticast_enable(res->port_num);
+       else
+               rte_eth_allmulticast_disable(res->port_num);
+}
+
+cmdline_parse_token_string_t cmd_setallmulti_set =
+       TOKEN_STRING_INITIALIZER(struct cmd_set_allmulti_mode_result, set, "set");
+cmdline_parse_token_string_t cmd_setallmulti_allmulti =
+       TOKEN_STRING_INITIALIZER(struct cmd_set_allmulti_mode_result, allmulti,
+                                "allmulti");
+cmdline_parse_token_string_t cmd_setallmulti_portall =
+       TOKEN_STRING_INITIALIZER(struct cmd_set_allmulti_mode_result, port_all,
+                                "all");
+cmdline_parse_token_num_t cmd_setallmulti_portnum =
+       TOKEN_NUM_INITIALIZER(struct cmd_set_allmulti_mode_result, port_num,
+                             UINT8);
+cmdline_parse_token_string_t cmd_setallmulti_mode =
+       TOKEN_STRING_INITIALIZER(struct cmd_set_allmulti_mode_result, mode,
+                                "on#off");
+
+/* "set allmulti all on|off" -- .data == 1 selects the all-ports path */
+cmdline_parse_inst_t cmd_set_allmulti_mode_all = {
+       .f = cmd_set_allmulti_mode_parsed,
+       .data = (void *)1,
+       .help_str = "set allmulti all on|off: set allmulti mode for all ports",
+       .tokens = {
+               (void *)&cmd_setallmulti_set,
+               (void *)&cmd_setallmulti_allmulti,
+               (void *)&cmd_setallmulti_portall,
+               (void *)&cmd_setallmulti_mode,
+               NULL,
+       },
+};
+
+/* "set allmulti <port> on|off" -- .data == 0 selects the single-port path */
+cmdline_parse_inst_t cmd_set_allmulti_mode_one = {
+       .f = cmd_set_allmulti_mode_parsed,
+       .data = (void *)0,
+       .help_str = "set allmulti X on|off: set allmulti mode on port X",
+       .tokens = {
+               (void *)&cmd_setallmulti_set,
+               (void *)&cmd_setallmulti_allmulti,
+               (void *)&cmd_setallmulti_portnum,
+               (void *)&cmd_setallmulti_mode,
+               NULL,
+       },
+};
+
+/* *** ADD/REMOVE A PKT FILTER *** */
+/* Shared parse result for all six flow-director commands
+ * (add/upd/rm x signature/perfect). Not every member is present in
+ * every command's token list; unused ones stay zero from the parser. */
+struct cmd_pkt_filter_result {
+       cmdline_fixed_string_t pkt_filter; /* command keyword, dispatched on */
+       uint8_t  port_id;
+       cmdline_fixed_string_t protocol;   /* "ip", "tcp", "udp" or "sctp" */
+       cmdline_fixed_string_t src;
+       cmdline_ipaddr_t ip_src;
+       uint16_t port_src;
+       cmdline_fixed_string_t dst;
+       cmdline_ipaddr_t ip_dst;
+       uint16_t port_dst;
+       cmdline_fixed_string_t flexbytes;
+       uint16_t flexbytes_value;
+       cmdline_fixed_string_t vlan;
+       uint16_t  vlan_id;
+       cmdline_fixed_string_t queue;
+       int8_t  queue_id;                  /* signed: negative has meaning for perfect filters */
+       cmdline_fixed_string_t soft;
+       uint8_t  soft_id;
+};
+
+/*
+ * Shared callback for all flow-director filter commands: build an
+ * rte_fdir_filter from the parsed tokens, then dispatch on the command
+ * keyword stored in res->pkt_filter.
+ */
+static void
+cmd_pkt_filter_parsed(void *parsed_result,
+                         __attribute__((unused)) struct cmdline *cl,
+                         __attribute__((unused)) void *data)
+{
+       struct rte_fdir_filter fdir_filter;
+       struct cmd_pkt_filter_result *res = parsed_result;
+
+       memset(&fdir_filter, 0, sizeof(struct rte_fdir_filter));
+
+       /* source address: single IPv4 word, or full IPv6 struct copy */
+       if (res->ip_src.family == AF_INET)
+               fdir_filter.ip_src.ipv4_addr = res->ip_src.addr.ipv4.s_addr;
+       else
+               memcpy(&(fdir_filter.ip_src.ipv6_addr),
+                      &(res->ip_src.addr.ipv6),
+                      sizeof(struct in6_addr));
+
+       if (res->ip_dst.family == AF_INET)
+               fdir_filter.ip_dst.ipv4_addr = res->ip_dst.addr.ipv4.s_addr;
+       else
+               memcpy(&(fdir_filter.ip_dst.ipv6_addr),
+                      &(res->ip_dst.addr.ipv6),
+                      sizeof(struct in6_addr));
+
+       /* L4 ports are stored in network byte order */
+       fdir_filter.port_dst = rte_cpu_to_be_16(res->port_dst);
+       fdir_filter.port_src = rte_cpu_to_be_16(res->port_src);
+
+       if (!strcmp(res->protocol, "udp"))
+               fdir_filter.l4type = RTE_FDIR_L4TYPE_UDP;
+       else if (!strcmp(res->protocol, "tcp"))
+               fdir_filter.l4type = RTE_FDIR_L4TYPE_TCP;
+       else if (!strcmp(res->protocol, "sctp"))
+               fdir_filter.l4type = RTE_FDIR_L4TYPE_SCTP;
+       else /* default only IP */
+               fdir_filter.l4type = RTE_FDIR_L4TYPE_NONE;
+
+       /* NOTE(review): iptype is derived from ip_dst only; this assumes
+        * ip_src and ip_dst always share the same family -- confirm */
+       if (res->ip_dst.family == AF_INET6)
+               fdir_filter.iptype = RTE_FDIR_IPTYPE_IPV6;
+       else
+               fdir_filter.iptype = RTE_FDIR_IPTYPE_IPV4;
+
+       fdir_filter.vlan_id    = rte_cpu_to_be_16(res->vlan_id);
+       fdir_filter.flex_bytes = rte_cpu_to_be_16(res->flexbytes_value);
+
+       /* dispatch on the leading command keyword; for the perfect-filter
+        * helpers, (queue_id < 0) is forwarded as an extra flag argument */
+       if (!strcmp(res->pkt_filter, "add_signature_filter"))
+               fdir_add_signature_filter(res->port_id, res->queue_id,
+                                         &fdir_filter);
+       else if (!strcmp(res->pkt_filter, "upd_signature_filter"))
+               fdir_update_signature_filter(res->port_id, res->queue_id,
+                                            &fdir_filter);
+       else if (!strcmp(res->pkt_filter, "rm_signature_filter"))
+               fdir_remove_signature_filter(res->port_id, &fdir_filter);
+       else if (!strcmp(res->pkt_filter, "add_perfect_filter"))
+               fdir_add_perfect_filter(res->port_id, res->soft_id,
+                                       res->queue_id,
+                                       (uint8_t) (res->queue_id < 0),
+                                       &fdir_filter);
+       else if (!strcmp(res->pkt_filter, "upd_perfect_filter"))
+               fdir_update_perfect_filter(res->port_id, res->soft_id,
+                                          res->queue_id,
+                                          (uint8_t) (res->queue_id < 0),
+                                          &fdir_filter);
+       else if (!strcmp(res->pkt_filter, "rm_perfect_filter"))
+               fdir_remove_perfect_filter(res->port_id, res->soft_id,
+                                          &fdir_filter);
+
+}
+
+
+/* Tokens shared by all add/upd/rm signature & perfect filter commands. */
+cmdline_parse_token_num_t cmd_pkt_filter_port_id =
+       TOKEN_NUM_INITIALIZER(struct cmd_pkt_filter_result,
+                             port_id, UINT8);
+cmdline_parse_token_string_t cmd_pkt_filter_protocol =
+       TOKEN_STRING_INITIALIZER(struct cmd_pkt_filter_result,
+                                protocol, "ip#tcp#udp#sctp");
+cmdline_parse_token_string_t cmd_pkt_filter_src =
+       TOKEN_STRING_INITIALIZER(struct cmd_pkt_filter_result,
+                                src, "src");
+cmdline_parse_token_ipaddr_t cmd_pkt_filter_ip_src =
+       TOKEN_IPADDR_INITIALIZER(struct cmd_pkt_filter_result,
+                                ip_src);
+cmdline_parse_token_num_t cmd_pkt_filter_port_src =
+       TOKEN_NUM_INITIALIZER(struct cmd_pkt_filter_result,
+                             port_src, UINT16);
+cmdline_parse_token_string_t cmd_pkt_filter_dst =
+       TOKEN_STRING_INITIALIZER(struct cmd_pkt_filter_result,
+                                dst, "dst");
+cmdline_parse_token_ipaddr_t cmd_pkt_filter_ip_dst =
+       TOKEN_IPADDR_INITIALIZER(struct cmd_pkt_filter_result,
+                                ip_dst);
+cmdline_parse_token_num_t cmd_pkt_filter_port_dst =
+       TOKEN_NUM_INITIALIZER(struct cmd_pkt_filter_result,
+                             port_dst, UINT16);
+cmdline_parse_token_string_t cmd_pkt_filter_flexbytes =
+       TOKEN_STRING_INITIALIZER(struct cmd_pkt_filter_result,
+                                flexbytes, "flexbytes");
+cmdline_parse_token_num_t cmd_pkt_filter_flexbytes_value =
+       TOKEN_NUM_INITIALIZER(struct cmd_pkt_filter_result,
+                             flexbytes_value, UINT16);
+cmdline_parse_token_string_t cmd_pkt_filter_vlan =
+       TOKEN_STRING_INITIALIZER(struct cmd_pkt_filter_result,
+                                vlan, "vlan");
+cmdline_parse_token_num_t cmd_pkt_filter_vlan_id =
+       TOKEN_NUM_INITIALIZER(struct cmd_pkt_filter_result,
+                             vlan_id, UINT16);
+cmdline_parse_token_string_t cmd_pkt_filter_queue =
+       TOKEN_STRING_INITIALIZER(struct cmd_pkt_filter_result,
+                                queue, "queue");
+/* queue_id is signed on purpose: a negative value means "drop" for
+ * perfect filters (see cmd_pkt_filter_parsed) */
+cmdline_parse_token_num_t cmd_pkt_filter_queue_id =
+       TOKEN_NUM_INITIALIZER(struct cmd_pkt_filter_result,
+                             queue_id, INT8);
+cmdline_parse_token_string_t cmd_pkt_filter_soft =
+       TOKEN_STRING_INITIALIZER(struct cmd_pkt_filter_result,
+                                soft, "soft");
+/* soft_id is declared uint8_t in cmd_pkt_filter_result: parse it as
+ * UINT8 (was UINT16, which writes 2 bytes into a 1-byte field) */
+cmdline_parse_token_num_t cmd_pkt_filter_soft_id =
+       TOKEN_NUM_INITIALIZER(struct cmd_pkt_filter_result,
+                             soft_id, UINT8);
+
+
+/* signature-filter commands: same token layout, only the leading
+ * keyword differs; cmd_pkt_filter_parsed() dispatches on it */
+cmdline_parse_token_string_t cmd_pkt_filter_add_signature_filter =
+       TOKEN_STRING_INITIALIZER(struct cmd_pkt_filter_result,
+                                pkt_filter, "add_signature_filter");
+cmdline_parse_inst_t cmd_add_signature_filter = {
+       .f = cmd_pkt_filter_parsed,
+       .data = NULL,
+       .help_str = "add a signature filter",
+       .tokens = {
+               (void *)&cmd_pkt_filter_add_signature_filter,
+               (void *)&cmd_pkt_filter_port_id,
+               (void *)&cmd_pkt_filter_protocol,
+               (void *)&cmd_pkt_filter_src,
+               (void *)&cmd_pkt_filter_ip_src,
+               (void *)&cmd_pkt_filter_port_src,
+               (void *)&cmd_pkt_filter_dst,
+               (void *)&cmd_pkt_filter_ip_dst,
+               (void *)&cmd_pkt_filter_port_dst,
+               (void *)&cmd_pkt_filter_flexbytes,
+               (void *)&cmd_pkt_filter_flexbytes_value,
+               (void *)&cmd_pkt_filter_vlan,
+               (void *)&cmd_pkt_filter_vlan_id,
+               (void *)&cmd_pkt_filter_queue,
+               (void *)&cmd_pkt_filter_queue_id,
+               NULL,
+       },
+};
+
+
+cmdline_parse_token_string_t cmd_pkt_filter_upd_signature_filter =
+       TOKEN_STRING_INITIALIZER(struct cmd_pkt_filter_result,
+                                pkt_filter, "upd_signature_filter");
+cmdline_parse_inst_t cmd_upd_signature_filter = {
+       .f = cmd_pkt_filter_parsed,
+       .data = NULL,
+       .help_str = "update a signature filter",
+       .tokens = {
+               (void *)&cmd_pkt_filter_upd_signature_filter,
+               (void *)&cmd_pkt_filter_port_id,
+               (void *)&cmd_pkt_filter_protocol,
+               (void *)&cmd_pkt_filter_src,
+               (void *)&cmd_pkt_filter_ip_src,
+               (void *)&cmd_pkt_filter_port_src,
+               (void *)&cmd_pkt_filter_dst,
+               (void *)&cmd_pkt_filter_ip_dst,
+               (void *)&cmd_pkt_filter_port_dst,
+               (void *)&cmd_pkt_filter_flexbytes,
+               (void *)&cmd_pkt_filter_flexbytes_value,
+               (void *)&cmd_pkt_filter_vlan,
+               (void *)&cmd_pkt_filter_vlan_id,
+               (void *)&cmd_pkt_filter_queue,
+               (void *)&cmd_pkt_filter_queue_id,
+               NULL,
+       },
+};
+
+
+/* removal takes no queue argument */
+cmdline_parse_token_string_t cmd_pkt_filter_rm_signature_filter =
+       TOKEN_STRING_INITIALIZER(struct cmd_pkt_filter_result,
+                                pkt_filter, "rm_signature_filter");
+cmdline_parse_inst_t cmd_rm_signature_filter = {
+       .f = cmd_pkt_filter_parsed,
+       .data = NULL,
+       .help_str = "remove a signature filter",
+       .tokens = {
+               (void *)&cmd_pkt_filter_rm_signature_filter,
+               (void *)&cmd_pkt_filter_port_id,
+               (void *)&cmd_pkt_filter_protocol,
+               (void *)&cmd_pkt_filter_src,
+               (void *)&cmd_pkt_filter_ip_src,
+               (void *)&cmd_pkt_filter_port_src,
+               (void *)&cmd_pkt_filter_dst,
+               (void *)&cmd_pkt_filter_ip_dst,
+               (void *)&cmd_pkt_filter_port_dst,
+               (void *)&cmd_pkt_filter_flexbytes,
+               (void *)&cmd_pkt_filter_flexbytes_value,
+               (void *)&cmd_pkt_filter_vlan,
+               (void *)&cmd_pkt_filter_vlan_id,
+               NULL
+               },
+};
+
+
+/* perfect-filter commands: like the signature commands but with an
+ * additional "soft <id>" pair at the end of the token list */
+cmdline_parse_token_string_t cmd_pkt_filter_add_perfect_filter =
+       TOKEN_STRING_INITIALIZER(struct cmd_pkt_filter_result,
+                                pkt_filter, "add_perfect_filter");
+cmdline_parse_inst_t cmd_add_perfect_filter = {
+       .f = cmd_pkt_filter_parsed,
+       .data = NULL,
+       .help_str = "add a perfect filter",
+       .tokens = {
+               (void *)&cmd_pkt_filter_add_perfect_filter,
+               (void *)&cmd_pkt_filter_port_id,
+               (void *)&cmd_pkt_filter_protocol,
+               (void *)&cmd_pkt_filter_src,
+               (void *)&cmd_pkt_filter_ip_src,
+               (void *)&cmd_pkt_filter_port_src,
+               (void *)&cmd_pkt_filter_dst,
+               (void *)&cmd_pkt_filter_ip_dst,
+               (void *)&cmd_pkt_filter_port_dst,
+               (void *)&cmd_pkt_filter_flexbytes,
+               (void *)&cmd_pkt_filter_flexbytes_value,
+               (void *)&cmd_pkt_filter_vlan,
+               (void *)&cmd_pkt_filter_vlan_id,
+               (void *)&cmd_pkt_filter_queue,
+               (void *)&cmd_pkt_filter_queue_id,
+               (void *)&cmd_pkt_filter_soft,
+               (void *)&cmd_pkt_filter_soft_id,
+               NULL,
+       },
+};
+
+
+cmdline_parse_token_string_t cmd_pkt_filter_upd_perfect_filter =
+       TOKEN_STRING_INITIALIZER(struct cmd_pkt_filter_result,
+                                pkt_filter, "upd_perfect_filter");
+cmdline_parse_inst_t cmd_upd_perfect_filter = {
+       .f = cmd_pkt_filter_parsed,
+       .data = NULL,
+       .help_str = "update a perfect filter",
+       .tokens = {
+               (void *)&cmd_pkt_filter_upd_perfect_filter,
+               (void *)&cmd_pkt_filter_port_id,
+               (void *)&cmd_pkt_filter_protocol,
+               (void *)&cmd_pkt_filter_src,
+               (void *)&cmd_pkt_filter_ip_src,
+               (void *)&cmd_pkt_filter_port_src,
+               (void *)&cmd_pkt_filter_dst,
+               (void *)&cmd_pkt_filter_ip_dst,
+               (void *)&cmd_pkt_filter_port_dst,
+               (void *)&cmd_pkt_filter_flexbytes,
+               (void *)&cmd_pkt_filter_flexbytes_value,
+               (void *)&cmd_pkt_filter_vlan,
+               (void *)&cmd_pkt_filter_vlan_id,
+               (void *)&cmd_pkt_filter_queue,
+               (void *)&cmd_pkt_filter_queue_id,
+               (void *)&cmd_pkt_filter_soft,
+               (void *)&cmd_pkt_filter_soft_id,
+               NULL,
+       },
+};
+
+
+/* removal identifies the filter by soft id; no queue argument */
+cmdline_parse_token_string_t cmd_pkt_filter_rm_perfect_filter =
+       TOKEN_STRING_INITIALIZER(struct cmd_pkt_filter_result,
+                                pkt_filter, "rm_perfect_filter");
+cmdline_parse_inst_t cmd_rm_perfect_filter = {
+       .f = cmd_pkt_filter_parsed,
+       .data = NULL,
+       .help_str = "remove a perfect filter",
+       .tokens = {
+               (void *)&cmd_pkt_filter_rm_perfect_filter,
+               (void *)&cmd_pkt_filter_port_id,
+               (void *)&cmd_pkt_filter_protocol,
+               (void *)&cmd_pkt_filter_src,
+               (void *)&cmd_pkt_filter_ip_src,
+               (void *)&cmd_pkt_filter_port_src,
+               (void *)&cmd_pkt_filter_dst,
+               (void *)&cmd_pkt_filter_ip_dst,
+               (void *)&cmd_pkt_filter_port_dst,
+               (void *)&cmd_pkt_filter_flexbytes,
+               (void *)&cmd_pkt_filter_flexbytes_value,
+               (void *)&cmd_pkt_filter_vlan,
+               (void *)&cmd_pkt_filter_vlan_id,
+               (void *)&cmd_pkt_filter_soft,
+               (void *)&cmd_pkt_filter_soft_id,
+               NULL,
+       },
+};
+
+/* *** SETUP MASKS FILTER *** */
+/* parse result for "set_masks_filter"; values are copied into an
+ * rte_fdir_masks in cmd_pkt_filter_masks_parsed() */
+struct cmd_pkt_filter_masks_result {
+       cmdline_fixed_string_t filter_mask;
+       uint8_t  port_id;
+       cmdline_fixed_string_t src_mask;
+       uint32_t ip_src_mask;
+       uint16_t port_src_mask;
+       cmdline_fixed_string_t dst_mask;
+       uint32_t ip_dst_mask;
+       uint16_t port_dst_mask;
+       cmdline_fixed_string_t flexbytes;
+       uint8_t flexbytes_value;
+       cmdline_fixed_string_t vlan_id;
+       uint8_t  vlan_id_value;
+       cmdline_fixed_string_t vlan_prio;
+       uint8_t  vlan_prio_value;
+       cmdline_fixed_string_t only_ip_flow;
+       uint8_t  only_ip_flow_value;
+};
+
+/*
+ * Callback for "set_masks_filter": translate the parsed token values
+ * into an rte_fdir_masks and apply them via fdir_set_masks().
+ */
+static void
+cmd_pkt_filter_masks_parsed(void *parsed_result,
+                         __attribute__((unused)) struct cmdline *cl,
+                         __attribute__((unused)) void *data)
+{
+       struct cmd_pkt_filter_masks_result *res = parsed_result;
+       /* designated initializer zero-fills any remaining members */
+       struct rte_fdir_masks masks = {
+               .only_ip_flow  = res->only_ip_flow_value,
+               .vlan_id       = res->vlan_id_value,
+               .vlan_prio     = res->vlan_prio_value,
+               .dst_ipv4_mask = res->ip_dst_mask,
+               .src_ipv4_mask = res->ip_src_mask,
+               .src_port_mask = res->port_src_mask,
+               .dst_port_mask = res->port_dst_mask,
+               .flexbytes     = res->flexbytes_value,
+       };
+
+       fdir_set_masks(res->port_id, &masks);
+}
+
+/* Tokens for the "set_masks_filter" command. */
+cmdline_parse_token_string_t cmd_pkt_filter_masks_filter_mask =
+       TOKEN_STRING_INITIALIZER(struct cmd_pkt_filter_masks_result,
+                                filter_mask, "set_masks_filter");
+cmdline_parse_token_num_t cmd_pkt_filter_masks_port_id =
+       TOKEN_NUM_INITIALIZER(struct cmd_pkt_filter_masks_result,
+                             port_id, UINT8);
+cmdline_parse_token_string_t cmd_pkt_filter_masks_only_ip_flow =
+       TOKEN_STRING_INITIALIZER(struct cmd_pkt_filter_masks_result,
+                                only_ip_flow, "only_ip_flow");
+cmdline_parse_token_num_t cmd_pkt_filter_masks_only_ip_flow_value =
+       TOKEN_NUM_INITIALIZER(struct cmd_pkt_filter_masks_result,
+                             only_ip_flow_value, UINT8);
+cmdline_parse_token_string_t cmd_pkt_filter_masks_src_mask =
+       TOKEN_STRING_INITIALIZER(struct cmd_pkt_filter_masks_result,
+                                src_mask, "src_mask");
+cmdline_parse_token_num_t cmd_pkt_filter_masks_ip_src_mask =
+       TOKEN_NUM_INITIALIZER(struct cmd_pkt_filter_masks_result,
+                             ip_src_mask, UINT32);
+cmdline_parse_token_num_t cmd_pkt_filter_masks_port_src_mask =
+       TOKEN_NUM_INITIALIZER(struct cmd_pkt_filter_masks_result,
+                             port_src_mask, UINT16);
+/* fixed copy-paste bug: this keyword token targeted the src_mask
+ * member, leaving the declared dst_mask member unused */
+cmdline_parse_token_string_t cmd_pkt_filter_masks_dst_mask =
+       TOKEN_STRING_INITIALIZER(struct cmd_pkt_filter_masks_result,
+                                dst_mask, "dst_mask");
+cmdline_parse_token_num_t cmd_pkt_filter_masks_ip_dst_mask =
+       TOKEN_NUM_INITIALIZER(struct cmd_pkt_filter_masks_result,
+                             ip_dst_mask, UINT32);
+cmdline_parse_token_num_t cmd_pkt_filter_masks_port_dst_mask =
+       TOKEN_NUM_INITIALIZER(struct cmd_pkt_filter_masks_result,
+                             port_dst_mask, UINT16);
+cmdline_parse_token_string_t cmd_pkt_filter_masks_flexbytes =
+       TOKEN_STRING_INITIALIZER(struct cmd_pkt_filter_masks_result,
+                                flexbytes, "flexbytes");
+cmdline_parse_token_num_t cmd_pkt_filter_masks_flexbytes_value =
+       TOKEN_NUM_INITIALIZER(struct cmd_pkt_filter_masks_result,
+                             flexbytes_value, UINT8);
+cmdline_parse_token_string_t cmd_pkt_filter_masks_vlan_id =
+       TOKEN_STRING_INITIALIZER(struct cmd_pkt_filter_masks_result,
+                                vlan_id, "vlan_id");
+cmdline_parse_token_num_t cmd_pkt_filter_masks_vlan_id_value =
+       TOKEN_NUM_INITIALIZER(struct cmd_pkt_filter_masks_result,
+                             vlan_id_value, UINT8);
+cmdline_parse_token_string_t cmd_pkt_filter_masks_vlan_prio =
+       TOKEN_STRING_INITIALIZER(struct cmd_pkt_filter_masks_result,
+                                vlan_prio, "vlan_prio");
+cmdline_parse_token_num_t cmd_pkt_filter_masks_vlan_prio_value =
+       TOKEN_NUM_INITIALIZER(struct cmd_pkt_filter_masks_result,
+                             vlan_prio_value, UINT8);
+
+/* command instance: "set_masks_filter <port> only_ip_flow <v>
+ * src_mask <ip> <port> dst_mask <ip> <port> flexbytes <v>
+ * vlan_id <v> vlan_prio <v>" */
+cmdline_parse_inst_t cmd_set_masks_filter = {
+       .f = cmd_pkt_filter_masks_parsed,
+       .data = NULL,
+       .help_str = "setup masks filter",
+       .tokens = {
+               (void *)&cmd_pkt_filter_masks_filter_mask,
+               (void *)&cmd_pkt_filter_masks_port_id,
+               (void *)&cmd_pkt_filter_masks_only_ip_flow,
+               (void *)&cmd_pkt_filter_masks_only_ip_flow_value,
+               (void *)&cmd_pkt_filter_masks_src_mask,
+               (void *)&cmd_pkt_filter_masks_ip_src_mask,
+               (void *)&cmd_pkt_filter_masks_port_src_mask,
+               (void *)&cmd_pkt_filter_masks_dst_mask,
+               (void *)&cmd_pkt_filter_masks_ip_dst_mask,
+               (void *)&cmd_pkt_filter_masks_port_dst_mask,
+               (void *)&cmd_pkt_filter_masks_flexbytes,
+               (void *)&cmd_pkt_filter_masks_flexbytes_value,
+               (void *)&cmd_pkt_filter_masks_vlan_id,
+               (void *)&cmd_pkt_filter_masks_vlan_id_value,
+               (void *)&cmd_pkt_filter_masks_vlan_prio,
+               (void *)&cmd_pkt_filter_masks_vlan_prio_value,
+               NULL,
+       },
+};
+
+/* *** SETUP ETHERNET LINK FLOW CONTROL *** */
+/* parse result for: "set flow_ctrl rx on|off tx on|off
+ *  <high_water> <low_water> <pause_time> <send_xon> <port_id>" */
+struct cmd_link_flow_ctrl_set_result {
+       cmdline_fixed_string_t set;
+       cmdline_fixed_string_t flow_ctrl;
+       cmdline_fixed_string_t rx;
+       cmdline_fixed_string_t rx_lfc_mode;  /* "on" or "off" */
+       cmdline_fixed_string_t tx;
+       cmdline_fixed_string_t tx_lfc_mode;  /* "on" or "off" */
+       uint32_t high_water;
+       uint32_t low_water;
+       uint16_t pause_time;
+       uint16_t send_xon;
+       uint8_t  port_id;
+};
+
+/*
+ * Callback for "set flow_ctrl ...": map the rx/tx on/off pair to an
+ * rte_eth_fc_mode, fill in the watermark/pause parameters and apply
+ * them with rte_eth_dev_flow_ctrl_set(). Errors are reported on stdout.
+ */
+static void
+cmd_link_flow_ctrl_set_parsed(void *parsed_result,
+                      __attribute__((unused)) struct cmdline *cl,
+                      __attribute__((unused)) void *data)
+{
+       struct cmd_link_flow_ctrl_set_result *res = parsed_result;
+       struct rte_eth_fc_conf fc_conf;
+       int rx_fc_enable, tx_fc_enable;
+       int ret;
+
+       /*
+        * Rx on/off, flow control is enabled/disabled on RX side. This can indicate
+        * the RTE_FC_TX_PAUSE, Transmit pause frame at the Rx side.
+        * Tx on/off, flow control is enabled/disabled on TX side. This can indicate
+        * the RTE_FC_RX_PAUSE, Respond to the pause frame at the Tx side.
+        */
+       static enum rte_eth_fc_mode rx_tx_onoff_2_lfc_mode[2][2] = {
+                       {RTE_FC_NONE, RTE_FC_RX_PAUSE}, {RTE_FC_TX_PAUSE, RTE_FC_FULL}
+       };
+
+       rx_fc_enable = (!strcmp(res->rx_lfc_mode, "on")) ? 1 : 0;
+       tx_fc_enable = (!strcmp(res->tx_lfc_mode, "on")) ? 1 : 0;
+
+       /* zero-fill so any rte_eth_fc_conf members not assigned below are
+        * well-defined instead of stack garbage */
+       memset(&fc_conf, 0, sizeof(fc_conf));
+       fc_conf.mode       = rx_tx_onoff_2_lfc_mode[rx_fc_enable][tx_fc_enable];
+       fc_conf.high_water = res->high_water;
+       fc_conf.low_water  = res->low_water;
+       fc_conf.pause_time = res->pause_time;
+       fc_conf.send_xon   = res->send_xon;
+
+       ret = rte_eth_dev_flow_ctrl_set(res->port_id, &fc_conf);
+       if (ret != 0)
+               printf("bad flow control parameter, return code = %d \n", ret);
+}
+
+cmdline_parse_token_string_t cmd_lfc_set_set =
+       TOKEN_STRING_INITIALIZER(struct cmd_link_flow_ctrl_set_result,
+                               set, "set");
+cmdline_parse_token_string_t cmd_lfc_set_flow_ctrl =
+       TOKEN_STRING_INITIALIZER(struct cmd_link_flow_ctrl_set_result,
+                               flow_ctrl, "flow_ctrl");
+cmdline_parse_token_string_t cmd_lfc_set_rx =
+       TOKEN_STRING_INITIALIZER(struct cmd_link_flow_ctrl_set_result,
+                               rx, "rx");
+cmdline_parse_token_string_t cmd_lfc_set_rx_mode =
+       TOKEN_STRING_INITIALIZER(struct cmd_link_flow_ctrl_set_result,
+                               rx_lfc_mode, "on#off");
+cmdline_parse_token_string_t cmd_lfc_set_tx =
+       TOKEN_STRING_INITIALIZER(struct cmd_link_flow_ctrl_set_result,
+                               tx, "tx");
+cmdline_parse_token_string_t cmd_lfc_set_tx_mode =
+       TOKEN_STRING_INITIALIZER(struct cmd_link_flow_ctrl_set_result,
+                               tx_lfc_mode, "on#off");
+cmdline_parse_token_num_t cmd_lfc_set_high_water =
+       TOKEN_NUM_INITIALIZER(struct cmd_link_flow_ctrl_set_result,
+                               high_water, UINT32);
+cmdline_parse_token_num_t cmd_lfc_set_low_water =
+       TOKEN_NUM_INITIALIZER(struct cmd_link_flow_ctrl_set_result,
+                               low_water, UINT32);
+cmdline_parse_token_num_t cmd_lfc_set_pause_time =
+       TOKEN_NUM_INITIALIZER(struct cmd_link_flow_ctrl_set_result,
+                               pause_time, UINT16);
+cmdline_parse_token_num_t cmd_lfc_set_send_xon =
+       TOKEN_NUM_INITIALIZER(struct cmd_link_flow_ctrl_set_result,
+                               send_xon, UINT16);
+cmdline_parse_token_num_t cmd_lfc_set_portid =
+       TOKEN_NUM_INITIALIZER(struct cmd_link_flow_ctrl_set_result,
+                               port_id, UINT8);
+
+/* command instance: "set flow_ctrl rx on|off tx on|off
+ *  <high_water> <low_water> <pause_time> <send_xon> <port_id>" */
+cmdline_parse_inst_t cmd_link_flow_control_set = {
+       .f = cmd_link_flow_ctrl_set_parsed,
+       .data = NULL,
+       .help_str = "Configure the Ethernet link flow control...",
+       .tokens = {
+               (void *)&cmd_lfc_set_set,
+               (void *)&cmd_lfc_set_flow_ctrl,
+               (void *)&cmd_lfc_set_rx,
+               (void *)&cmd_lfc_set_rx_mode,
+               (void *)&cmd_lfc_set_tx,
+               (void *)&cmd_lfc_set_tx_mode,
+               (void *)&cmd_lfc_set_high_water,
+               (void *)&cmd_lfc_set_low_water,
+               (void *)&cmd_lfc_set_pause_time,
+               (void *)&cmd_lfc_set_send_xon,
+               (void *)&cmd_lfc_set_portid,
+               NULL,
+       },
+};
+
+/* *** RESET CONFIGURATION *** */
+struct cmd_reset_result {
+       cmdline_fixed_string_t reset;   /* NB: matched against the literal "set", not "reset" */
+       cmdline_fixed_string_t def;     /* literal "default" */
+};
+
+static void cmd_reset_parsed(__attribute__((unused)) void *parsed_result,
+                            struct cmdline *cl,
+                            __attribute__((unused)) void *data)
+{ /* "set default": restore the default forwarding configuration */
+       cmdline_printf(cl, "Reset to default forwarding configuration...\n");
+       set_def_fwd_config();
+}
+
+cmdline_parse_token_string_t cmd_reset_set =
+       TOKEN_STRING_INITIALIZER(struct cmd_reset_result, reset, "set");
+cmdline_parse_token_string_t cmd_reset_def =
+       TOKEN_STRING_INITIALIZER(struct cmd_reset_result, def,
+                                "default");
+
+cmdline_parse_inst_t cmd_reset = {
+       .f = cmd_reset_parsed,
+       .data = NULL,
+       .help_str = "set default: reset default forwarding configuration",
+       .tokens = {
+               (void *)&cmd_reset_set,
+               (void *)&cmd_reset_def,
+               NULL,
+       },
+};
+
+/* *** START FORWARDING *** */
+struct cmd_start_result {
+       cmdline_fixed_string_t start;   /* literal "start" */
+};
+
+cmdline_parse_token_string_t cmd_start_start =
+       TOKEN_STRING_INITIALIZER(struct cmd_start_result, start, "start");
+
+static void cmd_start_parsed(__attribute__((unused)) void *parsed_result,
+                            __attribute__((unused)) struct cmdline *cl,
+                            __attribute__((unused)) void *data)
+{
+       start_packet_forwarding(0); /* 0: no initial TX burst (cf. "start tx_first" below) */
+}
+
+cmdline_parse_inst_t cmd_start = {
+       .f = cmd_start_parsed,
+       .data = NULL,
+       .help_str = "start packet forwarding",
+       .tokens = {
+               (void *)&cmd_start_start,
+               NULL,
+       },
+};
+
+/* *** START FORWARDING WITH ONE TX BURST FIRST *** */
+struct cmd_start_tx_first_result {
+       cmdline_fixed_string_t start;           /* literal "start" */
+       cmdline_fixed_string_t tx_first;        /* literal "tx_first" */
+};
+
+static void
+cmd_start_tx_first_parsed(__attribute__((unused)) void *parsed_result,
+                         __attribute__((unused)) struct cmdline *cl,
+                         __attribute__((unused)) void *data)
+{
+       start_packet_forwarding(1); /* 1: send one burst of packets before forwarding */
+}
+
+cmdline_parse_token_string_t cmd_start_tx_first_start =
+       TOKEN_STRING_INITIALIZER(struct cmd_start_tx_first_result, start,
+                                "start");
+cmdline_parse_token_string_t cmd_start_tx_first_tx_first =
+       TOKEN_STRING_INITIALIZER(struct cmd_start_tx_first_result,
+                                tx_first, "tx_first");
+
+cmdline_parse_inst_t cmd_start_tx_first = {
+       .f = cmd_start_tx_first_parsed,
+       .data = NULL,
+       .help_str = "start packet forwarding, after sending 1 burst of packets",
+       .tokens = {
+               (void *)&cmd_start_tx_first_start,
+               (void *)&cmd_start_tx_first_tx_first,
+               NULL,
+       },
+};
+
+/* *** SHOW CFG *** */
+struct cmd_showcfg_result {
+       cmdline_fixed_string_t show;    /* literal "show" */
+       cmdline_fixed_string_t cfg;     /* literal "config" */
+       cmdline_fixed_string_t what;    /* one of "rxtx", "cores", "fwd" */
+};
+
+static void cmd_showcfg_parsed(void *parsed_result,
+                              __attribute__((unused)) struct cmdline *cl,
+                              __attribute__((unused)) void *data)
+{ /* dispatch "show config <what>" to the matching display helper */
+       struct cmd_showcfg_result *res = parsed_result;
+       if (!strcmp(res->what, "rxtx"))
+               rxtx_config_display();
+       else if (!strcmp(res->what, "cores"))
+               fwd_lcores_config_display();
+       else if (!strcmp(res->what, "fwd"))
+               fwd_config_display();
+}
+
+cmdline_parse_token_string_t cmd_showcfg_show =
+       TOKEN_STRING_INITIALIZER(struct cmd_showcfg_result, show, "show");
+cmdline_parse_token_string_t cmd_showcfg_port =
+       TOKEN_STRING_INITIALIZER(struct cmd_showcfg_result, cfg, "config")
+cmdline_parse_token_string_t cmd_showcfg_what =
+       TOKEN_STRING_INITIALIZER(struct cmd_showcfg_result, what,
+                                "rxtx#cores#fwd");
+
+cmdline_parse_inst_t cmd_showcfg = {
+       .f = cmd_showcfg_parsed,
+       .data = NULL,
+       .help_str = "show config rxtx|cores|fwd",
+       .tokens = {
+               (void *)&cmd_showcfg_show,
+               (void *)&cmd_showcfg_port,
+               (void *)&cmd_showcfg_what,
+               NULL,
+       },
+};
+
+/* *** SHOW ALL PORT INFO *** */
+struct cmd_showportall_result {
+       cmdline_fixed_string_t show;    /* "show" or "clear" */
+       cmdline_fixed_string_t port;    /* literal "port" */
+       cmdline_fixed_string_t what;    /* "info", "stats" or "fdir" */
+       cmdline_fixed_string_t all;     /* literal "all" */
+};
+
+static void cmd_showportall_parsed(void *parsed_result,
+                               __attribute__((unused)) struct cmdline *cl,
+                               __attribute__((unused)) void *data)
+{ /* apply the requested display/clear operation to every detected port */
+       portid_t i;
+
+       struct cmd_showportall_result *res = parsed_result;
+       if (!strcmp(res->show, "clear")) {
+               if (!strcmp(res->what, "stats")) /* only stats can be cleared; "clear port info|fdir" is a no-op */
+                       for (i = 0; i < nb_ports; i++)
+                               nic_stats_clear(i);
+       } else if (!strcmp(res->what, "info"))
+               for (i = 0; i < nb_ports; i++)
+                       port_infos_display(i);
+       else if (!strcmp(res->what, "stats"))
+               for (i = 0; i < nb_ports; i++)
+                       nic_stats_display(i);
+       else if (!strcmp(res->what, "fdir"))
+               for (i = 0; i < nb_ports; i++)
+                       fdir_get_infos(i);
+}
+
+cmdline_parse_token_string_t cmd_showportall_show =
+       TOKEN_STRING_INITIALIZER(struct cmd_showportall_result, show,
+                                "show#clear");
+cmdline_parse_token_string_t cmd_showportall_port =
+       TOKEN_STRING_INITIALIZER(struct cmd_showportall_result, port, "port");
+cmdline_parse_token_string_t cmd_showportall_what =
+       TOKEN_STRING_INITIALIZER(struct cmd_showportall_result, what,
+                                "info#stats#fdir");
+cmdline_parse_token_string_t cmd_showportall_all =
+       TOKEN_STRING_INITIALIZER(struct cmd_showportall_result, all, "all");
+cmdline_parse_inst_t cmd_showportall = {
+       .f = cmd_showportall_parsed,
+       .data = NULL,
+       .help_str = "show|clear port info|stats|fdir all",
+       .tokens = {
+               (void *)&cmd_showportall_show,
+               (void *)&cmd_showportall_port,
+               (void *)&cmd_showportall_what,
+               (void *)&cmd_showportall_all,
+               NULL,
+       },
+};
+
+/* *** SHOW PORT INFO *** */
+struct cmd_showport_result {
+       cmdline_fixed_string_t show;    /* "show" or "clear" */
+       cmdline_fixed_string_t port;    /* literal "port" */
+       cmdline_fixed_string_t what;    /* "info", "stats" or "fdir" */
+       uint8_t portnum;                /* target port index */
+};
+
+static void cmd_showport_parsed(void *parsed_result,
+                               __attribute__((unused)) struct cmdline *cl,
+                               __attribute__((unused)) void *data)
+{ /* display or clear information/statistics of one port */
+       struct cmd_showport_result *res = parsed_result;
+       if (!strcmp(res->show, "clear")) {
+               if (!strcmp(res->what, "stats")) /* only stats can be cleared */
+                       nic_stats_clear(res->portnum);
+       } else if (!strcmp(res->what, "info"))
+               port_infos_display(res->portnum);
+       else if (!strcmp(res->what, "stats"))
+               nic_stats_display(res->portnum);
+       else if (!strcmp(res->what, "fdir"))
+               fdir_get_infos(res->portnum);
+}
+
+cmdline_parse_token_string_t cmd_showport_show =
+       TOKEN_STRING_INITIALIZER(struct cmd_showport_result, show,
+                                "show#clear");
+cmdline_parse_token_string_t cmd_showport_port =
+       TOKEN_STRING_INITIALIZER(struct cmd_showport_result, port, "port");
+cmdline_parse_token_string_t cmd_showport_what =
+       TOKEN_STRING_INITIALIZER(struct cmd_showport_result, what,
+                                "info#stats#fdir");
+cmdline_parse_token_num_t cmd_showport_portnum =
+       TOKEN_NUM_INITIALIZER(struct cmd_showport_result, portnum, UINT8); /* was INT32: a 32-bit store into the uint8_t field overruns it */
+
+cmdline_parse_inst_t cmd_showport = {
+       .f = cmd_showport_parsed,
+       .data = NULL,
+       .help_str = "show|clear port info|stats|fdir X (X = port number)",
+       .tokens = {
+               (void *)&cmd_showport_show,
+               (void *)&cmd_showport_port,
+               (void *)&cmd_showport_what,
+               (void *)&cmd_showport_portnum,
+               NULL,
+       },
+};
+
+/* *** READ PORT REGISTER *** */
+struct cmd_read_reg_result {
+       cmdline_fixed_string_t read;    /* literal "read" */
+       cmdline_fixed_string_t reg;     /* literal "reg" */
+       uint8_t port_id;
+       uint32_t reg_off;               /* register offset within the port's register space */
+};
+
+static void
+cmd_read_reg_parsed(void *parsed_result,
+                   __attribute__((unused)) struct cmdline *cl,
+                   __attribute__((unused)) void *data)
+{ /* display the 32-bit register at reg_off of the given port */
+       struct cmd_read_reg_result *res = parsed_result;
+       port_reg_display(res->port_id, res->reg_off);
+}
+
+cmdline_parse_token_string_t cmd_read_reg_read =
+       TOKEN_STRING_INITIALIZER(struct cmd_read_reg_result, read, "read");
+cmdline_parse_token_string_t cmd_read_reg_reg =
+       TOKEN_STRING_INITIALIZER(struct cmd_read_reg_result, reg, "reg");
+cmdline_parse_token_num_t cmd_read_reg_port_id =
+       TOKEN_NUM_INITIALIZER(struct cmd_read_reg_result, port_id, UINT8);
+cmdline_parse_token_num_t cmd_read_reg_reg_off =
+       TOKEN_NUM_INITIALIZER(struct cmd_read_reg_result, reg_off, UINT32);
+
+cmdline_parse_inst_t cmd_read_reg = {
+       .f = cmd_read_reg_parsed,
+       .data = NULL,
+       .help_str = "read reg port_id reg_off",
+       .tokens = {
+               (void *)&cmd_read_reg_read,
+               (void *)&cmd_read_reg_reg,
+               (void *)&cmd_read_reg_port_id,
+               (void *)&cmd_read_reg_reg_off,
+               NULL,
+       },
+};
+
+/* *** READ PORT REGISTER BIT FIELD *** */
+struct cmd_read_reg_bit_field_result {
+       cmdline_fixed_string_t read;    /* literal "read" */
+       cmdline_fixed_string_t regfield; /* literal "regfield" */
+       uint8_t port_id;
+       uint32_t reg_off;
+       uint8_t bit1_pos;               /* low bit of the field */
+       uint8_t bit2_pos;               /* high bit of the field (inclusive) */
+};
+
+static void
+cmd_read_reg_bit_field_parsed(void *parsed_result,
+                             __attribute__((unused)) struct cmdline *cl,
+                             __attribute__((unused)) void *data)
+{ /* display the register bit field [bit1_pos, bit2_pos] */
+       struct cmd_read_reg_bit_field_result *res = parsed_result;
+       port_reg_bit_field_display(res->port_id, res->reg_off,
+                                  res->bit1_pos, res->bit2_pos);
+}
+
+cmdline_parse_token_string_t cmd_read_reg_bit_field_read =
+       TOKEN_STRING_INITIALIZER(struct cmd_read_reg_bit_field_result, read,
+                                "read");
+cmdline_parse_token_string_t cmd_read_reg_bit_field_regfield =
+       TOKEN_STRING_INITIALIZER(struct cmd_read_reg_bit_field_result,
+                                regfield, "regfield");
+cmdline_parse_token_num_t cmd_read_reg_bit_field_port_id =
+       TOKEN_NUM_INITIALIZER(struct cmd_read_reg_bit_field_result, port_id,
+                             UINT8);
+cmdline_parse_token_num_t cmd_read_reg_bit_field_reg_off =
+       TOKEN_NUM_INITIALIZER(struct cmd_read_reg_bit_field_result, reg_off,
+                             UINT32);
+cmdline_parse_token_num_t cmd_read_reg_bit_field_bit1_pos =
+       TOKEN_NUM_INITIALIZER(struct cmd_read_reg_bit_field_result, bit1_pos,
+                             UINT8);
+cmdline_parse_token_num_t cmd_read_reg_bit_field_bit2_pos =
+       TOKEN_NUM_INITIALIZER(struct cmd_read_reg_bit_field_result, bit2_pos,
+                             UINT8);
+
+cmdline_parse_inst_t cmd_read_reg_bit_field = {
+       .f = cmd_read_reg_bit_field_parsed,
+       .data = NULL,
+       .help_str = "read regfield port_id reg_off bit_x bit_y "
+       "(read register bit field between bit_x and bit_y included)",
+       .tokens = {
+               (void *)&cmd_read_reg_bit_field_read,
+               (void *)&cmd_read_reg_bit_field_regfield,
+               (void *)&cmd_read_reg_bit_field_port_id,
+               (void *)&cmd_read_reg_bit_field_reg_off,
+               (void *)&cmd_read_reg_bit_field_bit1_pos,
+               (void *)&cmd_read_reg_bit_field_bit2_pos,
+               NULL,
+       },
+};
+
+/* *** READ PORT REGISTER BIT *** */
+struct cmd_read_reg_bit_result {
+       cmdline_fixed_string_t read;    /* literal "read" */
+       cmdline_fixed_string_t regbit;  /* literal "regbit" */
+       uint8_t port_id;
+       uint32_t reg_off;
+       uint8_t bit_pos;                /* bit index within the 32-bit register */
+};
+
+static void
+cmd_read_reg_bit_parsed(void *parsed_result,
+                       __attribute__((unused)) struct cmdline *cl,
+                       __attribute__((unused)) void *data)
+{ /* display a single bit of a port register */
+       struct cmd_read_reg_bit_result *res = parsed_result;
+       port_reg_bit_display(res->port_id, res->reg_off, res->bit_pos);
+}
+
+cmdline_parse_token_string_t cmd_read_reg_bit_read =
+       TOKEN_STRING_INITIALIZER(struct cmd_read_reg_bit_result, read, "read");
+cmdline_parse_token_string_t cmd_read_reg_bit_regbit =
+       TOKEN_STRING_INITIALIZER(struct cmd_read_reg_bit_result,
+                                regbit, "regbit");
+cmdline_parse_token_num_t cmd_read_reg_bit_port_id =
+       TOKEN_NUM_INITIALIZER(struct cmd_read_reg_bit_result, port_id, UINT8);
+cmdline_parse_token_num_t cmd_read_reg_bit_reg_off =
+       TOKEN_NUM_INITIALIZER(struct cmd_read_reg_bit_result, reg_off, UINT32);
+cmdline_parse_token_num_t cmd_read_reg_bit_bit_pos =
+       TOKEN_NUM_INITIALIZER(struct cmd_read_reg_bit_result, bit_pos, UINT8);
+
+cmdline_parse_inst_t cmd_read_reg_bit = {
+       .f = cmd_read_reg_bit_parsed,
+       .data = NULL,
+       .help_str = "read regbit port_id reg_off bit_x (0 <= bit_x <= 31)",
+       .tokens = {
+               (void *)&cmd_read_reg_bit_read,
+               (void *)&cmd_read_reg_bit_regbit,
+               (void *)&cmd_read_reg_bit_port_id,
+               (void *)&cmd_read_reg_bit_reg_off,
+               (void *)&cmd_read_reg_bit_bit_pos,
+               NULL,
+       },
+};
+
+/* *** WRITE PORT REGISTER *** */
+struct cmd_write_reg_result {
+       cmdline_fixed_string_t write;   /* literal "write" */
+       cmdline_fixed_string_t reg;     /* literal "reg" */
+       uint8_t port_id;
+       uint32_t reg_off;
+       uint32_t value;                 /* full 32-bit value to store */
+};
+
+static void
+cmd_write_reg_parsed(void *parsed_result,
+                    __attribute__((unused)) struct cmdline *cl,
+                    __attribute__((unused)) void *data)
+{ /* write the 32-bit register at reg_off of the given port */
+       struct cmd_write_reg_result *res = parsed_result;
+       port_reg_set(res->port_id, res->reg_off, res->value);
+}
+
+cmdline_parse_token_string_t cmd_write_reg_write =
+       TOKEN_STRING_INITIALIZER(struct cmd_write_reg_result, write, "write");
+cmdline_parse_token_string_t cmd_write_reg_reg =
+       TOKEN_STRING_INITIALIZER(struct cmd_write_reg_result, reg, "reg");
+cmdline_parse_token_num_t cmd_write_reg_port_id =
+       TOKEN_NUM_INITIALIZER(struct cmd_write_reg_result, port_id, UINT8);
+cmdline_parse_token_num_t cmd_write_reg_reg_off =
+       TOKEN_NUM_INITIALIZER(struct cmd_write_reg_result, reg_off, UINT32);
+cmdline_parse_token_num_t cmd_write_reg_value =
+       TOKEN_NUM_INITIALIZER(struct cmd_write_reg_result, value, UINT32);
+
+cmdline_parse_inst_t cmd_write_reg = {
+       .f = cmd_write_reg_parsed,
+       .data = NULL,
+       .help_str = "write reg port_id reg_off reg_value",
+       .tokens = {
+               (void *)&cmd_write_reg_write,
+               (void *)&cmd_write_reg_reg,
+               (void *)&cmd_write_reg_port_id,
+               (void *)&cmd_write_reg_reg_off,
+               (void *)&cmd_write_reg_value,
+               NULL,
+       },
+};
+
+/* *** WRITE PORT REGISTER BIT FIELD *** */
+struct cmd_write_reg_bit_field_result {
+       cmdline_fixed_string_t write;   /* literal "write" */
+       cmdline_fixed_string_t regfield; /* literal "regfield" */
+       uint8_t port_id;
+       uint32_t reg_off;
+       uint8_t bit1_pos;               /* low bit of the field */
+       uint8_t bit2_pos;               /* high bit of the field (inclusive) */
+       uint32_t value;                 /* value to store into the field */
+};
+
+static void
+cmd_write_reg_bit_field_parsed(void *parsed_result,
+                              __attribute__((unused)) struct cmdline *cl,
+                              __attribute__((unused)) void *data)
+{ /* write 'value' into the register bit field [bit1_pos, bit2_pos] */
+       struct cmd_write_reg_bit_field_result *res = parsed_result;
+       port_reg_bit_field_set(res->port_id, res->reg_off,
+                         res->bit1_pos, res->bit2_pos, res->value);
+}
+
+cmdline_parse_token_string_t cmd_write_reg_bit_field_write =
+       TOKEN_STRING_INITIALIZER(struct cmd_write_reg_bit_field_result, write,
+                                "write");
+cmdline_parse_token_string_t cmd_write_reg_bit_field_regfield =
+       TOKEN_STRING_INITIALIZER(struct cmd_write_reg_bit_field_result,
+                                regfield, "regfield");
+cmdline_parse_token_num_t cmd_write_reg_bit_field_port_id =
+       TOKEN_NUM_INITIALIZER(struct cmd_write_reg_bit_field_result, port_id,
+                             UINT8);
+cmdline_parse_token_num_t cmd_write_reg_bit_field_reg_off =
+       TOKEN_NUM_INITIALIZER(struct cmd_write_reg_bit_field_result, reg_off,
+                             UINT32);
+cmdline_parse_token_num_t cmd_write_reg_bit_field_bit1_pos =
+       TOKEN_NUM_INITIALIZER(struct cmd_write_reg_bit_field_result, bit1_pos,
+                             UINT8);
+cmdline_parse_token_num_t cmd_write_reg_bit_field_bit2_pos =
+       TOKEN_NUM_INITIALIZER(struct cmd_write_reg_bit_field_result, bit2_pos,
+                             UINT8);
+cmdline_parse_token_num_t cmd_write_reg_bit_field_value =
+       TOKEN_NUM_INITIALIZER(struct cmd_write_reg_bit_field_result, value,
+                             UINT32);
+
+cmdline_parse_inst_t cmd_write_reg_bit_field = {
+       .f = cmd_write_reg_bit_field_parsed,
+       .data = NULL,
+       .help_str = "write regfield port_id reg_off bit_x bit_y reg_value " /* trailing space added: concatenation used to render "reg_value(set ..." */
+       "(set register bit field between bit_x and bit_y included)",
+       .tokens = {
+               (void *)&cmd_write_reg_bit_field_write,
+               (void *)&cmd_write_reg_bit_field_regfield,
+               (void *)&cmd_write_reg_bit_field_port_id,
+               (void *)&cmd_write_reg_bit_field_reg_off,
+               (void *)&cmd_write_reg_bit_field_bit1_pos,
+               (void *)&cmd_write_reg_bit_field_bit2_pos,
+               (void *)&cmd_write_reg_bit_field_value,
+               NULL,
+       },
+};
+
+/* *** WRITE PORT REGISTER BIT *** */
+struct cmd_write_reg_bit_result {
+       cmdline_fixed_string_t write;   /* literal "write" */
+       cmdline_fixed_string_t regbit;  /* literal "regbit" */
+       uint8_t port_id;
+       uint32_t reg_off;
+       uint8_t bit_pos;                /* bit index within the 32-bit register */
+       uint8_t value;                  /* 0 or 1 */
+};
+
+static void
+cmd_write_reg_bit_parsed(void *parsed_result,
+                        __attribute__((unused)) struct cmdline *cl,
+                        __attribute__((unused)) void *data)
+{ /* set or clear a single bit of a port register */
+       struct cmd_write_reg_bit_result *res = parsed_result;
+       port_reg_bit_set(res->port_id, res->reg_off, res->bit_pos, res->value);
+}
+
+cmdline_parse_token_string_t cmd_write_reg_bit_write =
+       TOKEN_STRING_INITIALIZER(struct cmd_write_reg_bit_result, write,
+                                "write");
+cmdline_parse_token_string_t cmd_write_reg_bit_regbit =
+       TOKEN_STRING_INITIALIZER(struct cmd_write_reg_bit_result,
+                                regbit, "regbit");
+cmdline_parse_token_num_t cmd_write_reg_bit_port_id =
+       TOKEN_NUM_INITIALIZER(struct cmd_write_reg_bit_result, port_id, UINT8);
+cmdline_parse_token_num_t cmd_write_reg_bit_reg_off =
+       TOKEN_NUM_INITIALIZER(struct cmd_write_reg_bit_result, reg_off, UINT32);
+cmdline_parse_token_num_t cmd_write_reg_bit_bit_pos =
+       TOKEN_NUM_INITIALIZER(struct cmd_write_reg_bit_result, bit_pos, UINT8);
+cmdline_parse_token_num_t cmd_write_reg_bit_value =
+       TOKEN_NUM_INITIALIZER(struct cmd_write_reg_bit_result, value, UINT8);
+
+cmdline_parse_inst_t cmd_write_reg_bit = {
+       .f = cmd_write_reg_bit_parsed,
+       .data = NULL,
+       .help_str = "write regbit port_id reg_off bit_x 0/1 (0 <= bit_x <= 31)",
+       .tokens = {
+               (void *)&cmd_write_reg_bit_write,
+               (void *)&cmd_write_reg_bit_regbit,
+               (void *)&cmd_write_reg_bit_port_id,
+               (void *)&cmd_write_reg_bit_reg_off,
+               (void *)&cmd_write_reg_bit_bit_pos,
+               (void *)&cmd_write_reg_bit_value,
+               NULL,
+       },
+};
+
+/* *** READ A RING DESCRIPTOR OF A PORT RX/TX QUEUE *** */
+struct cmd_read_rxd_txd_result {
+       cmdline_fixed_string_t read;    /* literal "read" */
+       cmdline_fixed_string_t rxd_txd; /* "rxd" or "txd": which ring to inspect */
+       uint8_t port_id;
+       uint16_t queue_id;
+       uint16_t desc_id;               /* descriptor index within the ring */
+};
+
+static void
+cmd_read_rxd_txd_parsed(void *parsed_result,
+                       __attribute__((unused)) struct cmdline *cl,
+                       __attribute__((unused)) void *data)
+{ /* dump one RX or TX ring descriptor of the given port/queue */
+       struct cmd_read_rxd_txd_result *res = parsed_result;
+
+       if (!strcmp(res->rxd_txd, "rxd"))
+               rx_ring_desc_display(res->port_id, res->queue_id, res->desc_id);
+       else if (!strcmp(res->rxd_txd, "txd"))
+               tx_ring_desc_display(res->port_id, res->queue_id, res->desc_id);
+}
+
+cmdline_parse_token_string_t cmd_read_rxd_txd_read =
+       TOKEN_STRING_INITIALIZER(struct cmd_read_rxd_txd_result, read, "read");
+cmdline_parse_token_string_t cmd_read_rxd_txd_rxd_txd =
+       TOKEN_STRING_INITIALIZER(struct cmd_read_rxd_txd_result, rxd_txd,
+                                "rxd#txd");
+cmdline_parse_token_num_t cmd_read_rxd_txd_port_id =
+       TOKEN_NUM_INITIALIZER(struct cmd_read_rxd_txd_result, port_id, UINT8);
+cmdline_parse_token_num_t cmd_read_rxd_txd_queue_id =
+       TOKEN_NUM_INITIALIZER(struct cmd_read_rxd_txd_result, queue_id, UINT16);
+cmdline_parse_token_num_t cmd_read_rxd_txd_desc_id =
+       TOKEN_NUM_INITIALIZER(struct cmd_read_rxd_txd_result, desc_id, UINT16);
+
+cmdline_parse_inst_t cmd_read_rxd_txd = {
+       .f = cmd_read_rxd_txd_parsed,
+       .data = NULL,
+       .help_str = "read rxd|txd port_id queue_id rxd_id",
+       .tokens = {
+               (void *)&cmd_read_rxd_txd_read,
+               (void *)&cmd_read_rxd_txd_rxd_txd,
+               (void *)&cmd_read_rxd_txd_port_id,
+               (void *)&cmd_read_rxd_txd_queue_id,
+               (void *)&cmd_read_rxd_txd_desc_id,
+               NULL,
+       },
+};
+
+/* *** QUIT *** */
+struct cmd_quit_result {
+       cmdline_fixed_string_t quit;    /* literal "quit" */
+};
+
+static void cmd_quit_parsed(__attribute__((unused)) void *parsed_result,
+                           struct cmdline *cl,
+                           __attribute__((unused)) void *data)
+{ /* stop the PMD test then leave the interactive command loop */
+       pmd_test_exit();
+       cmdline_quit(cl);
+}
+
+cmdline_parse_token_string_t cmd_quit_quit =
+       TOKEN_STRING_INITIALIZER(struct cmd_quit_result, quit, "quit");
+
+cmdline_parse_inst_t cmd_quit = {
+       .f = cmd_quit_parsed,
+       .data = NULL,
+       .help_str = "exit application",
+       .tokens = {
+               (void *)&cmd_quit_quit,
+               NULL,
+       },
+};
+
+/* *** ADD/REMOVE MAC ADDRESS FROM A PORT *** */
+struct cmd_mac_addr_result {
+       cmdline_fixed_string_t mac_addr_cmd;    /* literal "mac_addr" */
+       cmdline_fixed_string_t what;            /* "add" or "remove" */
+       uint8_t port_num;
+       struct ether_addr address;              /* MAC address operand */
+};
+
+static void cmd_mac_addr_parsed(void *parsed_result,
+               __attribute__((unused)) struct cmdline *cl,
+               __attribute__((unused)) void *data)
+{ /* add a MAC address to, or remove one from, the given port */
+       struct cmd_mac_addr_result *res = parsed_result;
+       int ret;
+
+       if (strcmp(res->what, "add") == 0)
+               ret = rte_eth_dev_mac_addr_add(res->port_num, &res->address, 0); /* pool index 0 */
+       else
+               ret = rte_eth_dev_mac_addr_remove(res->port_num, &res->address);
+
+       /* check the return value and print it if is < 0 */
+       if (ret < 0)
+               printf("mac_addr_cmd error: (%s)\n", strerror(-ret));
+
+}
+
+cmdline_parse_token_string_t cmd_mac_addr_cmd =
+       TOKEN_STRING_INITIALIZER(struct cmd_mac_addr_result, mac_addr_cmd,
+                               "mac_addr");
+cmdline_parse_token_string_t cmd_mac_addr_what =
+       TOKEN_STRING_INITIALIZER(struct cmd_mac_addr_result, what,
+                               "add#remove");
+cmdline_parse_token_num_t cmd_mac_addr_portnum =
+               TOKEN_NUM_INITIALIZER(struct cmd_mac_addr_result, port_num, UINT8);
+cmdline_parse_token_string_t cmd_mac_addr_addr =
+               TOKEN_ETHERADDR_INITIALIZER(struct cmd_mac_addr_result, address);
+
+cmdline_parse_inst_t cmd_mac_addr = {
+       .f = cmd_mac_addr_parsed,
+       .data = NULL, /* was (void *)0: NULL, as used by every other command here */
+       .help_str = "mac_addr add|remove X <address>: "
+                       "add/remove MAC address on port X",
+       .tokens = {
+               (void *)&cmd_mac_addr_cmd,
+               (void *)&cmd_mac_addr_what,
+               (void *)&cmd_mac_addr_portnum,
+               (void *)&cmd_mac_addr_addr,
+               NULL,
+       },
+};
+
+
+/* list of instructions */
+cmdline_parse_ctx_t main_ctx[] = { /* NULL-terminated registry of every testpmd command */
+       (cmdline_parse_inst_t *)&cmd_help,
+       (cmdline_parse_inst_t *)&cmd_quit,
+       (cmdline_parse_inst_t *)&cmd_showport,
+       (cmdline_parse_inst_t *)&cmd_showportall,
+       (cmdline_parse_inst_t *)&cmd_showcfg,
+       (cmdline_parse_inst_t *)&cmd_start,
+       (cmdline_parse_inst_t *)&cmd_start_tx_first,
+       (cmdline_parse_inst_t *)&cmd_reset,
+       (cmdline_parse_inst_t *)&cmd_set_numbers,
+       (cmdline_parse_inst_t *)&cmd_set_txpkts,
+       (cmdline_parse_inst_t *)&cmd_set_fwd_list,
+       (cmdline_parse_inst_t *)&cmd_set_fwd_mask,
+       (cmdline_parse_inst_t *)&cmd_set_fwd_mode,
+       (cmdline_parse_inst_t *)&cmd_set_promisc_mode_one,
+       (cmdline_parse_inst_t *)&cmd_set_promisc_mode_all,
+       (cmdline_parse_inst_t *)&cmd_set_allmulti_mode_one,
+       (cmdline_parse_inst_t *)&cmd_set_allmulti_mode_all,
+       (cmdline_parse_inst_t *)&cmd_rx_vlan_filter_all,
+       (cmdline_parse_inst_t *)&cmd_rx_vlan_filter,
+       (cmdline_parse_inst_t *)&cmd_tx_vlan_set,
+       (cmdline_parse_inst_t *)&cmd_tx_vlan_reset,
+       (cmdline_parse_inst_t *)&cmd_tx_cksum_set,
+       (cmdline_parse_inst_t *)&cmd_link_flow_control_set,
+       (cmdline_parse_inst_t *)&cmd_read_reg,
+       (cmdline_parse_inst_t *)&cmd_read_reg_bit_field,
+       (cmdline_parse_inst_t *)&cmd_read_reg_bit,
+       (cmdline_parse_inst_t *)&cmd_write_reg,
+       (cmdline_parse_inst_t *)&cmd_write_reg_bit_field,
+       (cmdline_parse_inst_t *)&cmd_write_reg_bit,
+       (cmdline_parse_inst_t *)&cmd_read_rxd_txd,
+       (cmdline_parse_inst_t *)&cmd_add_signature_filter,
+       (cmdline_parse_inst_t *)&cmd_upd_signature_filter,
+       (cmdline_parse_inst_t *)&cmd_rm_signature_filter,
+       (cmdline_parse_inst_t *)&cmd_add_perfect_filter,
+       (cmdline_parse_inst_t *)&cmd_upd_perfect_filter,
+       (cmdline_parse_inst_t *)&cmd_rm_perfect_filter,
+       (cmdline_parse_inst_t *)&cmd_set_masks_filter,
+       (cmdline_parse_inst_t *)&cmd_stop,
+       (cmdline_parse_inst_t *)&cmd_mac_addr,
+       NULL, /* sentinel required by the cmdline parser */
+};
+
+/* prompt function, called from main on MASTER lcore */
+void
+prompt(void)
+{ /* run the interactive "testpmd> " command loop on stdin until quit/EOF */
+       struct cmdline *cl;
+
+       cl = cmdline_stdin_new(main_ctx, "testpmd> ");
+       if (cl == NULL) { /* allocation/terminal-setup failure: silently skip interactive mode */
+               return;
+       }
+       cmdline_interact(cl); /* blocks until cmdline_quit() or EOF */
+       cmdline_stdin_exit(cl); /* restore terminal settings and free cl */
+}
diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c
new file mode 100644 (file)
index 0000000..fd62235
--- /dev/null
@@ -0,0 +1,1142 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <stdarg.h>
+#include <errno.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdarg.h>
+#include <stdint.h>
+#include <inttypes.h>
+
+#include <sys/queue.h>
+
+#include <rte_common.h>
+#include <rte_byteorder.h>
+#include <rte_debug.h>
+#include <rte_log.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_memzone.h>
+#include <rte_launch.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_ring.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_string_fns.h>
+
+#include "testpmd.h"
+
+/* Print "<name>XX:XX:XX:XX:XX:XX" (uppercase hex, no trailing newline). */
+static void
+print_ethaddr(const char *name, struct ether_addr *eth_addr)
+{
+       int i;
+
+       printf("%s%02X", name, eth_addr->addr_bytes[0]);
+       for (i = 1; i < 6; i++)
+               printf(":%02X", eth_addr->addr_bytes[i]);
+}
+
+/*
+ * Display the RX/TX packet/error/byte counters of a port, plus the
+ * flow-director match/miss counters when flow director is enabled.
+ */
+void
+nic_stats_display(portid_t port_id)
+{
+       struct rte_eth_stats stats;
+
+       static const char *nic_stats_border = "########################";
+
+       if (port_id >= nb_ports) {
+               printf("Invalid port, range is [0, %d]\n", nb_ports - 1);
+               return;
+       }
+       rte_eth_stats_get(port_id, &stats);
+       /* Fix: a space was missing before "RX-bytes:"/"TX-bytes:", so the
+        * label fused with the previous counter once it reached 10 digits. */
+       printf("\n  %s NIC statistics for port %-2d %s\n",
+              nic_stats_border, port_id, nic_stats_border);
+       printf("  RX-packets: %-10"PRIu64" RX-errors: %-10"PRIu64" RX-bytes: "
+              "%-"PRIu64"\n"
+              "  TX-packets: %-10"PRIu64" TX-errors: %-10"PRIu64" TX-bytes: "
+              "%-"PRIu64"\n",
+              stats.ipackets, stats.ierrors, stats.ibytes,
+              stats.opackets, stats.oerrors, stats.obytes);
+
+       /* stats fdir */
+       if (fdir_conf.mode != RTE_FDIR_MODE_NONE)
+               printf("  Fdirmiss:   %-10"PRIu64" Fdirmatch: %-10"PRIu64"\n",
+                      stats.fdirmiss,
+                      stats.fdirmatch);
+
+       printf("  %s############################%s\n",
+              nic_stats_border, nic_stats_border);
+}
+
+/* Reset all statistics counters of a port to zero. */
+void
+nic_stats_clear(portid_t port_id)
+{
+       if (port_id < nb_ports) {
+               rte_eth_stats_reset(port_id);
+               printf("\n  NIC statistics for port %d cleared\n", port_id);
+               return;
+       }
+       printf("Invalid port, range is [0, %d]\n", nb_ports - 1);
+}
+
+/*
+ * Display general information about a port: MAC address, link status,
+ * speed and duplex, promiscuous/allmulticast modes and the maximum
+ * number of MAC addresses the device supports.
+ */
+void
+port_infos_display(portid_t port_id)
+{
+       struct rte_port *port;
+       struct rte_eth_link link;
+       static const char *info_border = "*********************";
+
+       if (port_id >= nb_ports) {
+               printf("Invalid port, range is [0, %d]\n", nb_ports - 1);
+               return;
+       }
+       port = &ports[port_id];
+       /* Query the current link state from the driver. */
+       rte_eth_link_get(port_id, &link);
+       printf("\n%s Infos for port %-2d %s\n",
+              info_border, port_id, info_border);
+       print_ethaddr("MAC address: ", &port->eth_addr);
+       printf("\nLink status: %s\n", (link.link_status) ? ("up") : ("down"));
+       printf("Link speed: %u Mbps\n", (unsigned) link.link_speed);
+       printf("Link duplex: %s\n", (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
+              ("full-duplex") : ("half-duplex"));
+       printf("Promiscuous mode: %s\n",
+              rte_eth_promiscuous_get(port_id) ? "enabled" : "disabled");
+       printf("Allmulticast mode: %s\n",
+              rte_eth_allmulticast_get(port_id) ? "enabled" : "disabled");
+       printf("Maximum number of MAC addresses: %u\n",
+              (unsigned int)(port->dev_info.max_mac_addrs));
+}
+
+/* Return 0 when port_id is a valid port index, 1 (with a message) otherwise. */
+static int
+port_id_is_invalid(portid_t port_id)
+{
+       if (port_id >= nb_ports) {
+               printf("Invalid port %d (must be < nb_ports=%d)\n",
+                      port_id, nb_ports);
+               return 1;
+       }
+       return 0;
+}
+
+/* Return 0 when vlan_id is a valid VLAN id (< 4096), 1 otherwise. */
+static int
+vlan_id_is_invalid(uint16_t vlan_id)
+{
+       if (vlan_id >= 4096) {
+               printf("Invalid vlan_id %d (must be < 4096)\n", vlan_id);
+               return 1;
+       }
+       return 0;
+}
+
+/*
+ * Check that a PCI register offset is 4-byte aligned and lies inside the
+ * port's PCI memory resource. Return 0 when valid, 1 (with a message)
+ * otherwise. Caller must have validated port_id already.
+ */
+static int
+port_reg_off_is_invalid(portid_t port_id, uint32_t reg_off)
+{
+       uint64_t pci_len;
+
+       /* Registers are 32-bit wide, so the offset must be 4-byte aligned. */
+       if (reg_off & 0x3) {
+               printf("Port register offset 0x%X not aligned on a 4-byte "
+                      "boundary\n",
+                      (unsigned)reg_off);
+               return 1;
+       }
+       pci_len = ports[port_id].dev_info.pci_dev->mem_resource.len;
+       if (reg_off >= pci_len) {
+               printf("Port %d: register offset %u (0x%X) out of port PCI "
+                      "resource (length=%"PRIu64")\n",
+                      port_id, (unsigned)reg_off, (unsigned)reg_off,  pci_len);
+               return 1;
+       }
+       return 0;
+}
+
+/* Return 0 when bit_pos addresses a bit of a 32-bit register, 1 otherwise. */
+static int
+reg_bit_pos_is_invalid(uint8_t bit_pos)
+{
+       if (bit_pos > 31) {
+               printf("Invalid bit position %d (must be <= 31)\n", bit_pos);
+               return 1;
+       }
+       return 0;
+}
+
+/* Print "port <id> PCI register at offset 0x<off>: " (no newline). */
+#define display_port_and_reg_off(port_id, reg_off) \
+       printf("port %d PCI register at offset 0x%X: ", (port_id), (reg_off))
+
+/* Print a register value in both hexadecimal and decimal form. */
+static inline void
+display_port_reg_value(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
+{
+       unsigned int value = (unsigned)reg_v;
+
+       display_port_and_reg_off(port_id, (unsigned)reg_off);
+       printf("0x%08X (%u)\n", value, value);
+}
+
+/* Display the value of one bit of a port PCI register. */
+void
+port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_x)
+{
+       uint32_t reg_v;
+
+       /* Short-circuit keeps the original check order and messages. */
+       if (port_id_is_invalid(port_id) ||
+           port_reg_off_is_invalid(port_id, reg_off) ||
+           reg_bit_pos_is_invalid(bit_x))
+               return;
+       reg_v = port_id_pci_reg_read(port_id, reg_off);
+       display_port_and_reg_off(port_id, (unsigned)reg_off);
+       printf("bit %d=%d\n", bit_x, (int) ((reg_v & (1 << bit_x)) >> bit_x));
+}
+
+/*
+ * Display the value of the register bit field delimited by bit1_pos and
+ * bit2_pos (given in either order, both inclusive).
+ */
+void
+port_reg_bit_field_display(portid_t port_id, uint32_t reg_off,
+                          uint8_t bit1_pos, uint8_t bit2_pos)
+{
+       uint32_t reg_v;
+       uint8_t  l_bit;
+       uint8_t  h_bit;
+
+       if (port_id_is_invalid(port_id))
+               return;
+       if (port_reg_off_is_invalid(port_id, reg_off))
+               return;
+       if (reg_bit_pos_is_invalid(bit1_pos))
+               return;
+       if (reg_bit_pos_is_invalid(bit2_pos))
+               return;
+       /* Normalise so that l_bit <= h_bit whatever the argument order. */
+       if (bit1_pos > bit2_pos)
+               l_bit = bit2_pos, h_bit = bit1_pos;
+       else
+               l_bit = bit1_pos, h_bit = bit2_pos;
+
+       /* Shift the field down, then mask off bits above it; when h_bit is
+        * 31 the unsigned shift already cleared the upper bits and the mask
+        * (1 << 32) would be undefined, hence the h_bit < 31 guard. */
+       reg_v = port_id_pci_reg_read(port_id, reg_off);
+       reg_v >>= l_bit;
+       if (h_bit < 31)
+               reg_v &= ((1 << (h_bit - l_bit + 1)) - 1);
+       /* Width of the hex output: one digit per 4 bits of the field. */
+       display_port_and_reg_off(port_id, (unsigned)reg_off);
+       printf("bits[%d, %d]=0x%0*X (%u)\n", l_bit, h_bit,
+              ((h_bit - l_bit) / 4) + 1, (unsigned)reg_v, (unsigned)reg_v);
+}
+
+/* Display the full 32-bit value of a port PCI register. */
+void
+port_reg_display(portid_t port_id, uint32_t reg_off)
+{
+       uint32_t reg_v;
+
+       if (port_id_is_invalid(port_id) ||
+           port_reg_off_is_invalid(port_id, reg_off))
+               return;
+       reg_v = port_id_pci_reg_read(port_id, reg_off);
+       display_port_reg_value(port_id, reg_off, reg_v);
+}
+
+/* Set (bit_v == 1) or clear (bit_v == 0) one bit of a port PCI register,
+ * then display the resulting register value. */
+void
+port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos,
+                uint8_t bit_v)
+{
+       uint32_t reg_v;
+
+       if (port_id_is_invalid(port_id) ||
+           port_reg_off_is_invalid(port_id, reg_off) ||
+           reg_bit_pos_is_invalid(bit_pos))
+               return;
+       if (bit_v > 1) {
+               printf("Invalid bit value %d (must be 0 or 1)\n", (int) bit_v);
+               return;
+       }
+       reg_v = port_id_pci_reg_read(port_id, reg_off);
+       if (bit_v == 0)
+               reg_v &= ~(1 << bit_pos);
+       else
+               reg_v |= (1 << bit_pos);
+       port_id_pci_reg_write(port_id, reg_off, reg_v);
+       display_port_reg_value(port_id, reg_off, reg_v);
+}
+
+/*
+ * Write `value` into the register bit field delimited by bit1_pos and
+ * bit2_pos (either order, inclusive), leaving all other bits unchanged,
+ * then display the resulting register value.
+ */
+void
+port_reg_bit_field_set(portid_t port_id, uint32_t reg_off,
+                      uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value)
+{
+       uint32_t max_v;
+       uint32_t reg_v;
+       uint8_t  l_bit;
+       uint8_t  h_bit;
+
+       if (port_id_is_invalid(port_id))
+               return;
+       if (port_reg_off_is_invalid(port_id, reg_off))
+               return;
+       if (reg_bit_pos_is_invalid(bit1_pos))
+               return;
+       if (reg_bit_pos_is_invalid(bit2_pos))
+               return;
+       /* Normalise so that l_bit <= h_bit whatever the argument order. */
+       if (bit1_pos > bit2_pos)
+               l_bit = bit2_pos, h_bit = bit1_pos;
+       else
+               l_bit = bit1_pos, h_bit = bit2_pos;
+
+       /* Largest value the field can hold; the full-width case is handled
+        * separately because (1 << 32) would be undefined behaviour. */
+       if ((h_bit - l_bit) < 31)
+               max_v = (1 << (h_bit - l_bit + 1)) - 1;
+       else
+               max_v = 0xFFFFFFFF;
+
+       if (value > max_v) {
+               printf("Invalid value %u (0x%x) must be < %u (0x%x)\n",
+                               (unsigned)value, (unsigned)value,
+                               (unsigned)max_v, (unsigned)max_v);
+               return;
+       }
+       reg_v = port_id_pci_reg_read(port_id, reg_off);
+       reg_v &= ~(max_v << l_bit); /* Keep unchanged bits */
+       reg_v |= (value << l_bit); /* Set changed bits */
+       port_id_pci_reg_write(port_id, reg_off, reg_v);
+       display_port_reg_value(port_id, reg_off, reg_v);
+}
+
+/* Write a full 32-bit value into a port PCI register and display it. */
+void
+port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t reg_v)
+{
+       if (port_id_is_invalid(port_id) ||
+           port_reg_off_is_invalid(port_id, reg_off))
+               return;
+       port_id_pci_reg_write(port_id, reg_off, reg_v);
+       display_port_reg_value(port_id, reg_off, reg_v);
+}
+
+/*
+ * RX/TX ring descriptors display functions.
+ */
+/* Return 0 when rxq_id is a configured RX queue index, 1 otherwise. */
+static int
+rx_queue_id_is_invalid(queueid_t rxq_id)
+{
+       if (rxq_id >= nb_rxq) {
+               printf("Invalid RX queue %d (must be < nb_rxq=%d)\n",
+                      rxq_id, nb_rxq);
+               return 1;
+       }
+       return 0;
+}
+
+/* Return 0 when txq_id is a configured TX queue index, 1 otherwise. */
+static int
+tx_queue_id_is_invalid(queueid_t txq_id)
+{
+       if (txq_id < nb_txq)
+               return 0;
+       /* Fix: the limit reported here is nb_txq, not nb_rxq. */
+       printf("Invalid TX queue %d (must be < nb_txq=%d)\n", txq_id, nb_txq);
+       return 1;
+}
+
+/* Return 0 when rxdesc_id is a valid RX ring descriptor index, 1 otherwise. */
+static int
+rx_desc_id_is_invalid(uint16_t rxdesc_id)
+{
+       if (rxdesc_id >= nb_rxd) {
+               printf("Invalid RX descriptor %d (must be < nb_rxd=%d)\n",
+                      rxdesc_id, nb_rxd);
+               return 1;
+       }
+       return 0;
+}
+
+/* Return 0 when txdesc_id is a valid TX ring descriptor index, 1 otherwise. */
+static int
+tx_desc_id_is_invalid(uint16_t txdesc_id)
+{
+       if (txdesc_id >= nb_txd) {
+               printf("Invalid TX descriptor %d (must be < nb_txd=%d)\n",
+                      txdesc_id, nb_txd);
+               return 1;
+       }
+       return 0;
+}
+
+/*
+ * Look up the memzone holding the HW ring of queue q_id of port port_id.
+ * The zone name follows the "<driver>_<ring>_<port>_<queue>" naming used
+ * when the queues were set up. Return NULL (with a message) if not found.
+ */
+static const struct rte_memzone *
+ring_dma_zone_lookup(const char *ring_name, uint8_t port_id, uint16_t q_id)
+{
+       char mz_name[RTE_MEMZONE_NAMESIZE];
+       const struct rte_memzone *mz;
+
+       rte_snprintf(mz_name, sizeof(mz_name), "%s_%s_%d_%d",
+                ports[port_id].dev_info.driver_name, ring_name, port_id, q_id);
+       mz = rte_memzone_lookup(mz_name);
+       if (mz == NULL)
+               /* Fix: message read "zoneof ... not""found ... = %s" — the
+                * missing spaces and closing parenthesis are restored. */
+               printf("%s ring memory zone of (port %d, queue %d) not "
+                      "found (zone name = %s)\n",
+                      ring_name, port_id, q_id, mz_name);
+       return (mz);
+}
+
+/*
+ * 64-bit ring-descriptor word, also addressable as two 32-bit halves.
+ * NOTE(review): the first struct member is named "hi" although on a
+ * little-endian host it overlays the least-significant half of `dword` —
+ * confirm against the printing order in ring_descriptor_display().
+ */
+union igb_ring_dword {
+       uint64_t dword;
+       struct {
+               uint32_t hi;
+               uint32_t lo;
+       } words;
+};
+
+/* Generic 16-byte RX/TX hardware ring descriptor: two 64-bit words. */
+struct igb_ring_desc {
+       union igb_ring_dword lo_dword;
+       union igb_ring_dword hi_dword;
+};
+
+/*
+ * Dump descriptor desc_id of the ring held in ring_mz as four 32-bit
+ * words. Descriptors live in DMA memory in little-endian order, hence
+ * the rte_le_to_cpu_64() conversions before printing.
+ */
+static void
+ring_descriptor_display(const struct rte_memzone *ring_mz, uint16_t desc_id)
+{
+       struct igb_ring_desc *ring;
+       struct igb_ring_desc rd;
+
+       ring = (struct igb_ring_desc *) ring_mz->addr;
+       rd.lo_dword = rte_le_to_cpu_64(ring[desc_id].lo_dword);
+       rd.hi_dword = rte_le_to_cpu_64(ring[desc_id].hi_dword);
+       printf("    0x%08X - 0x%08X / 0x%08X - 0x%08X\n",
+               (unsigned)rd.lo_dword.words.lo, (unsigned)rd.lo_dword.words.hi,
+               (unsigned)rd.hi_dword.words.lo, (unsigned)rd.hi_dword.words.hi);
+}
+
+/* Display one descriptor of the RX ring of queue rxq_id on port port_id. */
+void
+rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id)
+{
+       const struct rte_memzone *rx_mz;
+
+       if (port_id_is_invalid(port_id) ||
+           rx_queue_id_is_invalid(rxq_id) ||
+           rx_desc_id_is_invalid(rxd_id))
+               return;
+       rx_mz = ring_dma_zone_lookup("rx_ring", port_id, rxq_id);
+       if (rx_mz != NULL)
+               ring_descriptor_display(rx_mz, rxd_id);
+}
+
+/* Display one descriptor of the TX ring of queue txq_id on port port_id. */
+void
+tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id)
+{
+       const struct rte_memzone *tx_mz;
+
+       if (port_id_is_invalid(port_id) ||
+           tx_queue_id_is_invalid(txq_id) ||
+           tx_desc_id_is_invalid(txd_id))
+               return;
+       tx_mz = ring_dma_zone_lookup("tx_ring", port_id, txq_id);
+       if (tx_mz != NULL)
+               ring_descriptor_display(tx_mz, txd_id);
+}
+
+/* Print the list of lcore ids configured for packet forwarding. */
+void
+fwd_lcores_config_display(void)
+{
+       lcoreid_t i;
+
+       printf("List of forwarding lcores:");
+       for (i = 0; i < nb_cfg_lcores; i++)
+               printf(" %2u", fwd_lcores_cpuids[i]);
+       printf("\n");
+}
+/*
+ * Display the RX/TX configuration of the current forwarding engine:
+ * burst size, queue and descriptor counts, and threshold registers.
+ */
+void
+rxtx_config_display(void)
+{
+       printf("  %s packet forwarding - CRC stripping %s - "
+              "packets/burst=%d\n", cur_fwd_eng->fwd_mode_name,
+              rx_mode.hw_strip_crc ? "enabled" : "disabled",
+              nb_pkt_per_burst);
+
+       /* Packet length/segments only apply to the TX-only engine. */
+       if (cur_fwd_eng == &tx_only_engine)
+               printf("  packet len=%u - nb packet segments=%d\n",
+                               (unsigned)tx_pkt_length, (int) tx_pkt_nb_segs);
+
+       printf("  nb forwarding cores=%d - nb forwarding ports=%d\n",
+              nb_fwd_lcores, nb_fwd_ports);
+       printf("  RX queues=%d - RX desc=%d - RX free threshold=%d\n",
+              nb_rxq, nb_rxd, rx_free_thresh);
+       printf("  RX threshold registers: pthresh=%d hthresh=%d wthresh=%d\n",
+              rx_thresh.pthresh, rx_thresh.hthresh, rx_thresh.wthresh);
+       printf("  TX queues=%d - TX desc=%d - TX free threshold=%d\n",
+              nb_txq, nb_txd, tx_free_thresh);
+       printf("  TX threshold registers: pthresh=%d hthresh=%d wthresh=%d\n",
+              tx_thresh.pthresh, tx_thresh.hthresh, tx_thresh.wthresh);
+       printf("  TX RS bit threshold=%d\n", tx_rs_thresh);
+}
+
+/*
+ * Setup forwarding configuration for each logical core: distribute
+ * cfg->nb_fwd_streams streams over cfg->nb_fwd_lcores cores. Each core
+ * gets nb_fs / nb_fc streams; the last nb_fs % nb_fc cores get one more.
+ */
+static void
+setup_fwd_config_of_each_lcore(struct fwd_config *cfg)
+{
+       streamid_t nb_fs_per_lcore;
+       streamid_t nb_fs;
+       streamid_t sm_id;
+       lcoreid_t  nb_extra;
+       lcoreid_t  nb_fc;
+       lcoreid_t  nb_lc;
+       lcoreid_t  lc_id;
+
+       nb_fs = cfg->nb_fwd_streams;
+       nb_fc = cfg->nb_fwd_lcores;
+       if (nb_fs <= nb_fc) {
+               nb_fs_per_lcore = 1;
+               nb_extra = 0;
+       } else {
+               nb_fs_per_lcore = (streamid_t) (nb_fs / nb_fc);
+               nb_extra = (lcoreid_t) (nb_fs % nb_fc);
+       }
+       /* Fix: a stray duplicate "nb_extra = (lcoreid_t) (nb_fs % nb_fc);"
+        * used to follow the if/else, clobbering the nb_extra = 0 branch and
+        * double-assigning streams whenever nb_fs < nb_fc. */
+
+       /* First nb_lc cores get exactly nb_fs_per_lcore streams each. */
+       nb_lc = (lcoreid_t) (nb_fc - nb_extra);
+       sm_id = 0;
+       for (lc_id = 0; lc_id < nb_lc; lc_id++) {
+               fwd_lcores[lc_id]->stream_idx = sm_id;
+               fwd_lcores[lc_id]->stream_nb = nb_fs_per_lcore;
+               sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
+       }
+
+       /*
+        * Assign extra remaining streams, if any.
+        */
+       nb_fs_per_lcore = (streamid_t) (nb_fs_per_lcore + 1);
+       for (lc_id = 0; lc_id < nb_extra; lc_id++) {
+               fwd_lcores[nb_lc + lc_id]->stream_idx = sm_id;
+               fwd_lcores[nb_lc + lc_id]->stream_nb = nb_fs_per_lcore;
+               sm_id = (streamid_t) (sm_id + nb_fs_per_lcore);
+       }
+}
+
+/*
+ * Forwarding setup for the simple (non-RSS) case: one stream per port,
+ * each port forwarding to its neighbour (paired topology) or to the next
+ * port modulo the port count (chained topology).
+ */
+static void
+simple_fwd_config_setup(void)
+{
+       portid_t i;
+       portid_t j;
+       portid_t inc = 2;
+
+       /* Paired topology needs an even number of ports; chained can step
+        * one port at a time. */
+       if (nb_fwd_ports % 2) {
+               if (port_topology == PORT_TOPOLOGY_CHAINED) {
+                       inc = 1;
+               }
+               else {
+                       printf("\nWarning! Cannot handle an odd number of ports "
+                              "with the current port topology. Configuration "
+                              "must be changed to have an even number of ports, "
+                              "or relaunch application with "
+                              "--port-topology=chained\n\n");
+               }
+       }
+
+       cur_fwd_config.nb_fwd_ports = (portid_t) nb_fwd_ports;
+       cur_fwd_config.nb_fwd_streams =
+               (streamid_t) cur_fwd_config.nb_fwd_ports;
+
+       /*
+        * In the simple forwarding test, the number of forwarding cores
+        * must be lower or equal to the number of forwarding ports.
+        */
+       cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
+       if (cur_fwd_config.nb_fwd_lcores > cur_fwd_config.nb_fwd_ports)
+               cur_fwd_config.nb_fwd_lcores =
+                       (lcoreid_t) cur_fwd_config.nb_fwd_ports;
+       setup_fwd_config_of_each_lcore(&cur_fwd_config);
+
+       /* Wire stream i: RX on port i, TX on the next port j; in paired
+        * mode also wire the reverse stream j -> i. */
+       for (i = 0; i < cur_fwd_config.nb_fwd_ports; i = (portid_t) (i + inc)) {
+               j = (portid_t) ((i + 1) % cur_fwd_config.nb_fwd_ports);
+               fwd_streams[i]->rx_port   = fwd_ports_ids[i];
+               fwd_streams[i]->rx_queue  = 0;
+               fwd_streams[i]->tx_port   = fwd_ports_ids[j];
+               fwd_streams[i]->tx_queue  = 0;
+               fwd_streams[i]->peer_addr = j;
+
+               if (port_topology == PORT_TOPOLOGY_PAIRED) {
+                       fwd_streams[j]->rx_port   = fwd_ports_ids[j];
+                       fwd_streams[j]->rx_queue  = 0;
+                       fwd_streams[j]->tx_port   = fwd_ports_ids[i];
+                       fwd_streams[j]->tx_queue  = 0;
+                       fwd_streams[j]->peer_addr = i;
+               }
+       }
+}
+
+/**
+ * For the RSS forwarding test, each core is assigned on every port a transmit
+ * queue whose index is the index of the core itself. This approach limits the
+ * maximum number of processing cores of the RSS test to the maximum number of
+ * TX queues supported by the devices.
+ *
+ * Each core is assigned a single stream, each stream being composed of
+ * a RX queue to poll on a RX port for input messages, associated with
+ * a TX queue of a TX port where to send forwarded packets.
+ * All packets received on the RX queue of index "RxQj" of the RX port "RxPi"
+ * are sent on the TX queue "TxQl" of the TX port "TxPk" according to the two
+ * following rules:
+ *    - TxPk = (RxPi + 1) if RxPi is even, (RxPi - 1) if RxPi is odd
+ *    - TxQl = RxQj
+ */
+/*
+ * Build one stream per (port, queue) pair, one stream per forwarding
+ * lcore; see the block comment above for the TxPk/TxQl pairing rules.
+ */
+static void
+rss_fwd_config_setup(void)
+{
+       portid_t   rxp;
+       portid_t   txp;
+       queueid_t  rxq;
+       queueid_t  nb_q;
+       lcoreid_t  lc_id;
+
+       /* A stream needs both an RX and a TX queue of the same index, so
+        * use the smaller of the two queue counts. */
+       nb_q = nb_rxq;
+       if (nb_q > nb_txq)
+               nb_q = nb_txq;
+       cur_fwd_config.nb_fwd_lcores = (lcoreid_t) nb_fwd_lcores;
+       cur_fwd_config.nb_fwd_ports = nb_fwd_ports;
+       cur_fwd_config.nb_fwd_streams =
+               (streamid_t) (nb_q * cur_fwd_config.nb_fwd_ports);
+       /* One stream per lcore: clip whichever of the two is larger. */
+       if (cur_fwd_config.nb_fwd_streams > cur_fwd_config.nb_fwd_lcores)
+               cur_fwd_config.nb_fwd_streams =
+                       (streamid_t)cur_fwd_config.nb_fwd_lcores;
+       else
+               cur_fwd_config.nb_fwd_lcores =
+                       (lcoreid_t)cur_fwd_config.nb_fwd_streams;
+       setup_fwd_config_of_each_lcore(&cur_fwd_config);
+       rxp = 0; rxq = 0;
+       for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++) {
+               struct fwd_stream *fs;
+
+               fs = fwd_streams[lc_id];
+               /* Even RX port forwards to the next port, odd to the
+                * previous one. */
+               if ((rxp & 0x1) == 0)
+                       txp = (portid_t) (rxp + 1);
+               else
+                       txp = (portid_t) (rxp - 1);
+               fs->rx_port = fwd_ports_ids[rxp];
+               fs->rx_queue = rxq;
+               fs->tx_port = fwd_ports_ids[txp];
+               fs->tx_queue = rxq;
+               fs->peer_addr = fs->tx_port;
+               rxq = (queueid_t) (rxq + 1);
+               if (rxq < nb_q)
+                       continue;
+               /*
+                * rxq == nb_q
+                * Restart from RX queue 0 on next RX port
+                */
+               rxq = 0;
+               if (numa_support && (nb_fwd_ports <= (nb_ports >> 1)))
+                       rxp = (portid_t)
+                               (rxp + ((nb_ports >> 1) / nb_fwd_ports));
+               else
+                       rxp = (portid_t) (rxp + 1);
+       }
+}
+
+/* Rebuild cur_fwd_config; RSS layout needs multiple RX and TX queues. */
+void
+fwd_config_setup(void)
+{
+       cur_fwd_config.fwd_eng = cur_fwd_eng;
+       if (nb_rxq > 1 && nb_txq > 1)
+               rss_fwd_config_setup();
+       else
+               simple_fwd_config_setup();
+}
+
+/*
+ * Display a forwarding configuration: global summary, then for each
+ * lcore the list of streams it polls with their RX/TX port, queue,
+ * socket and peer MAC address.
+ */
+static void
+pkt_fwd_config_display(struct fwd_config *cfg)
+{
+       struct fwd_stream *fs;
+       lcoreid_t  lc_id;
+       streamid_t sm_id;
+
+       printf("%s packet forwarding - ports=%d - cores=%d - streams=%d - "
+              "NUMA support %s\n",
+              cfg->fwd_eng->fwd_mode_name,
+              cfg->nb_fwd_ports, cfg->nb_fwd_lcores, cfg->nb_fwd_streams,
+              numa_support == 1 ? "enabled" : "disabled");
+       for (lc_id = 0; lc_id < cfg->nb_fwd_lcores; lc_id++) {
+               printf("Logical Core %u (socket %u) forwards packets on "
+                      "%d streams:",
+                      fwd_lcores_cpuids[lc_id],
+                      rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]),
+                      fwd_lcores[lc_id]->stream_nb);
+               /* Streams of an lcore are contiguous, starting at stream_idx. */
+               for (sm_id = 0; sm_id < fwd_lcores[lc_id]->stream_nb; sm_id++) {
+                       fs = fwd_streams[fwd_lcores[lc_id]->stream_idx + sm_id];
+                       printf("\n  RX P=%d/Q=%d (socket %u) -> TX "
+                              "P=%d/Q=%d (socket %u) ",
+                              fs->rx_port, fs->rx_queue,
+                              ports[fs->rx_port].socket_id,
+                              fs->tx_port, fs->tx_queue,
+                              ports[fs->tx_port].socket_id);
+                       print_ethaddr("peer=",
+                                     &peer_eth_addrs[fs->peer_addr]);
+               }
+               printf("\n");
+       }
+       printf("\n");
+}
+
+
+/* Rebuild the forwarding configuration, then display it. */
+void
+fwd_config_display(void)
+{
+       fwd_config_setup();
+       pkt_fwd_config_display(&cur_fwd_config);
+}
+
+/*
+ * Set the list of forwarding lcores from an explicit list of lcore ids.
+ * Two passes over the list (via the "again" label): the first validates
+ * every entry, the second records them, so a bad entry in the middle of
+ * the list leaves the previous configuration untouched.
+ */
+void
+set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc)
+{
+       unsigned int i;
+       unsigned int lcore_cpuid;
+       int record_now;
+
+       record_now = 0;
+ again:
+       for (i = 0; i < nb_lc; i++) {
+               lcore_cpuid = lcorelist[i];
+               if (! rte_lcore_is_enabled(lcore_cpuid)) {
+                       printf("Logical core %u not enabled\n", lcore_cpuid);
+                       return;
+               }
+               /* The master lcore runs the command prompt, not forwarding. */
+               if (lcore_cpuid == rte_get_master_lcore()) {
+                       printf("Master core %u cannot forward packets\n",
+                              lcore_cpuid);
+                       return;
+               }
+               if (record_now)
+                       fwd_lcores_cpuids[i] = lcore_cpuid;
+       }
+       if (record_now == 0) {
+               record_now = 1;
+               goto again;
+       }
+       nb_cfg_lcores = (lcoreid_t) nb_lc;
+       if (nb_fwd_lcores != (lcoreid_t) nb_lc) {
+               printf("previous number of forwarding cores %u - changed to "
+                      "number of configured cores %u\n",
+                      (unsigned int) nb_fwd_lcores, nb_lc);
+               nb_fwd_lcores = (lcoreid_t) nb_lc;
+       }
+}
+
+/* Set the list of forwarding lcores from a 64-bit core bitmask. */
+void
+set_fwd_lcores_mask(uint64_t lcoremask)
+{
+       unsigned int lcorelist[64];
+       unsigned int count;
+       unsigned int bit;
+
+       if (lcoremask == 0) {
+               printf("Invalid NULL mask of cores\n");
+               return;
+       }
+       /* Expand set bits into an explicit lcore id list. */
+       count = 0;
+       for (bit = 0; bit < 64; bit++) {
+               if (lcoremask & (uint64_t)(1ULL << bit))
+                       lcorelist[count++] = bit;
+       }
+       set_fwd_lcores_list(lcorelist, count);
+}
+
+/* Shrink the number of active forwarding cores (up to nb_cfg_lcores). */
+void
+set_fwd_lcores_number(uint16_t nb_lc)
+{
+       if (nb_lc <= nb_cfg_lcores) {
+               nb_fwd_lcores = (lcoreid_t) nb_lc;
+               printf("Number of forwarding cores set to %u\n",
+                      (unsigned int) nb_fwd_lcores);
+               return;
+       }
+       printf("nb fwd cores %u > %u (max. number of configured "
+              "lcores) - ignored\n",
+              (unsigned int) nb_lc, (unsigned int) nb_cfg_lcores);
+}
+
+/*
+ * Set the list of forwarding ports from an explicit list of port ids.
+ * Two passes (via the "again" label): the first validates every entry,
+ * the second records them, so an invalid id leaves the previous
+ * configuration untouched.
+ */
+void
+set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt)
+{
+       unsigned int i;
+       portid_t port_id;
+       int record_now;
+
+       record_now = 0;
+ again:
+       for (i = 0; i < nb_pt; i++) {
+               port_id = (portid_t) portlist[i];
+               if (port_id >= nb_ports) {
+                       printf("Invalid port id %u > %u\n",
+                              (unsigned int) port_id,
+                              (unsigned int) nb_ports);
+                       return;
+               }
+               if (record_now)
+                       fwd_ports_ids[i] = port_id;
+       }
+       if (record_now == 0) {
+               record_now = 1;
+               goto again;
+       }
+       nb_cfg_ports = (portid_t) nb_pt;
+       if (nb_fwd_ports != (portid_t) nb_pt) {
+               printf("previous number of forwarding ports %u - changed to "
+                      "number of configured ports %u\n",
+                      (unsigned int) nb_fwd_ports, nb_pt);
+               nb_fwd_ports = (portid_t) nb_pt;
+       }
+}
+
+/* Set the list of forwarding ports from a 64-bit port bitmask. */
+void
+set_fwd_ports_mask(uint64_t portmask)
+{
+       unsigned int portlist[64];
+       unsigned int count;
+       unsigned int bit;
+
+       if (portmask == 0) {
+               printf("Invalid NULL mask of ports\n");
+               return;
+       }
+       /* Expand set bits into an explicit port id list. */
+       count = 0;
+       for (bit = 0; bit < 64; bit++) {
+               if (portmask & (uint64_t)(1ULL << bit))
+                       portlist[count++] = bit;
+       }
+       set_fwd_ports_list(portlist, count);
+}
+
+/* Shrink the number of active forwarding ports (up to nb_cfg_ports). */
+void
+set_fwd_ports_number(uint16_t nb_pt)
+{
+       if (nb_pt <= nb_cfg_ports) {
+               nb_fwd_ports = (portid_t) nb_pt;
+               printf("Number of forwarding ports set to %u\n",
+                      (unsigned int) nb_fwd_ports);
+               return;
+       }
+       printf("nb fwd ports %u > %u (number of configured "
+              "ports) - ignored\n",
+              (unsigned int) nb_pt, (unsigned int) nb_cfg_ports);
+}
+
+/* Set the RX/TX burst size, bounded by MAX_PKT_BURST. */
+void
+set_nb_pkt_per_burst(uint16_t nb)
+{
+       if (nb <= MAX_PKT_BURST) {
+               nb_pkt_per_burst = nb;
+               printf("Number of packets per burst set to %u\n",
+                      (unsigned int) nb_pkt_per_burst);
+               return;
+       }
+       printf("nb pkt per burst: %u > %u (maximum packet per burst) "
+              " ignored\n",
+              (unsigned int) nb, (unsigned int) MAX_PKT_BURST);
+}
+
+/*
+ * Configure the segment lengths of packets generated by the TX-only
+ * engine. Validates each segment against the mbuf data size and the
+ * total length against the smallest sensible packet before committing.
+ */
+void
+set_tx_pkt_segments(unsigned *seg_lengths, unsigned nb_segs)
+{
+       uint16_t tx_pkt_len;
+       unsigned i;
+
+       if (nb_segs >= (unsigned) nb_txd) {
+               printf("nb segments per TX packets=%u >= nb_txd=%u - ignored\n",
+                      nb_segs, (unsigned int) nb_txd);
+               return;
+       }
+
+       /*
+        * Check that each segment length is greater or equal than
+        * the mbuf data size.
+        * Check also that the total packet length is greater or equal than the
+        * size of an empty UDP/IP packet (sizeof(struct ether_hdr) + 20 + 8).
+        */
+       tx_pkt_len = 0;
+       for (i = 0; i < nb_segs; i++) {
+               if (seg_lengths[i] > (unsigned) mbuf_data_size) {
+                       printf("length[%u]=%u > mbuf_data_size=%u - give up\n",
+                              i, seg_lengths[i], (unsigned) mbuf_data_size);
+                       return;
+               }
+               tx_pkt_len = (uint16_t)(tx_pkt_len + seg_lengths[i]);
+       }
+       /* 20 = minimal IPv4 header, 8 = UDP header. */
+       if (tx_pkt_len < (sizeof(struct ether_hdr) + 20 + 8)) {
+               printf("total packet length=%u < %d - give up\n",
+                               (unsigned) tx_pkt_len,
+                               (int)(sizeof(struct ether_hdr) + 20 + 8));
+               return;
+       }
+
+       /* All checks passed: commit the new segment layout. */
+       for (i = 0; i < nb_segs; i++)
+               tx_pkt_seg_lengths[i] = (uint16_t) seg_lengths[i];
+
+       tx_pkt_length  = tx_pkt_len;
+       tx_pkt_nb_segs = (uint8_t) nb_segs;
+}
+
+/* Select the forwarding engine whose name matches fwd_mode_name. */
+void
+set_pkt_forwarding_mode(const char *fwd_mode_name)
+{
+       struct fwd_engine *fwd_eng;
+       unsigned idx;
+
+       /* fwd_engines is a NULL-terminated array of engine pointers. */
+       for (idx = 0; (fwd_eng = fwd_engines[idx]) != NULL; idx++) {
+               if (strcmp(fwd_eng->fwd_mode_name, fwd_mode_name) == 0) {
+                       printf("Set %s packet forwarding mode\n",
+                              fwd_mode_name);
+                       cur_fwd_eng = fwd_eng;
+                       return;
+               }
+       }
+       printf("Invalid %s packet forwarding mode\n", fwd_mode_name);
+}
+
+/* Set the global verbosity level of the forwarding engines. */
+void
+set_verbose_level(uint16_t vb_level)
+{
+       printf("Change verbose level from %u to %u\n",
+              (unsigned int) verbose_level, (unsigned int) vb_level);
+       verbose_level = vb_level;
+}
+
+/*
+ * Enable (on != 0) or disable (on == 0) RX filtering of one VLAN id
+ * on a port.
+ */
+void
+rx_vlan_filter_set(portid_t port_id, uint16_t vlan_id, int on)
+{
+       int diag;
+
+       if (port_id_is_invalid(port_id))
+               return;
+       if (vlan_id_is_invalid(vlan_id))
+               return;
+       diag = rte_eth_dev_vlan_filter(port_id, vlan_id, on);
+       if (diag == 0)
+               return;
+       /* Fix: the error message previously printed "port_pi". */
+       printf("rte_eth_dev_vlan_filter(port_id=%d, vlan_id=%d, on=%d) failed "
+              "diag=%d\n",
+              port_id, vlan_id, on, diag);
+}
+
+void
+rx_vlan_all_filter_set(portid_t port_id, int on)
+{
+       uint16_t vlan_id;
+
+       if (port_id_is_invalid(port_id))
+               return;
+       for (vlan_id = 0; vlan_id < 4096; vlan_id++)
+               rx_vlan_filter_set(port_id, vlan_id, on);
+}
+
+void
+tx_vlan_set(portid_t port_id, uint16_t vlan_id)
+{
+       if (port_id_is_invalid(port_id))
+               return;
+       if (vlan_id_is_invalid(vlan_id))
+               return;
+       ports[port_id].tx_ol_flags |= PKT_TX_VLAN_PKT;
+       ports[port_id].tx_vlan_id = vlan_id;
+}
+
/*
 * Stop requesting VLAN tag insertion in packets sent on port port_id.
 * The stored tx_vlan_id is left untouched.
 */
void
tx_vlan_reset(portid_t port_id)
{
	if (port_id_is_invalid(port_id))
		return;
	ports[port_id].tx_ol_flags &= ~PKT_TX_VLAN_PKT;
}
+
+void
+tx_cksum_set(portid_t port_id, uint8_t cksum_mask)
+{
+       uint16_t tx_ol_flags;
+       if (port_id_is_invalid(port_id))
+               return;
+       /* Clear last 4 bits and then set L3/4 checksum mask again */
+       tx_ol_flags = (uint16_t) (ports[port_id].tx_ol_flags & 0xFFF0);
+       ports[port_id].tx_ol_flags = (uint16_t) ((cksum_mask & 0xf) | tx_ol_flags);
+}
+
+void
+fdir_add_signature_filter(portid_t port_id, uint8_t queue_id,
+                         struct rte_fdir_filter *fdir_filter)
+{
+       int diag;
+
+       if (port_id_is_invalid(port_id))
+               return;
+
+       diag = rte_eth_dev_fdir_add_signature_filter(port_id, fdir_filter,
+                                                    queue_id);
+       if (diag == 0)
+               return;
+
+       printf("rte_eth_dev_fdir_add_signature_filter for port_id=%d failed "
+              "diag=%d\n", port_id, diag);
+}
+
+void
+fdir_update_signature_filter(portid_t port_id, uint8_t queue_id,
+                            struct rte_fdir_filter *fdir_filter)
+{
+       int diag;
+
+       if (port_id_is_invalid(port_id))
+               return;
+
+       diag = rte_eth_dev_fdir_update_signature_filter(port_id, fdir_filter,
+                                                       queue_id);
+       if (diag == 0)
+               return;
+
+       printf("rte_eth_dev_fdir_update_signature_filter for port_id=%d failed "
+              "diag=%d\n", port_id, diag);
+}
+
+void
+fdir_remove_signature_filter(portid_t port_id,
+                            struct rte_fdir_filter *fdir_filter)
+{
+       int diag;
+
+       if (port_id_is_invalid(port_id))
+               return;
+
+       diag = rte_eth_dev_fdir_remove_signature_filter(port_id, fdir_filter);
+       if (diag == 0)
+               return;
+
+       printf("rte_eth_dev_fdir_add_signature_filter for port_id=%d failed "
+              "diag=%d\n", port_id, diag);
+
+}
+
/*
 * Read and display the flow-director (FDIR) information of a port:
 * hash-table occupancy (collision/free/maxhash/maxlen) and filter
 * add/remove counters, including the failed ones (f_add/f_remove).
 */
void
fdir_get_infos(portid_t port_id)
{
	struct rte_eth_fdir fdir_infos;

	/* Decorative border printed on both sides of the title line. */
	static const char *fdir_stats_border = "########################";

	if (port_id_is_invalid(port_id))
		return;

	rte_eth_dev_fdir_get_infos(port_id, &fdir_infos);

	printf("\n  %s FDIR infos for port %-2d %s\n",
	       fdir_stats_border, port_id, fdir_stats_border);

	printf("  collision: %-10"PRIu64" free: %-10"PRIu64"\n"
	       "  maxhash: %-10"PRIu64" maxlen: %-10"PRIu64"\n"
	       "  add : %-10"PRIu64"   remove : %-10"PRIu64"\n"
	       "  f_add: %-10"PRIu64" f_remove: %-10"PRIu64"\n",
	       (uint64_t)(fdir_infos.collision), (uint64_t)(fdir_infos.free),
	       (uint64_t)(fdir_infos.maxhash), (uint64_t)(fdir_infos.maxlen),
	       fdir_infos.add, fdir_infos.remove,
	       fdir_infos.f_add, fdir_infos.f_remove);
	printf("  %s############################%s\n",
	       fdir_stats_border, fdir_stats_border);
}
+
+void
+fdir_add_perfect_filter(portid_t port_id, uint16_t soft_id, uint8_t queue_id,
+                       uint8_t drop, struct rte_fdir_filter *fdir_filter)
+{
+       int diag;
+
+       if (port_id_is_invalid(port_id))
+               return;
+
+       diag = rte_eth_dev_fdir_add_perfect_filter(port_id, fdir_filter,
+                                                  soft_id, queue_id, drop);
+       if (diag == 0)
+               return;
+
+       printf("rte_eth_dev_fdir_add_perfect_filter for port_id=%d failed "
+              "diag=%d\n", port_id, diag);
+}
+
+void
+fdir_update_perfect_filter(portid_t port_id, uint16_t soft_id, uint8_t queue_id,
+                          uint8_t drop, struct rte_fdir_filter *fdir_filter)
+{
+       int diag;
+
+       if (port_id_is_invalid(port_id))
+               return;
+
+       diag = rte_eth_dev_fdir_update_perfect_filter(port_id, fdir_filter,
+                                                     soft_id, queue_id, drop);
+       if (diag == 0)
+               return;
+
+       printf("rte_eth_dev_fdir_update_perfect_filter for port_id=%d failed "
+              "diag=%d\n", port_id, diag);
+}
+
+void
+fdir_remove_perfect_filter(portid_t port_id, uint16_t soft_id,
+                          struct rte_fdir_filter *fdir_filter)
+{
+       int diag;
+
+       if (port_id_is_invalid(port_id))
+               return;
+
+       diag = rte_eth_dev_fdir_remove_perfect_filter(port_id, fdir_filter,
+                                                     soft_id);
+       if (diag == 0)
+               return;
+
+       printf("rte_eth_dev_fdir_update_perfect_filter for port_id=%d failed "
+              "diag=%d\n", port_id, diag);
+}
+
+void
+fdir_set_masks(portid_t port_id, struct rte_fdir_masks *fdir_masks)
+{
+       int diag;
+
+       if (port_id_is_invalid(port_id))
+               return;
+
+       diag = rte_eth_dev_fdir_set_masks(port_id, fdir_masks);
+       if (diag == 0)
+               return;
+
+       printf("rte_eth_dev_set_masks_filter for port_id=%d failed "
+              "diag=%d\n", port_id, diag);
+}
diff --git a/app/test-pmd/csumonly.c b/app/test-pmd/csumonly.c
new file mode 100644 (file)
index 0000000..7aabcde
--- /dev/null
@@ -0,0 +1,449 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <stdarg.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <unistd.h>
+#include <inttypes.h>
+
+#include <sys/queue.h>
+#include <sys/stat.h>
+
+#include <rte_common.h>
+#include <rte_byteorder.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_cycles.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_memzone.h>
+#include <rte_launch.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_ring.h>
+#include <rte_memory.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_memcpy.h>
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_ip.h>
+#include <rte_tcp.h>
+#include <rte_udp.h>
+#include <rte_sctp.h>
+#include <rte_prefetch.h>
+#include <rte_string_fns.h>
+#include "testpmd.h"
+
+
+
+#define IP_DEFTTL  64   /* from RFC 1340. */
+#define IP_VERSION 0x40
+#define IP_HDRLEN  0x05 /* default IP header length == five 32-bits words. */
+#define IP_VHL_DEF (IP_VERSION | IP_HDRLEN)
+
/* Pseudo header used to seed the IPv4 UDP/TCP checksum (layout per
 * RFC 768 / RFC 793); packed so it can be summed as raw 16-bit words. */
struct psd_header {
	uint32_t src_addr; /* IP address of source host. */
	uint32_t dst_addr; /* IP address of destination host(s). */
	uint8_t  zero;     /* zero. */
	uint8_t  proto;    /* L4 protocol type. */
	uint16_t len;      /* L4 length. */
} __attribute__((__packed__));
+
+
/* Pseudo header used to seed the IPv6 UDP/TCP checksum (layout per
 * RFC 2460 §8.1); packed so it can be summed as raw 16-bit words.
 * NOTE: src_addr and dst_addr must stay adjacent — get_ipv6_psd_sum()
 * fills both with a single copy. */
struct ipv6_psd_header {
	uint8_t src_addr[16]; /* IP address of source host. */
	uint8_t dst_addr[16]; /* IP address of destination host(s). */
	uint32_t len;         /* L4 length. */
	uint8_t  zero[3];     /* zero. */
	uint8_t  proto;       /* L4 protocol. */
} __attribute__((__packed__));
+
+
/*
 * One's-complement sum of nr bytes at ptr16, read as 16-bit words.
 * An odd trailing byte, if any, is added as a single byte value.
 * Carries are folded back into the low 16 bits (RFC 1071 style).
 */
static inline uint16_t
get_16b_sum(uint16_t *ptr16, uint32_t nr)
{
	uint32_t acc = 0;

	for (; nr > 1; nr -= sizeof(uint16_t), ptr16++) {
		acc += *ptr16;
		if (acc > UINT16_MAX)
			acc -= UINT16_MAX; /* fold carry back in */
	}

	/* If length is in odd bytes */
	if (nr)
		acc += *((uint8_t *)ptr16);

	acc = (acc >> 16) + (acc & 0xffff);
	return (uint16_t)(acc & 0x0ffff);
}
+
+static inline uint16_t
+get_ipv4_cksum(struct ipv4_hdr *ipv4_hdr)
+{
+       uint16_t cksum;
+       cksum = get_16b_sum((uint16_t*)ipv4_hdr, sizeof(struct ipv4_hdr));
+       return (uint16_t)((cksum == 0xffff)?cksum:~cksum);
+}
+
+
+static inline
+uint16_t get_ipv4_psd_sum (struct ipv4_hdr * ip_hdr)
+{
+       struct psd_header psd_hdr;
+       psd_hdr.src_addr = ip_hdr->src_addr;
+       psd_hdr.dst_addr = ip_hdr->dst_addr;
+       psd_hdr.zero     = 0;
+       psd_hdr.proto    = ip_hdr->next_proto_id;
+       psd_hdr.len      = rte_cpu_to_be_16((uint16_t)(rte_be_to_cpu_16(ip_hdr->total_length)
+                               - sizeof(struct ipv4_hdr)));
+       return get_16b_sum((uint16_t*)&psd_hdr, sizeof(struct psd_header));
+}
+
/*
 * One's-complement sum of the IPv6 pseudo header built from *ip_hdr,
 * used to seed the UDP/TCP checksum.
 */
static inline
uint16_t get_ipv6_psd_sum (struct ipv6_hdr * ip_hdr)
{
	struct ipv6_psd_header psd_hdr;
	/* A single copy fills both addresses: src_addr and dst_addr are
	 * adjacent in both ipv6_hdr and ipv6_psd_header. */
	rte_memcpy(psd_hdr.src_addr, ip_hdr->src_addr, sizeof(ip_hdr->src_addr)
			+ sizeof(ip_hdr->dst_addr));

	psd_hdr.zero[0]   = 0;
	psd_hdr.zero[1]   = 0;
	psd_hdr.zero[2]   = 0;
	psd_hdr.proto     = ip_hdr->proto;
	/* NOTE(review): payload_len is a 16-bit big-endian value stored into
	 * the 32-bit len field; the 16-bit-word sum should be unaffected by
	 * its placement within the field, but confirm on big-endian hosts. */
	psd_hdr.len       = ip_hdr->payload_len;

	return get_16b_sum((uint16_t*)&psd_hdr, sizeof(struct ipv6_psd_header));
}
+
+static inline uint16_t
+get_ipv4_udptcp_checksum(struct ipv4_hdr *ipv4_hdr, uint16_t *l4_hdr)
+{
+       uint32_t cksum;
+       uint32_t l4_len;
+
+       l4_len = rte_be_to_cpu_16(ipv4_hdr->total_length) - sizeof(struct ipv4_hdr);
+
+       cksum = get_16b_sum(l4_hdr, l4_len);
+       cksum += get_ipv4_psd_sum(ipv4_hdr);
+
+       cksum = ((cksum & 0xffff0000) >> 16) + (cksum & 0xffff);
+       cksum = (~cksum) & 0xffff;
+       if (cksum == 0)
+               cksum = 0xffff;
+       return (uint16_t)cksum;
+
+}
+
+static inline uint16_t
+get_ipv6_udptcp_checksum(struct ipv6_hdr *ipv6_hdr, uint16_t *l4_hdr)
+{
+       uint32_t cksum;
+       uint32_t l4_len;
+
+       l4_len = rte_be_to_cpu_16(ipv6_hdr->payload_len);
+
+       cksum = get_16b_sum(l4_hdr, l4_len);
+       cksum += get_ipv6_psd_sum(ipv6_hdr);
+
+       cksum = ((cksum & 0xffff0000) >> 16) + (cksum & 0xffff);
+       cksum = (~cksum) & 0xffff;
+       if (cksum == 0)
+               cksum = 0xffff;
+
+       return (uint16_t)cksum;
+}
+
+
+/*
+ * Forwarding of packets. Change the checksum field with HW or SW methods
+ * The HW/SW method selection depends on the ol_flags on every packet
+ */
static void
pkt_burst_checksum_forward(struct fwd_stream *fs)
{
	struct rte_mbuf  *pkts_burst[MAX_PKT_BURST];
	struct rte_port  *txp;
	struct rte_mbuf  *mb;
	struct ether_hdr *eth_hdr;
	struct ipv4_hdr  *ipv4_hdr;
	struct ipv6_hdr  *ipv6_hdr;
	struct udp_hdr   *udp_hdr;
	struct tcp_hdr   *tcp_hdr;
	struct sctp_hdr  *sctp_hdr;

	uint16_t nb_rx;
	uint16_t nb_tx;
	uint16_t i;
	uint16_t ol_flags;	/* offload flags written back to each mbuf */
	uint16_t pkt_ol_flags;	/* RX offload flags of the current mbuf */
	uint16_t tx_ol_flags;	/* per-port checksum-offload request bits */
	uint16_t l4_proto;
	uint8_t  l2_len;
	uint8_t  l3_len;

	uint32_t rx_bad_ip_csum;
	uint32_t rx_bad_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t start_tsc;
	uint64_t end_tsc;
	uint64_t core_cycles;
#endif

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	start_tsc = rte_rdtsc();
#endif

	/*
	 * Receive a burst of packets and forward them.
	 */
	nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue, pkts_burst,
				 nb_pkt_per_burst);
	if (unlikely(nb_rx == 0))
		return;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	fs->rx_burst_stats.pkt_burst_spread[nb_rx]++;
#endif
	fs->rx_packets += nb_rx;
	rx_bad_ip_csum = 0;
	rx_bad_l4_csum = 0;

	/* The checksum-offload configuration is read from the TX port. */
	txp = &ports[fs->tx_port];
	tx_ol_flags = txp->tx_ol_flags;

	for (i = 0; i < nb_rx; i++) {

		mb = pkts_burst[i];
		l2_len  = sizeof(struct ether_hdr);
		pkt_ol_flags = mb->ol_flags;
		/* Keep RX flags, drop any stale L4-checksum request bits. */
		ol_flags = (uint16_t) (pkt_ol_flags & (~PKT_TX_L4_MASK));

		eth_hdr = (struct ether_hdr *) mb->pkt.data;
		if (rte_be_to_cpu_16(eth_hdr->ether_type) == ETHER_TYPE_VLAN) {
			/* Only allow single VLAN label here */
			l2_len  += sizeof(struct vlan_hdr);
		}

		/* Update the L3/L4 checksum error packet count  */
		rx_bad_ip_csum += (uint16_t) ((pkt_ol_flags & PKT_RX_IP_CKSUM_BAD) != 0);
		rx_bad_l4_csum += (uint16_t) ((pkt_ol_flags & PKT_RX_L4_CKSUM_BAD) != 0);

		/*
		 * Simplify the protocol parsing
		 * Assuming the incoming packets format as
		 *      Ethernet2 + optional single VLAN
		 *      + ipv4 or ipv6
		 *      + udp or tcp or sctp or others
		 */
		if (pkt_ol_flags & PKT_RX_IPV4_HDR) {

			/* Do not support ipv4 option field */
			l3_len = sizeof(struct ipv4_hdr) ;

			ipv4_hdr = (struct ipv4_hdr *) (rte_pktmbuf_mtod(mb,
					unsigned char *) + l2_len);

			l4_proto = ipv4_hdr->next_proto_id;

			/* Do not delete, this is required by HW*/
			ipv4_hdr->hdr_checksum = 0;

			/* Bit 0 of tx_ol_flags requests HW IP checksum
			 * (see tx_cksum_set's low-4-bit mask). */
			if (tx_ol_flags & 0x1) {
				/* HW checksum */
				ol_flags |= PKT_TX_IP_CKSUM;
			}
			else {
				/* SW checksum calculation */
				/* NOTE(review): the source address is
				 * incremented before checksumming, so the
				 * forwarded packet differs from the received
				 * one — confirm this is intentional. */
				ipv4_hdr->src_addr++;
				ipv4_hdr->hdr_checksum = get_ipv4_cksum(ipv4_hdr);
			}

			if (l4_proto == IPPROTO_UDP) {
				udp_hdr = (struct udp_hdr*) (rte_pktmbuf_mtod(mb,
						unsigned char *) + l2_len + l3_len);
				if (tx_ol_flags & 0x2) {
					/* HW Offload */
					ol_flags |= PKT_TX_UDP_CKSUM;
					/* Pseudo header sum need be set properly */
					udp_hdr->dgram_cksum = get_ipv4_psd_sum(ipv4_hdr);
				}
				else {
					/* SW Implementation, clear checksum field first */
					udp_hdr->dgram_cksum = 0;
					udp_hdr->dgram_cksum = get_ipv4_udptcp_checksum(ipv4_hdr,
							(uint16_t*)udp_hdr);
				}
			}
			else if (l4_proto == IPPROTO_TCP){
				tcp_hdr = (struct tcp_hdr*) (rte_pktmbuf_mtod(mb,
						unsigned char *) + l2_len + l3_len);
				if (tx_ol_flags & 0x4) {
					ol_flags |= PKT_TX_TCP_CKSUM;
					tcp_hdr->cksum = get_ipv4_psd_sum(ipv4_hdr);
				}
				else {
					tcp_hdr->cksum = 0;
					tcp_hdr->cksum = get_ipv4_udptcp_checksum(ipv4_hdr,
							(uint16_t*)tcp_hdr);
				}
			}
			else if (l4_proto == IPPROTO_SCTP) {
				sctp_hdr = (struct sctp_hdr*) (rte_pktmbuf_mtod(mb,
						unsigned char *) + l2_len + l3_len);

				if (tx_ol_flags & 0x8) {
					ol_flags |= PKT_TX_SCTP_CKSUM;
					sctp_hdr->cksum = 0;

					/* Sanity check, only number of 4 bytes supported */
					if ((rte_be_to_cpu_16(ipv4_hdr->total_length) % 4) != 0)
						printf("sctp payload must be a multiple "
							"of 4 bytes for checksum offload");
				}
				else {
					sctp_hdr->cksum = 0;
					/* CRC32c sample code available in RFC3309 */
				}
			}
			/* End of L4 Handling*/
		}

		/* NOTE(review): every packet without PKT_RX_IPV4_HDR is
		 * parsed as IPv6 here — non-IP frames are not filtered out. */
		else {
			ipv6_hdr = (struct ipv6_hdr *) (rte_pktmbuf_mtod(mb,
					unsigned char *) + l2_len);
			l3_len = sizeof(struct ipv6_hdr) ;
			l4_proto = ipv6_hdr->proto;

			if (l4_proto == IPPROTO_UDP) {
				udp_hdr = (struct udp_hdr*) (rte_pktmbuf_mtod(mb,
						unsigned char *) + l2_len + l3_len);
				if (tx_ol_flags & 0x2) {
					/* HW Offload */
					ol_flags |= PKT_TX_UDP_CKSUM;
					udp_hdr->dgram_cksum = get_ipv6_psd_sum(ipv6_hdr);
				}
				else {
					/* SW Implementation */
					/* checksum field need be clear first */
					udp_hdr->dgram_cksum = 0;
					udp_hdr->dgram_cksum = get_ipv6_udptcp_checksum(ipv6_hdr,
							(uint16_t*)udp_hdr);
				}
			}
			else if (l4_proto == IPPROTO_TCP) {
				tcp_hdr = (struct tcp_hdr*) (rte_pktmbuf_mtod(mb,
						unsigned char *) + l2_len + l3_len);
				if (tx_ol_flags & 0x4) {
					ol_flags |= PKT_TX_TCP_CKSUM;
					tcp_hdr->cksum = get_ipv6_psd_sum(ipv6_hdr);
				}
				else {
					tcp_hdr->cksum = 0;
					tcp_hdr->cksum = get_ipv6_udptcp_checksum(ipv6_hdr,
							(uint16_t*)tcp_hdr);
				}
			}
			else if (l4_proto == IPPROTO_SCTP) {
				sctp_hdr = (struct sctp_hdr*) (rte_pktmbuf_mtod(mb,
						unsigned char *) + l2_len + l3_len);

				if (tx_ol_flags & 0x8) {
					ol_flags |= PKT_TX_SCTP_CKSUM;
					sctp_hdr->cksum = 0;
					/* Sanity check, only number of 4 bytes supported by HW */
					if ((rte_be_to_cpu_16(ipv6_hdr->payload_len) % 4) != 0)
						printf("sctp payload must be a multiple "
							"of 4 bytes for checksum offload");
				}
				else {
					/* CRC32c sample code available in RFC3309 */
					sctp_hdr->cksum = 0;
				}
			} else {
				/* NOTE(review): this message looks unrelated to
				 * checksum forwarding — confirm intent. */
				printf("Test flow control for 1G PMD \n");
			}
			/* End of L4 Handling*/
		}

		/* Combine the packet header writes. VLAN is not considered here. */
		mb->pkt.l2_len = l2_len;
		mb->pkt.l3_len = l3_len;
		mb->ol_flags = ol_flags;
	}
	nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_rx);
	fs->tx_packets += nb_tx;
	fs->rx_bad_ip_csum += rx_bad_ip_csum;
	fs->rx_bad_l4_csum += rx_bad_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	fs->tx_burst_stats.pkt_burst_spread[nb_tx]++;
#endif
	/* Free the mbufs the driver did not accept for transmission. */
	if (unlikely(nb_tx < nb_rx)) {
		fs->fwd_dropped += (nb_rx - nb_tx);
		do {
			rte_pktmbuf_free(pkts_burst[nb_tx]);
		} while (++nb_tx < nb_rx);
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	end_tsc = rte_rdtsc();
	core_cycles = (end_tsc - start_tsc);
	fs->core_cycles = (uint64_t) (fs->core_cycles + core_cycles);
#endif
}
+
+
/* Forwarding engine exposing the checksum-forwarding mode ("csum");
 * no per-port begin/end hooks are needed. */
struct fwd_engine csum_fwd_engine = {
	.fwd_mode_name  = "csum",
	.port_fwd_begin = NULL,
	.port_fwd_end   = NULL,
	.packet_fwd     = pkt_burst_checksum_forward,
};
+
diff --git a/app/test-pmd/ieee1588fwd.c b/app/test-pmd/ieee1588fwd.c
new file mode 100644 (file)
index 0000000..1fbc554
--- /dev/null
@@ -0,0 +1,657 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <stdarg.h>
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+#include <stdint.h>
+#include <unistd.h>
+#include <inttypes.h>
+
+#include <sys/queue.h>
+#include <sys/stat.h>
+
+#include <rte_common.h>
+#include <rte_byteorder.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_cycles.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_launch.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_ring.h>
+#include <rte_memory.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_string_fns.h>
+
+#include "testpmd.h"
+
+/**
+ * The structure of a PTP V2 packet.
+ *
+ * Only the minimum fields used by the ieee1588 test are represented.
+ */
struct ptpv2_msg {
	uint8_t msg_id;     /**< message type, one of the PTP_*_MESSAGE ids */
	uint8_t version;    /**< must be 0x02 */
	uint8_t unused[34]; /**< remainder of the header, ignored by the test */
};
+#define PTP_SYNC_MESSAGE                0x0
+#define PTP_DELAY_REQ_MESSAGE           0x1
+#define PTP_PATH_DELAY_REQ_MESSAGE      0x2
+#define PTP_PATH_DELAY_RESP_MESSAGE     0x3
+#define PTP_FOLLOWUP_MESSAGE            0x8
+#define PTP_DELAY_RESP_MESSAGE          0x9
+#define PTP_PATH_DELAY_FOLLOWUP_MESSAGE 0xA
+#define PTP_ANNOUNCE_MESSAGE            0xB
+#define PTP_SIGNALLING_MESSAGE          0xC
+#define PTP_MANAGEMENT_MESSAGE          0xD
+
+/*
+ * Forwarding of IEEE1588 Precise Time Protocol (PTP) packets.
+ *
+ * In this mode, packets are received one by one and are expected to be
+ * PTP V2 L2 Ethernet frames (with the specific Ethernet type "0x88F7")
+ * containing PTP "sync" messages (version 2 at offset 1, and message ID
+ * 0 at offset 0).
+ *
+ * Check that each received packet is a IEEE1588 PTP V2 packet of type
+ * PTP_SYNC_MESSAGE, and that it has been identified and timestamped
+ * by the hardware.
+ * Check that the value of the last RX timestamp recorded by the controller
+ * is greater than the previous one.
+ *
+ * If everything is OK, send the received packet back on the same port,
+ * requesting for it to be timestamped by the hardware.
+ * Check that the value of the last TX timestamp recorded by the controller
+ * is greater than the previous one.
+ */
+
+/*
+ * 1GbE 82576 Kawela registers used for IEEE1588 hardware support
+ */
+#define IGBE_82576_ETQF(n) (0x05CB0 + (4 * (n)))
+#define IGBE_82576_ETQF_FILTER_ENABLE  (1 << 26)
+#define IGBE_82576_ETQF_1588_TIMESTAMP (1 << 30)
+
+#define IGBE_82576_TSYNCRXCTL  0x0B620
+#define IGBE_82576_TSYNCRXCTL_RXTS_ENABLE (1 << 4)
+
+#define IGBE_82576_RXSTMPL     0x0B624
+#define IGBE_82576_RXSTMPH     0x0B628
+#define IGBE_82576_RXSATRL     0x0B62C
+#define IGBE_82576_RXSATRH     0x0B630
+#define IGBE_82576_TSYNCTXCTL  0x0B614
+#define IGBE_82576_TSYNCTXCTL_TXTS_ENABLE (1 << 4)
+
+#define IGBE_82576_TXSTMPL     0x0B618
+#define IGBE_82576_TXSTMPH     0x0B61C
+#define IGBE_82576_SYSTIML     0x0B600
+#define IGBE_82576_SYSTIMH     0x0B604
+#define IGBE_82576_TIMINCA     0x0B608
+#define IGBE_82576_TIMADJL     0x0B60C
+#define IGBE_82576_TIMADJH     0x0B610
+#define IGBE_82576_TSAUXC      0x0B640
+#define IGBE_82576_TRGTTIML0   0x0B644
+#define IGBE_82576_TRGTTIMH0   0x0B648
+#define IGBE_82576_TRGTTIML1   0x0B64C
+#define IGBE_82576_TRGTTIMH1   0x0B650
+#define IGBE_82576_AUXSTMPL0   0x0B65C
+#define IGBE_82576_AUXSTMPH0   0x0B660
+#define IGBE_82576_AUXSTMPL1   0x0B664
+#define IGBE_82576_AUXSTMPH1   0x0B668
+#define IGBE_82576_TSYNCRXCFG  0x05F50
+#define IGBE_82576_TSSDP       0x0003C
+
+/*
+ * 10GbE 82599 Niantic registers used for IEEE1588 hardware support
+ */
+#define IXGBE_82599_ETQF(n) (0x05128 + (4 * (n)))
+#define IXGBE_82599_ETQF_FILTER_ENABLE  (1 << 31)
+#define IXGBE_82599_ETQF_1588_TIMESTAMP (1 << 30)
+
+#define IXGBE_82599_TSYNCRXCTL 0x05188
+#define IXGBE_82599_TSYNCRXCTL_RXTS_ENABLE (1 << 4)
+
+#define IXGBE_82599_RXSTMPL    0x051E8
+#define IXGBE_82599_RXSTMPH    0x051A4
+#define IXGBE_82599_RXSATRL    0x051A0
+#define IXGBE_82599_RXSATRH    0x051A8
+#define IXGBE_82599_RXMTRL     0x05120
+#define IXGBE_82599_TSYNCTXCTL 0x08C00
+#define IXGBE_82599_TSYNCTXCTL_TXTS_ENABLE (1 << 4)
+
+#define IXGBE_82599_TXSTMPL    0x08C04
+#define IXGBE_82599_TXSTMPH    0x08C08
+#define IXGBE_82599_SYSTIML    0x08C0C
+#define IXGBE_82599_SYSTIMH    0x08C10
+#define IXGBE_82599_TIMINCA    0x08C14
+#define IXGBE_82599_TIMADJL    0x08C18
+#define IXGBE_82599_TIMADJH    0x08C1C
+#define IXGBE_82599_TSAUXC     0x08C20
+#define IXGBE_82599_TRGTTIML0  0x08C24
+#define IXGBE_82599_TRGTTIMH0  0x08C28
+#define IXGBE_82599_TRGTTIML1  0x08C2C
+#define IXGBE_82599_TRGTTIMH1  0x08C30
+#define IXGBE_82599_AUXSTMPL0  0x08C3C
+#define IXGBE_82599_AUXSTMPH0  0x08C40
+#define IXGBE_82599_AUXSTMPL1  0x08C44
+#define IXGBE_82599_AUXSTMPH1  0x08C48
+
+/**
+ * Mandatory ETQF register for IEEE1588 packets filter.
+ */
+#define ETQF_FILTER_1588_REG 3
+
+/**
+ * Recommended value for increment and period of
+ * the Increment Attribute Register.
+ */
+#define IEEE1588_TIMINCA_INIT ((0x02 << 24) | 0x00F42400)
+
+/**
+ * Data structure with pointers to port-specific functions.
+ */
+typedef void (*ieee1588_start_t)(portid_t pi); /**< Start IEEE1588 feature. */
+typedef void (*ieee1588_stop_t)(portid_t pi);  /**< Stop IEEE1588 feature.  */
+typedef int  (*tmst_read_t)(portid_t pi, uint64_t *tmst); /**< Read TMST regs */
+
struct port_ieee1588_ops {
	ieee1588_start_t ieee1588_start; /**< enable IEEE1588 support on the port */
	ieee1588_stop_t  ieee1588_stop;  /**< disable IEEE1588 support on the port */
	tmst_read_t      rx_tmst_read;   /**< read the last RX timestamp registers */
	tmst_read_t      tx_tmst_read;   /**< read the last TX timestamp registers */
};
+
+/**
+ * 1GbE 82576 IEEE1588 operations.
+ */
+/*
+ * Enable IEEE1588 (PTP) support on a 82576 port: start the System Time
+ * clock, install the L2 EtherType filter for 1588 frames, and turn on
+ * RX and TX hardware timestamping.
+ *
+ * @param pi
+ *   The port identifier.
+ */
+static void
+igbe_82576_ieee1588_start(portid_t pi)
+{
+       uint32_t tsync_ctl; /* scratch for TSYNC[RX|TX]CTL read-modify-write */
+
+       /*
+        * Start incrementation of the System Time registers used to
+        * timestamp PTP packets.
+        * NOTE(review): TSAUXC is cleared here, presumably to clear a
+        * "disable SYSTIM count" bit — confirm against the 82576 datasheet.
+        */
+       port_id_pci_reg_write(pi, IGBE_82576_TIMINCA, IEEE1588_TIMINCA_INIT);
+       port_id_pci_reg_write(pi, IGBE_82576_TSAUXC, 0);
+
+       /*
+        * Enable L2 filtering of IEEE1588 Ethernet frame types.
+        */
+       port_id_pci_reg_write(pi, IGBE_82576_ETQF(ETQF_FILTER_1588_REG),
+                             (ETHER_TYPE_1588 |
+                              IGBE_82576_ETQF_FILTER_ENABLE |
+                              IGBE_82576_ETQF_1588_TIMESTAMP));
+
+       /*
+        * Enable timestamping of received PTP packets.
+        */
+       tsync_ctl = port_id_pci_reg_read(pi, IGBE_82576_TSYNCRXCTL);
+       tsync_ctl |= IGBE_82576_TSYNCRXCTL_RXTS_ENABLE;
+       port_id_pci_reg_write(pi, IGBE_82576_TSYNCRXCTL, tsync_ctl);
+
+       /*
+        * Enable Timestamping of transmitted PTP packets.
+        */
+       tsync_ctl = port_id_pci_reg_read(pi, IGBE_82576_TSYNCTXCTL);
+       tsync_ctl |= IGBE_82576_TSYNCTXCTL_TXTS_ENABLE;
+       port_id_pci_reg_write(pi, IGBE_82576_TSYNCTXCTL, tsync_ctl);
+}
+
+/*
+ * Disable IEEE1588 support on a 82576 port, undoing
+ * igbe_82576_ieee1588_start in reverse order: TX timestamping,
+ * RX timestamping, L2 filter, System Time clock.
+ *
+ * @param pi
+ *   The port identifier.
+ */
+static void
+igbe_82576_ieee1588_stop(portid_t pi)
+{
+       uint32_t tsync_ctl; /* scratch for TSYNC[RX|TX]CTL read-modify-write */
+
+       /*
+        * Disable Timestamping of transmitted PTP packets.
+        */
+       tsync_ctl = port_id_pci_reg_read(pi, IGBE_82576_TSYNCTXCTL);
+       tsync_ctl &= ~IGBE_82576_TSYNCTXCTL_TXTS_ENABLE;
+       port_id_pci_reg_write(pi, IGBE_82576_TSYNCTXCTL, tsync_ctl);
+
+       /*
+        * Disable timestamping of received PTP packets.
+        */
+       tsync_ctl = port_id_pci_reg_read(pi, IGBE_82576_TSYNCRXCTL);
+       tsync_ctl &= ~IGBE_82576_TSYNCRXCTL_RXTS_ENABLE;
+       port_id_pci_reg_write(pi, IGBE_82576_TSYNCRXCTL, tsync_ctl);
+
+       /*
+        * Disable L2 filtering of IEEE1588 Ethernet types.
+        */
+       port_id_pci_reg_write(pi, IGBE_82576_ETQF(ETQF_FILTER_1588_REG), 0);
+
+       /*
+        * Stop incrementation of the System Time registers.
+        */
+       port_id_pci_reg_write(pi, IGBE_82576_TIMINCA, 0);
+}
+
+/**
+ * Read the 64-bit RX IEEE1588 timestamp of a 1GbE 82576 port.
+ *
+ * @param pi
+ *   The port identifier.
+ *
+ * @param tmst
+ *   Where to store the 64-bit RX timestamp (RXSTMPH:RXSTMPL).
+ *
+ * @return
+ *   0 on success; -1 when the RXSTMPL and RXSTMPH registers of the port
+ *   do not hold a valid value (TSYNCRXCTL valid bit cleared).
+ */
+static int
+igbe_82576_rx_timestamp_read(portid_t pi, uint64_t *tmst)
+{
+       uint32_t rxctl;
+       uint64_t stmp_lo;
+       uint64_t stmp_hi;
+
+       rxctl = port_id_pci_reg_read(pi, IGBE_82576_TSYNCRXCTL);
+       if (!(rxctl & 0x01))
+               return -1;
+
+       /* Reading RXSTMPH last; combine the two halves into one value. */
+       stmp_lo = port_id_pci_reg_read(pi, IGBE_82576_RXSTMPL);
+       stmp_hi = port_id_pci_reg_read(pi, IGBE_82576_RXSTMPH);
+       *tmst = (stmp_hi << 32) | stmp_lo;
+       return 0;
+}
+
+/**
+ * Read the 64-bit TX IEEE1588 timestamp of a 1GbE 82576 port.
+ *
+ * @param pi
+ *   The port identifier.
+ *
+ * @param tmst
+ *   Where to store the 64-bit TX timestamp (TXSTMPH:TXSTMPL).
+ *
+ * @return
+ *   0 on success; -1 when the TXSTMPL and TXSTMPH registers of the port
+ *   do not hold a valid value (TSYNCTXCTL valid bit cleared).
+ */
+static int
+igbe_82576_tx_timestamp_read(portid_t pi, uint64_t *tmst)
+{
+       uint32_t txctl;
+       uint64_t stmp_lo;
+       uint64_t stmp_hi;
+
+       txctl = port_id_pci_reg_read(pi, IGBE_82576_TSYNCTXCTL);
+       if (!(txctl & 0x01))
+               return -1;
+
+       /* Reading TXSTMPH last; combine the two halves into one value. */
+       stmp_lo = port_id_pci_reg_read(pi, IGBE_82576_TXSTMPL);
+       stmp_hi = port_id_pci_reg_read(pi, IGBE_82576_TXSTMPH);
+       *tmst = (stmp_hi << 32) | stmp_lo;
+       return 0;
+}
+
+/** IEEE1588 operations used for ports driven by the 1GbE igb PMD. */
+static struct port_ieee1588_ops igbe_82576_ieee1588_ops = {
+       .ieee1588_start = igbe_82576_ieee1588_start,
+       .ieee1588_stop  = igbe_82576_ieee1588_stop,
+       .rx_tmst_read   = igbe_82576_rx_timestamp_read,
+       .tx_tmst_read   = igbe_82576_tx_timestamp_read,
+};
+
+/**
+ * 10GbE 82599 IEEE1588 operations.
+ */
+/*
+ * Enable IEEE1588 (PTP) support on a 82599 port: start the System Time
+ * clock, install the L2 EtherType filter for 1588 frames, and turn on
+ * RX and TX hardware timestamping.
+ * NOTE(review): unlike the 82576 variant, no auxiliary control (TSAUXC)
+ * write is performed — presumably not required on 82599; confirm against
+ * the datasheet.
+ *
+ * @param pi
+ *   The port identifier.
+ */
+static void
+ixgbe_82599_ieee1588_start(portid_t pi)
+{
+       uint32_t tsync_ctl; /* scratch for TSYNC[RX|TX]CTL read-modify-write */
+
+       /*
+        * Start incrementation of the System Time registers used to
+        * timestamp PTP packets.
+        */
+       port_id_pci_reg_write(pi, IXGBE_82599_TIMINCA, IEEE1588_TIMINCA_INIT);
+
+       /*
+        * Enable L2 filtering of IEEE1588 Ethernet frame types.
+        */
+       port_id_pci_reg_write(pi, IXGBE_82599_ETQF(ETQF_FILTER_1588_REG),
+                             (ETHER_TYPE_1588 |
+                              IXGBE_82599_ETQF_FILTER_ENABLE |
+                              IXGBE_82599_ETQF_1588_TIMESTAMP));
+
+       /*
+        * Enable timestamping of received PTP packets.
+        */
+       tsync_ctl = port_id_pci_reg_read(pi, IXGBE_82599_TSYNCRXCTL);
+       tsync_ctl |= IXGBE_82599_TSYNCRXCTL_RXTS_ENABLE;
+       port_id_pci_reg_write(pi, IXGBE_82599_TSYNCRXCTL, tsync_ctl);
+
+       /*
+        * Enable Timestamping of transmitted PTP packets.
+        */
+       tsync_ctl = port_id_pci_reg_read(pi, IXGBE_82599_TSYNCTXCTL);
+       tsync_ctl |= IXGBE_82599_TSYNCTXCTL_TXTS_ENABLE;
+       port_id_pci_reg_write(pi, IXGBE_82599_TSYNCTXCTL, tsync_ctl);
+}
+
+/*
+ * Disable IEEE1588 support on a 82599 port, undoing
+ * ixgbe_82599_ieee1588_start in reverse order: TX timestamping,
+ * RX timestamping, L2 filter, System Time clock.
+ *
+ * @param pi
+ *   The port identifier.
+ */
+static void
+ixgbe_82599_ieee1588_stop(portid_t pi)
+{
+       uint32_t tsync_ctl; /* scratch for TSYNC[RX|TX]CTL read-modify-write */
+
+       /*
+        * Disable Timestamping of transmitted PTP packets.
+        */
+       tsync_ctl = port_id_pci_reg_read(pi, IXGBE_82599_TSYNCTXCTL);
+       tsync_ctl &= ~IXGBE_82599_TSYNCTXCTL_TXTS_ENABLE;
+       port_id_pci_reg_write(pi, IXGBE_82599_TSYNCTXCTL, tsync_ctl);
+
+       /*
+        * Disable timestamping of received PTP packets.
+        */
+       tsync_ctl = port_id_pci_reg_read(pi, IXGBE_82599_TSYNCRXCTL);
+       tsync_ctl &= ~IXGBE_82599_TSYNCRXCTL_RXTS_ENABLE;
+       port_id_pci_reg_write(pi, IXGBE_82599_TSYNCRXCTL, tsync_ctl);
+
+       /*
+        * Disable L2 filtering of IEEE1588 Ethernet frame types.
+        */
+       port_id_pci_reg_write(pi, IXGBE_82599_ETQF(ETQF_FILTER_1588_REG), 0);
+
+       /*
+        * Stop incrementation of the System Time registers.
+        */
+       port_id_pci_reg_write(pi, IXGBE_82599_TIMINCA, 0);
+}
+
+/**
+ * Read the 64-bit RX IEEE1588 timestamp of a 10GbE 82599 port.
+ *
+ * @param pi
+ *   The port identifier.
+ *
+ * @param tmst
+ *   Where to store the 64-bit RX timestamp (RXSTMPH:RXSTMPL).
+ *
+ * @return
+ *   0 on success; -1 when the RX timestamp registers of the port do not
+ *   hold a valid value (TSYNCRXCTL valid bit cleared).
+ */
+static int
+ixgbe_82599_rx_timestamp_read(portid_t pi, uint64_t *tmst)
+{
+       uint32_t rxctl;
+       uint64_t stmp_lo;
+       uint64_t stmp_hi;
+
+       rxctl = port_id_pci_reg_read(pi, IXGBE_82599_TSYNCRXCTL);
+       if (!(rxctl & 0x01))
+               return -1;
+
+       /* Reading RXSTMPH last; combine the two halves into one value. */
+       stmp_lo = port_id_pci_reg_read(pi, IXGBE_82599_RXSTMPL);
+       stmp_hi = port_id_pci_reg_read(pi, IXGBE_82599_RXSTMPH);
+       *tmst = (stmp_hi << 32) | stmp_lo;
+       return 0;
+}
+
+/**
+ * Read the 64-bit TX IEEE1588 timestamp of a 10GbE 82599 port.
+ *
+ * @param pi
+ *   The port identifier.
+ *
+ * @param tmst
+ *   Where to store the 64-bit TX timestamp (TXSTMPH:TXSTMPL).
+ *
+ * @return
+ *   0 on success; -1 when the TXSTMPL and TXSTMPH registers of the port
+ *   do not hold a valid value (TSYNCTXCTL valid bit cleared).
+ */
+static int
+ixgbe_82599_tx_timestamp_read(portid_t pi, uint64_t *tmst)
+{
+       uint32_t txctl;
+       uint64_t stmp_lo;
+       uint64_t stmp_hi;
+
+       txctl = port_id_pci_reg_read(pi, IXGBE_82599_TSYNCTXCTL);
+       if (!(txctl & 0x01))
+               return -1;
+
+       /* Reading TXSTMPH last; combine the two halves into one value. */
+       stmp_lo = port_id_pci_reg_read(pi, IXGBE_82599_TXSTMPL);
+       stmp_hi = port_id_pci_reg_read(pi, IXGBE_82599_TXSTMPH);
+       *tmst = (stmp_hi << 32) | stmp_lo;
+       return 0;
+}
+
+/** IEEE1588 operations used for ports driven by the 10GbE ixgbe PMD. */
+static struct port_ieee1588_ops ixgbe_82599_ieee1588_ops = {
+       .ieee1588_start = ixgbe_82599_ieee1588_start,
+       .ieee1588_stop  = ixgbe_82599_ieee1588_stop,
+       .rx_tmst_read   = ixgbe_82599_rx_timestamp_read,
+       .tx_tmst_read   = ixgbe_82599_tx_timestamp_read,
+};
+
+/*
+ * Read and print the RX timestamp latched by the hardware for the last
+ * received PTP packet, or report an error when the RX timestamp
+ * registers do not hold a valid value.
+ *
+ * @param pi
+ *   The port identifier.
+ */
+static void
+port_ieee1588_rx_timestamp_check(portid_t pi)
+{
+       struct port_ieee1588_ops *ieee_ops;
+       uint64_t rx_tmst;
+
+       ieee_ops = (struct port_ieee1588_ops *)ports[pi].fwd_ctx;
+       if (ieee_ops->rx_tmst_read(pi, &rx_tmst) < 0) {
+               printf("Port %u: RX timestamp registers not valid\n",
+                      (unsigned) pi);
+               return;
+       }
+       /* PRIx64 (not PRIu64): the "0x" prefix promises hexadecimal. */
+       printf("Port %u RX timestamp value 0x%"PRIx64"\n",
+              (unsigned) pi, rx_tmst);
+}
+
+#define MAX_TX_TMST_WAIT_MICROSECS 1000 /**< 1 milli-second */
+
+/*
+ * Poll the TX timestamp registers of a port until they hold a valid
+ * value and print the timestamp, waiting at most
+ * MAX_TX_TMST_WAIT_MICROSECS micro-seconds before reporting an error.
+ *
+ * @param pi
+ *   The port identifier.
+ */
+static void
+port_ieee1588_tx_timestamp_check(portid_t pi)
+{
+       struct port_ieee1588_ops *ieee_ops;
+       uint64_t tx_tmst;
+       unsigned wait_us;
+
+       ieee_ops = (struct port_ieee1588_ops *)ports[pi].fwd_ctx;
+       wait_us = 0;
+       while ((ieee_ops->tx_tmst_read(pi, &tx_tmst) < 0) &&
+              (wait_us < MAX_TX_TMST_WAIT_MICROSECS)) {
+               rte_delay_us(1);
+               wait_us++;
+       }
+       if (wait_us >= MAX_TX_TMST_WAIT_MICROSECS) {
+               /* trailing space restored: the two string pieces used to
+                * concatenate into "after%u". */
+               printf("Port %u: TX timestamp registers not valid after "
+                      "%u micro-seconds\n",
+                      (unsigned) pi, (unsigned) MAX_TX_TMST_WAIT_MICROSECS);
+               return;
+       }
+       /* PRIx64 (not PRIu64): the "0x" prefix promises hexadecimal. */
+       printf("Port %u TX timestamp value 0x%"PRIx64" validated after "
+              "%u micro-second%s\n",
+              (unsigned) pi, tx_tmst, wait_us,
+              (wait_us == 1) ? "" : "s");
+}
+
+/*
+ * IEEE1588 forwarding: receive one packet at a time and check that it is
+ * a hardware-filtered, hardware-timestamped PTP V2 SYNC Ethernet frame.
+ * Print the RX timestamp, retransmit the packet with a hardware TX
+ * timestamp request, then print the TX timestamp.
+ *
+ * @param fs
+ *   The forwarding stream (RX/TX port and queue, statistics).
+ */
+static void
+ieee1588_packet_fwd(struct fwd_stream *fs)
+{
+       struct rte_mbuf  *mb;
+       struct ether_hdr *eth_hdr;
+       struct ptpv2_msg *ptp_hdr;
+       uint16_t eth_type;
+
+       /*
+        * Receive 1 packet at a time.
+        */
+       if (rte_eth_rx_burst(fs->rx_port, fs->rx_queue, &mb, 1) == 0)
+               return;
+
+       fs->rx_packets += 1;
+
+       /*
+        * Check that the received packet is a PTP packet that was detected
+        * by the hardware.
+        */
+       eth_hdr = (struct ether_hdr *)mb->pkt.data;
+       eth_type = rte_be_to_cpu_16(eth_hdr->ether_type);
+       if (! (mb->ol_flags & PKT_RX_IEEE1588_PTP)) {
+               if (eth_type == ETHER_TYPE_1588) {
+                       printf("Port %u Received PTP packet not filtered"
+                              " by hardware\n",
+                              (unsigned) fs->rx_port);
+               } else {
+                       /* %04x: zero-pad; "%4x" space-padded after "0x". */
+                       printf("Port %u Received non PTP packet type=0x%04x "
+                              "len=%u\n",
+                              (unsigned) fs->rx_port, eth_type,
+                              (unsigned) mb->pkt.pkt_len);
+               }
+               rte_pktmbuf_free(mb);
+               return;
+       }
+       if (eth_type != ETHER_TYPE_1588) {
+               printf("Port %u Received NON PTP packet wrongly"
+                      " detected by hardware\n",
+                      (unsigned) fs->rx_port);
+               rte_pktmbuf_free(mb);
+               return;
+       }
+
+       /*
+        * Check that the received PTP packet is a PTP V2 packet of type
+        * PTP_SYNC_MESSAGE.
+        */
+       ptp_hdr = (struct ptpv2_msg *) ((char *) mb->pkt.data +
+                                       sizeof(struct ether_hdr));
+       if (ptp_hdr->version != 0x02) {
+               printf("Port %u Received PTP V2 Ethernet frame with wrong PTP"
+                      " protocol version 0x%x (should be 0x02)\n",
+                      (unsigned) fs->rx_port, ptp_hdr->version);
+               rte_pktmbuf_free(mb);
+               return;
+       }
+       if (ptp_hdr->msg_id != PTP_SYNC_MESSAGE) {
+               printf("Port %u Received PTP V2 Ethernet frame with unexpected"
+                      " messageID 0x%x (expected 0x0 - PTP_SYNC_MESSAGE)\n",
+                      (unsigned) fs->rx_port, ptp_hdr->msg_id);
+               rte_pktmbuf_free(mb);
+               return;
+       }
+       printf("Port %u IEEE1588 PTP V2 SYNC Message filtered by hardware\n",
+              (unsigned) fs->rx_port);
+
+       /*
+        * Check that the received PTP packet has been timestamped by the
+        * hardware.
+        */
+       if (! (mb->ol_flags & PKT_RX_IEEE1588_TMST)) {
+               printf("Port %u Received PTP packet not timestamped"
+                      " by hardware\n",
+                      (unsigned) fs->rx_port);
+               rte_pktmbuf_free(mb);
+               return;
+       }
+
+       /* Check the RX timestamp */
+       port_ieee1588_rx_timestamp_check(fs->rx_port);
+
+       /* Forward PTP packet with hardware TX timestamp */
+       mb->ol_flags |= PKT_TX_IEEE1588_TMST;
+       if (rte_eth_tx_burst(fs->rx_port, fs->tx_queue, &mb, 1) == 0) {
+               printf("Port %u sent PTP packet dropped\n",
+                      (unsigned) fs->rx_port);
+               fs->fwd_dropped += 1;
+               rte_pktmbuf_free(mb);
+               return;
+       }
+       /*
+        * Count the packet as transmitted only once the TX burst accepted
+        * it, consistently with the io/mac forwarding engines (the old
+        * code counted it even on the drop path above).
+        */
+       fs->tx_packets += 1;
+
+       /*
+        * Check the TX timestamp.
+        */
+       port_ieee1588_tx_timestamp_check(fs->rx_port);
+}
+
+/*
+ * Bind the proper IEEE1588 operation set to the port — igb-driven ports
+ * get the 82576 ops, everything else the 82599 ops — then start the
+ * IEEE1588 feature through it.
+ */
+static void
+port_ieee1588_fwd_begin(portid_t pi)
+{
+       struct port_ieee1588_ops *ops;
+
+       ops = (strcmp(ports[pi].dev_info.driver_name, "rte_igb_pmd") == 0) ?
+               &igbe_82576_ieee1588_ops : &ixgbe_82599_ieee1588_ops;
+       ports[pi].fwd_ctx = ops;
+       ops->ieee1588_start(pi);
+}
+
+/*
+ * Stop the IEEE1588 feature through the operation set that
+ * port_ieee1588_fwd_begin attached to the port.
+ */
+static void
+port_ieee1588_fwd_end(portid_t pi)
+{
+       ((struct port_ieee1588_ops *)ports[pi].fwd_ctx)->ieee1588_stop(pi);
+}
+
+/** Forwarding engine validating and forwarding IEEE1588 PTP SYNC frames;
+ *  selected through the "ieee1588" forwarding mode name. */
+struct fwd_engine ieee1588_fwd_engine = {
+       .fwd_mode_name  = "ieee1588",
+       .port_fwd_begin = port_ieee1588_fwd_begin,
+       .port_fwd_end   = port_ieee1588_fwd_end,
+       .packet_fwd     = ieee1588_packet_fwd,
+};
diff --git a/app/test-pmd/iofwd.c b/app/test-pmd/iofwd.c
new file mode 100644 (file)
index 0000000..3f29f6d
--- /dev/null
@@ -0,0 +1,131 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <stdarg.h>
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+#include <stdint.h>
+#include <unistd.h>
+#include <inttypes.h>
+
+#include <sys/queue.h>
+#include <sys/stat.h>
+
+#include <rte_common.h>
+#include <rte_byteorder.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_cycles.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_launch.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_ring.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_string_fns.h>
+
+#include "testpmd.h"
+
+/*
+ * Forwarding of packets in I/O mode.
+ * Forward packets "as-is".
+ * This is the fastest possible forwarding operation, as it does not
+ * access the packets' data.
+ */
+static void
+pkt_burst_io_forward(struct fwd_stream *fs)
+{
+       struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
+       uint16_t nb_rx;
+       uint16_t nb_tx;
+#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
+       uint64_t start_tsc;
+       uint64_t end_tsc;
+
+       start_tsc = rte_rdtsc();
+#endif
+
+       /* Pull in a burst of packets from the RX queue. */
+       nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue, pkts_burst,
+                                nb_pkt_per_burst);
+       if (unlikely(nb_rx == 0))
+               return;
+       fs->rx_packets += nb_rx;
+#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
+       fs->rx_burst_stats.pkt_burst_spread[nb_rx]++;
+#endif
+
+       /* Push the very same mbufs, untouched, out on the TX queue. */
+       nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_rx);
+       fs->tx_packets += nb_tx;
+#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
+       fs->tx_burst_stats.pkt_burst_spread[nb_tx]++;
+#endif
+       /* Free whatever the TX burst could not accept. */
+       if (unlikely(nb_tx < nb_rx)) {
+               fs->fwd_dropped += (nb_rx - nb_tx);
+               while (nb_tx < nb_rx)
+                       rte_pktmbuf_free(pkts_burst[nb_tx++]);
+       }
+#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
+       end_tsc = rte_rdtsc();
+       fs->core_cycles += (end_tsc - start_tsc);
+#endif
+}
+
+/** I/O (pass-through) forwarding engine; selected through the "io"
+ *  forwarding mode name. No per-port begin/end hooks are needed. */
+struct fwd_engine io_fwd_engine = {
+       .fwd_mode_name  = "io",
+       .port_fwd_begin = NULL,
+       .port_fwd_end   = NULL,
+       .packet_fwd     = pkt_burst_io_forward,
+};
diff --git a/app/test-pmd/macfwd.c b/app/test-pmd/macfwd.c
new file mode 100644 (file)
index 0000000..8f31e05
--- /dev/null
@@ -0,0 +1,148 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <stdarg.h>
+#include <string.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <unistd.h>
+#include <inttypes.h>
+
+#include <sys/queue.h>
+#include <sys/stat.h>
+
+#include <rte_common.h>
+#include <rte_byteorder.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_cycles.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_memzone.h>
+#include <rte_launch.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_ring.h>
+#include <rte_memory.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_ip.h>
+#include <rte_string_fns.h>
+
+#include "testpmd.h"
+
+/*
+ * Forwarding of packets in MAC mode.
+ * Change the source and the destination Ethernet addresses of packets
+ * before forwarding them.
+ */
+static void
+pkt_burst_mac_forward(struct fwd_stream *fs)
+{
+       struct rte_mbuf  *pkts_burst[MAX_PKT_BURST];
+       struct rte_port  *txp;
+       struct rte_mbuf  *mb;
+       struct ether_hdr *eth_hdr;
+       uint16_t nb_rx;
+       uint16_t nb_tx;
+       uint16_t i;
+#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
+       uint64_t start_tsc;
+       uint64_t end_tsc;
+
+       start_tsc = rte_rdtsc();
+#endif
+
+       /* Pull in a burst of packets from the RX queue. */
+       nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue, pkts_burst,
+                                nb_pkt_per_burst);
+       if (unlikely(nb_rx == 0))
+               return;
+       fs->rx_packets += nb_rx;
+#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
+       fs->rx_burst_stats.pkt_burst_spread[nb_rx]++;
+#endif
+
+       /*
+        * Rewrite the source/destination Ethernet addresses and the TX
+        * offload metadata of every received packet.
+        */
+       txp = &ports[fs->tx_port];
+       for (i = 0; i < nb_rx; i++) {
+               mb = pkts_burst[i];
+               eth_hdr = (struct ether_hdr *) mb->pkt.data;
+               ether_addr_copy(&peer_eth_addrs[fs->peer_addr],
+                               &eth_hdr->d_addr);
+               ether_addr_copy(&txp->eth_addr, &eth_hdr->s_addr);
+               mb->ol_flags = txp->tx_ol_flags;
+               mb->pkt.l2_len = sizeof(struct ether_hdr);
+               mb->pkt.l3_len = sizeof(struct ipv4_hdr);
+               mb->pkt.vlan_tci = txp->tx_vlan_id;
+       }
+
+       nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_rx);
+       fs->tx_packets += nb_tx;
+#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
+       fs->tx_burst_stats.pkt_burst_spread[nb_tx]++;
+#endif
+       /* Free whatever the TX burst could not accept. */
+       if (unlikely(nb_tx < nb_rx)) {
+               fs->fwd_dropped += (nb_rx - nb_tx);
+               while (nb_tx < nb_rx)
+                       rte_pktmbuf_free(pkts_burst[nb_tx++]);
+       }
+#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
+       end_tsc = rte_rdtsc();
+       fs->core_cycles += (end_tsc - start_tsc);
+#endif
+}
+
+/** MAC-rewriting forwarding engine; selected through the "mac"
+ *  forwarding mode name. No per-port begin/end hooks are needed. */
+struct fwd_engine mac_fwd_engine = {
+       .fwd_mode_name  = "mac",
+       .port_fwd_begin = NULL,
+       .port_fwd_end   = NULL,
+       .packet_fwd     = pkt_burst_mac_forward,
+};
diff --git a/app/test-pmd/parameters.c b/app/test-pmd/parameters.c
new file mode 100644 (file)
index 0000000..4c559ef
--- /dev/null
@@ -0,0 +1,646 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <errno.h>
+#include <getopt.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <signal.h>
+#include <string.h>
+#include <time.h>
+#include <fcntl.h>
+#include <sys/types.h>
+#include <errno.h>
+
+#include <sys/queue.h>
+#include <sys/stat.h>
+
+#include <stdint.h>
+#include <unistd.h>
+#include <inttypes.h>
+
+#include <rte_common.h>
+#include <rte_byteorder.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_cycles.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_launch.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_ring.h>
+#include <rte_mempool.h>
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_string_fns.h>
+#include <cmdline_parse.h>
+#include <cmdline_parse_etheraddr.h>
+
+#include "testpmd.h"
+
+/*
+ * Print the testpmd command-line help text.
+ *
+ * progname: argv[0] of the running binary, echoed in the usage summary.
+ */
+static void
+usage(char* progname)
+{
+       printf("usage: %s [--interactive|-i] [--help|-h] | ["
+              "--coremask=COREMASK --portmask=PORTMASK --numa "
+              "--eth-peers-configfile= | "
+              "--eth-peer=X,M:M:M:M:M:M | --nb-cores= | --nb-ports= | "
+              "--pkt-filter-mode= |"
+              "--rss-ip | --rss-udp | "
+              "--rxpt= | --rxht= | --rxwt= | --rxfreet= | "
+              "--txpt= | --txht= | --txwt= | --txfreet= | "
+              "--txrst= ]\n",
+              progname);
+       printf("  --interactive: run in interactive mode\n");
+       printf("  --help:   display this message and quit\n");
+       printf("  --eth-peers-configfile=name of file with ethernet addresses "
+              "of peer ports\n");
+       printf("  --eth-peer=X,M:M:M:M:M:M set the mac address of the X peer "
+              "port (0 <= X < %d)\n", RTE_MAX_ETHPORTS);
+       printf("  --nb-cores=N set the number of forwarding cores"
+              " (1 <= N <= %d)\n", nb_lcores);
+       printf("  --nb-ports=N set the number of forwarding ports"
+              " (1 <= N <= %d)\n", nb_ports);
+       printf("  --coremask=COREMASK: hexadecimal bitmask of cores running "
+              "the packet forwarding test\n");
+       printf("  --portmask=PORTMASK: hexadecimal bitmask of ports used "
+              "by the packet forwarding test\n");
+       printf("  --numa: enable NUMA-aware allocation of RX/TX rings and of "
+              " RX memory buffers (mbufs)\n");
+       printf("  --mbuf-size=N set the data size of mbuf to N bytes\n");
+       printf("  --max-pkt-len=N set the maximum size of packet to N bytes\n");
+       printf("  --pkt-filter-mode=N: set Flow director mode "
+              "( N: none (default mode) or signature or perfect)\n");
+       printf("  --pkt-filter-report-hash=N: set Flow director report mode "
+              "( N: none  or match (default) or always)\n");
+       printf("  --pkt-filter-size=N: set Flow director mode "
+              "( N: 64K (default mode) or 128K or 256K)\n");
+       printf("  --pkt-filter-flexbytes-offset=N: set flexbytes-offset."
+              " The offset is defined in word units counted from the"
+              " first byte of the destination Ethernet MAC address."
+              " 0 <= N <= 32\n");
+       printf("  --pkt-filter-drop-queue=N: set drop-queue."
+              " In perfect mode, when you add a rule with queue -1"
+              " the packet will be enqueued into the rx drop-queue."
+              " If the drop-queue doesn't exist, the packet is dropped."
+              " By default drop-queue=127\n");
+       printf("  --crc-strip: enable CRC stripping by hardware\n");
+       printf("  --enable-rx-cksum: enable rx hardware checksum offload\n");
+       printf("  --disable-hw-vlan: disable hardware vlan\n");
+       printf("  --disable-rss: disable rss\n");
+       printf("  --port-topology=N: set port topology (N: paired (default) or "
+              "chained)\n");
+       printf("  --rss-ip:  set RSS functions to IPv4/IPv6 only \n");
+       printf("  --rss-udp: set RSS functions to IPv4/IPv6 + UDP\n");
+       printf("  --rxq=N    set the number of RX queues per port to N\n");
+       printf("  --rxd=N    set the number of descriptors in RX rings to N\n");
+       printf("  --txq=N    set the number of TX queues per port to N\n");
+       printf("  --txd=N    set the number of descriptors in TX rings to N\n");
+       printf("  --burst=N  set the number of packets per burst to N\n");
+       printf("  --mbcache=N  set the cache of mbuf memory pool to N\n");
+       printf("  --rxpt=N   set prefetch threshold register of RX rings to N"
+              " (0 <= N <= 16)\n");
+       printf("  --rxht=N  set the host threshold register of RX rings to N"
+              " (0 <= N <= 16)\n");
+       printf("  --rxfreet=N set the free threshold of RX descriptors to N"
+              " (0 <= N < value of rxd)\n");
+       printf("  --rxwt=N  set the write-back threshold register of RX rings"
+              " to N (0 <= N <= 16)\n");
+       printf("  --txpt=N  set the prefetch threshold register of TX rings"
+              " to N (0 <= N <= 16)\n");
+       printf("  --txht=N  set the host threshold register of TX rings to N"
+              " (0 <= N <= 16)\n");
+       printf("  --txwt=N  set the write-back threshold register of TX rings"
+              " to N (0 <= N <= 16)\n");
+       printf("  --txfreet=N set the transmit free threshold of TX rings to N"
+              " (0 <= N <= value of txd)\n");
+       printf("  --txrst=N set the transmit RS bit threshold of TX rings to N"
+              " (0 <= N <= value of txd)\n");
+}
+
+/*
+ * Load the Ethernet addresses of peer ports from a configuration file,
+ * one address per line, into peer_eth_addrs[].  On success,
+ * nb_peer_eth_addrs is set to the number of addresses read (at most
+ * RTE_MAX_ETHPORTS).
+ *
+ * Returns 0 on success, -1 if the file cannot be opened or a line does
+ * not parse as an Ethernet address.
+ */
+static int
+init_peer_eth_addrs(char *config_filename)
+{
+       FILE *config_file;
+       portid_t i;
+       char buf[50];
+
+       config_file = fopen(config_filename, "r");
+       if (config_file == NULL) {
+               /* perror appends ": <strerror(errno)>" and a newline itself;
+                * the message names the actual file being opened. */
+               perror("Failed to open eth-peers configuration file");
+               return -1;
+       }
+
+       for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
+
+               if (fgets(buf, sizeof(buf), config_file) == NULL)
+                       break;
+
+               if (cmdline_parse_etheraddr(NULL, buf, &peer_eth_addrs[i]) < 0) {
+                       printf("bad format of mac address on line %d\n",
+                              (int) i); /* explicit cast: portid_t is narrow */
+                       fclose(config_file);
+                       return -1;
+               }
+       }
+       fclose(config_file);
+       nb_peer_eth_addrs = (portid_t) i;
+       return 0;
+}
+
+/*
+ * Parse the coremask given as argument (hexadecimal string) and set
+ * the global configuration of forwarding cores.
+ */
+static void
+parse_fwd_coremask(const char *coremask)
+{
+       unsigned long long int mask;
+       char *parse_end = NULL;
+
+       /* The string is valid only if conversion consumed at least one
+        * character and stopped exactly on the terminating NUL. */
+       mask = strtoull(coremask, &parse_end, 16);
+       if ((coremask[0] == '\0') || (parse_end == NULL) ||
+           (*parse_end != '\0'))
+               rte_exit(EXIT_FAILURE, "Invalid fwd core mask\n");
+       else
+               set_fwd_lcores_mask((uint64_t) mask);
+}
+
+/*
+ * Parse the portmask given as argument (hexadecimal string) and set
+ * the global configuration of forwarding ports.
+ */
+static void
+parse_fwd_portmask(const char *portmask)
+{
+       char *end;
+       unsigned long long int pm;
+
+       /* parse hexadecimal string; end must land on the terminating NUL */
+       end = NULL;
+       pm = strtoull(portmask, &end, 16);
+       if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
+               rte_exit(EXIT_FAILURE, "Invalid fwd port mask\n");
+       else
+               set_fwd_ports_mask((uint64_t) pm);
+}
+
+/*
+ * Parse the application (non-EAL) command line arguments and update the
+ * global testpmd configuration accordingly.
+ *
+ * Exits through rte_exit() on the first invalid option or value.
+ */
+void
+launch_args_parse(int argc, char** argv)
+{
+       int n, opt;
+       char **argvopt;
+       int opt_idx;
+       static struct option lgopts[] = {
+               { "help",                       0, 0, 0 },
+               { "interactive",                0, 0, 0 },
+               { "eth-peers-configfile",       1, 0, 0 },
+               { "eth-peer",                   1, 0, 0 },
+               /* NOTE(review): "ports" is accepted but has no handler
+                * below; kept for backward compatibility. */
+               { "ports",                      1, 0, 0 },
+               { "nb-cores",                   1, 0, 0 },
+               { "nb-ports",                   1, 0, 0 },
+               { "coremask",                   1, 0, 0 },
+               { "portmask",                   1, 0, 0 },
+               { "numa",                       0, 0, 0 },
+               { "mbuf-size",                  1, 0, 0 },
+               { "max-pkt-len",                1, 0, 0 },
+               { "pkt-filter-mode",            1, 0, 0 },
+               { "pkt-filter-report-hash",     1, 0, 0 },
+               { "pkt-filter-size",            1, 0, 0 },
+               { "pkt-filter-flexbytes-offset",1, 0, 0 },
+               { "pkt-filter-drop-queue",      1, 0, 0 },
+               { "crc-strip",                  0, 0, 0 },
+               /* Entry was missing although usage() documents the flag and
+                * a handler exists below; without it getopt_long rejected
+                * --enable-rx-cksum as an unknown option. */
+               { "enable-rx-cksum",            0, 0, 0 },
+               { "disable-hw-vlan",            0, 0, 0 },
+               { "disable-rss",                0, 0, 0 },
+               { "port-topology",              1, 0, 0 },
+               { "rss-ip",                     0, 0, 0 },
+               { "rss-udp",                    0, 0, 0 },
+               { "rxq",                        1, 0, 0 },
+               { "txq",                        1, 0, 0 },
+               { "rxd",                        1, 0, 0 },
+               { "txd",                        1, 0, 0 },
+               { "burst",                      1, 0, 0 },
+               { "mbcache",                    1, 0, 0 },
+               { "txpt",                       1, 0, 0 },
+               { "txht",                       1, 0, 0 },
+               { "txwt",                       1, 0, 0 },
+               { "txfreet",                    1, 0, 0 },
+               { "txrst",                      1, 0, 0 },
+               { "rxpt",                       1, 0, 0 },
+               { "rxht",                       1, 0, 0 },
+               { "rxwt",                       1, 0, 0 },
+               { "rxfreet",                    1, 0, 0 },
+               { 0, 0, 0, 0 },
+       };
+
+       argvopt = argv;
+
+       while ((opt = getopt_long(argc, argvopt, "ih",
+                                lgopts, &opt_idx)) != EOF) {
+               switch (opt) {
+               case 'i':
+                       printf("Interactive-mode selected\n");
+                       interactive = 1;
+                       break;
+               case 0: /*long options */
+                       if (!strcmp(lgopts[opt_idx].name, "help")) {
+                               usage(argv[0]);
+                               rte_exit(EXIT_SUCCESS, "Displayed help\n");
+                       }
+                       if (!strcmp(lgopts[opt_idx].name, "interactive")) {
+                               printf("Interactive-mode selected\n");
+                               interactive = 1;
+                       }
+                       if (!strcmp(lgopts[opt_idx].name,
+                                   "eth-peers-configfile")) {
+                               if (init_peer_eth_addrs(optarg) != 0)
+                                       rte_exit(EXIT_FAILURE,
+                                                "Cannot open eth-peers configuration file\n");
+                       }
+                       if (!strcmp(lgopts[opt_idx].name, "eth-peer")) {
+                               char *port_end;
+                               uint8_t c, peer_addr[6];
+
+                               errno = 0;
+                               n = strtoul(optarg, &port_end, 10);
+                               /* the port number must be followed by ',' */
+                               if (errno != 0 || port_end == optarg || *port_end++ != ',')
+                                       rte_exit(EXIT_FAILURE,
+                                                "Invalid eth-peer: %s", optarg);
+                               if (n >= RTE_MAX_ETHPORTS)
+                                       rte_exit(EXIT_FAILURE,
+                                                "eth-peer: port %d >= RTE_MAX_ETHPORTS(%d)\n",
+                                                n, RTE_MAX_ETHPORTS);
+
+                               if (cmdline_parse_etheraddr(NULL, port_end, &peer_addr) < 0 )
+                                       rte_exit(EXIT_FAILURE,
+                                                "Invalid ethernet address: %s\n",
+                                                port_end);
+                               for (c = 0; c < 6; c++)
+                                       peer_eth_addrs[n].addr_bytes[c] =
+                                               peer_addr[c];
+                               nb_peer_eth_addrs++;
+                       }
+                       if (!strcmp(lgopts[opt_idx].name, "nb-ports")) {
+                               n = atoi(optarg);
+                               if (n > 0 && n <= nb_ports)
+                                       nb_fwd_ports = (uint8_t) n;
+                               else
+                                       rte_exit(EXIT_FAILURE,
+                                                "nb-ports should be > 0 and <= %d\n",
+                                                nb_ports);
+                       }
+                       if (!strcmp(lgopts[opt_idx].name, "nb-cores")) {
+                               n = atoi(optarg);
+                               if (n > 0 && n <= nb_lcores)
+                                       nb_fwd_lcores = (uint8_t) n;
+                               else
+                                       rte_exit(EXIT_FAILURE,
+                                                "nb-cores should be > 0 and <= %d\n",
+                                                nb_lcores);
+                       }
+                       if (!strcmp(lgopts[opt_idx].name, "coremask"))
+                               parse_fwd_coremask(optarg);
+                       if (!strcmp(lgopts[opt_idx].name, "portmask"))
+                               parse_fwd_portmask(optarg);
+                       if (!strcmp(lgopts[opt_idx].name, "numa"))
+                               numa_support = 1;
+                       if (!strcmp(lgopts[opt_idx].name, "mbuf-size")) {
+                               n = atoi(optarg);
+                               if (n > 0 && n <= 0xFFFF)
+                                       mbuf_data_size = (uint16_t) n;
+                               else
+                                       rte_exit(EXIT_FAILURE,
+                                                "mbuf-size should be > 0 and < 65536\n");
+                       }
+                       if (!strcmp(lgopts[opt_idx].name, "max-pkt-len")) {
+                               n = atoi(optarg);
+                               if (n >= ETHER_MIN_LEN) {
+                                       rx_mode.max_rx_pkt_len = (uint32_t) n;
+                                       /* frames above the standard max need
+                                        * jumbo frame support enabled */
+                                       if (n > ETHER_MAX_LEN)
+                                           rx_mode.jumbo_frame = 1;
+                               } else
+                                       rte_exit(EXIT_FAILURE,
+                                                "Invalid max-pkt-len=%d - should be > %d\n",
+                                                n, ETHER_MIN_LEN);
+                       }
+                       if (!strcmp(lgopts[opt_idx].name, "pkt-filter-mode")) {
+                               if (!strcmp(optarg, "signature"))
+                                       fdir_conf.mode =
+                                               RTE_FDIR_MODE_SIGNATURE;
+                               else if (!strcmp(optarg, "perfect"))
+                                       fdir_conf.mode = RTE_FDIR_MODE_PERFECT;
+                               else if (!strcmp(optarg, "none"))
+                                       fdir_conf.mode = RTE_FDIR_MODE_NONE;
+                               else
+                                       rte_exit(EXIT_FAILURE,
+                                                "pkt-mode-invalid %s invalid - must be: "
+                                                "none, signature or perfect\n",
+                                                optarg);
+                       }
+                       if (!strcmp(lgopts[opt_idx].name,
+                                   "pkt-filter-report-hash")) {
+                               if (!strcmp(optarg, "none"))
+                                       fdir_conf.status =
+                                               RTE_FDIR_NO_REPORT_STATUS;
+                               else if (!strcmp(optarg, "match"))
+                                       fdir_conf.status =
+                                               RTE_FDIR_REPORT_STATUS;
+                               else if (!strcmp(optarg, "always"))
+                                       fdir_conf.status =
+                                               RTE_FDIR_REPORT_STATUS_ALWAYS;
+                               else
+                                       rte_exit(EXIT_FAILURE,
+                                                "pkt-filter-report-hash %s invalid "
+                                                "- must be: none or match or always\n",
+                                                optarg);
+                       }
+                       if (!strcmp(lgopts[opt_idx].name, "pkt-filter-size")) {
+                               if (!strcmp(optarg, "64K"))
+                                       fdir_conf.pballoc =
+                                               RTE_FDIR_PBALLOC_64K;
+                               else if (!strcmp(optarg, "128K"))
+                                       fdir_conf.pballoc =
+                                               RTE_FDIR_PBALLOC_128K;
+                               else if (!strcmp(optarg, "256K"))
+                                       fdir_conf.pballoc =
+                                               RTE_FDIR_PBALLOC_256K;
+                               else
+                                       rte_exit(EXIT_FAILURE, "pkt-filter-size %s invalid -"
+                                                " must be: 64K or 128K or 256K\n",
+                                                optarg);
+                       }
+                       if (!strcmp(lgopts[opt_idx].name,
+                                   "pkt-filter-flexbytes-offset")) {
+                               n = atoi(optarg);
+                               if ( n >= 0 && n <= (int) 32)
+                                       fdir_conf.flexbytes_offset =
+                                               (uint8_t) n;
+                               else
+                                       rte_exit(EXIT_FAILURE,
+                                                "flexbytes %d invalid - must"
+                                                "be  >= 0 && <= 32\n", n);
+                       }
+                       if (!strcmp(lgopts[opt_idx].name,
+                                   "pkt-filter-drop-queue")) {
+                               n = atoi(optarg);
+                               if (n >= 0)
+                                       fdir_conf.drop_queue = (uint8_t) n;
+                               else
+                                       rte_exit(EXIT_FAILURE,
+                                                "drop queue %d invalid - must"
+                                                "be >= 0 \n", n);
+                       }
+                       if (!strcmp(lgopts[opt_idx].name, "crc-strip"))
+                               rx_mode.hw_strip_crc = 1;
+                       if (!strcmp(lgopts[opt_idx].name, "enable-rx-cksum"))
+                               rx_mode.hw_ip_checksum = 1;
+                       if (!strcmp(lgopts[opt_idx].name, "disable-hw-vlan"))
+                               rx_mode.hw_vlan_filter = 0;
+                       if (!strcmp(lgopts[opt_idx].name, "disable-rss"))
+                               rss_hf = 0;
+                       if (!strcmp(lgopts[opt_idx].name, "port-topology")) {
+                               if (!strcmp(optarg, "paired"))
+                                       port_topology = PORT_TOPOLOGY_PAIRED;
+                               else if (!strcmp(optarg, "chained"))
+                                       port_topology = PORT_TOPOLOGY_CHAINED;
+                               else
+                                       rte_exit(EXIT_FAILURE, "port-topology %s invalid -"
+                                                " must be: paired or chained \n",
+                                                optarg);
+                       }
+                       if (!strcmp(lgopts[opt_idx].name, "rss-ip"))
+                               rss_hf = ETH_RSS_IPV4 | ETH_RSS_IPV6;
+                       if (!strcmp(lgopts[opt_idx].name, "rss-udp"))
+                               rss_hf = ETH_RSS_IPV4 | ETH_RSS_IPV6 |
+                                       ETH_RSS_IPV4_UDP;
+                       if (!strcmp(lgopts[opt_idx].name, "rxq")) {
+                               n = atoi(optarg);
+                               if (n >= 1 && n <= (int) MAX_QUEUE_ID)
+                                       nb_rxq = (queueid_t) n;
+                               else
+                                       rte_exit(EXIT_FAILURE, "rxq %d invalid - must be"
+                                                 " >= 1 && <= %d\n", n,
+                                                 (int) MAX_QUEUE_ID);
+                       }
+                       if (!strcmp(lgopts[opt_idx].name, "txq")) {
+                               n = atoi(optarg);
+                               if (n >= 1 && n <= (int) MAX_QUEUE_ID)
+                                       nb_txq = (queueid_t) n;
+                               else
+                                       rte_exit(EXIT_FAILURE, "txq %d invalid - must be"
+                                                 " >= 1 && <= %d\n", n,
+                                                 (int) MAX_QUEUE_ID);
+                       }
+                       /* single deduplicated rxd handler: keeps the stricter
+                        * of the two former copies, which also validates the
+                        * descriptor count against rx_free_thresh */
+                       if (!strcmp(lgopts[opt_idx].name, "rxd")) {
+                               n = atoi(optarg);
+                               if (n > 0) {
+                                       if (rx_free_thresh >= n)
+                                               rte_exit(EXIT_FAILURE,
+                                                        "rxd must be > "
+                                                        "rx_free_thresh(%d)\n",
+                                                        (int)rx_free_thresh);
+                                       else
+                                               nb_rxd = (uint16_t) n;
+                               } else
+                                       rte_exit(EXIT_FAILURE,
+                                                "rxd(%d) invalid - must be > 0\n",
+                                                n);
+                       }
+                       if (!strcmp(lgopts[opt_idx].name, "txd")) {
+                               n = atoi(optarg);
+                               if (n > 0)
+                                       nb_txd = (uint16_t) n;
+                               else
+                                       rte_exit(EXIT_FAILURE, "txd must be in > 0\n");
+                       }
+                       if (!strcmp(lgopts[opt_idx].name, "burst")) {
+                               n = atoi(optarg);
+                               if ((n >= 1) && (n <= MAX_PKT_BURST))
+                                       nb_pkt_per_burst = (uint16_t) n;
+                               else
+                                       rte_exit(EXIT_FAILURE,
+                                                "burst must >= 1 and <= %d]",
+                                                MAX_PKT_BURST);
+                       }
+                       if (!strcmp(lgopts[opt_idx].name, "mbcache")) {
+                               n = atoi(optarg);
+                               if ((n >= 0) &&
+                                   (n <= RTE_MEMPOOL_CACHE_MAX_SIZE))
+                                       mb_mempool_cache = (uint16_t) n;
+                               else
+                                       rte_exit(EXIT_FAILURE,
+                                                "mbcache must be >= 0 and <= %d\n",
+                                                RTE_MEMPOOL_CACHE_MAX_SIZE);
+                       }
+                       if (!strcmp(lgopts[opt_idx].name, "txpt")) {
+                               n = atoi(optarg);
+                               if (n >= 0)
+                                       tx_thresh.pthresh = (uint8_t)n;
+                               else
+                                       rte_exit(EXIT_FAILURE, "txpt must be >= 0\n");
+                       }
+                       if (!strcmp(lgopts[opt_idx].name, "txht")) {
+                               n = atoi(optarg);
+                               if (n >= 0)
+                                       tx_thresh.hthresh = (uint8_t)n;
+                               else
+                                       rte_exit(EXIT_FAILURE, "txht must be >= 0\n");
+                       }
+                       if (!strcmp(lgopts[opt_idx].name, "txwt")) {
+                               n = atoi(optarg);
+                               if (n >= 0)
+                                       tx_thresh.wthresh = (uint8_t)n;
+                               else
+                                       rte_exit(EXIT_FAILURE, "txwt must be >= 0\n");
+                       }
+                       if (!strcmp(lgopts[opt_idx].name, "txfreet")) {
+                               n = atoi(optarg);
+                               if (n >= 0)
+                                       tx_free_thresh = (uint16_t)n;
+                               else
+                                       rte_exit(EXIT_FAILURE, "txfreet must be >= 0\n");
+                       }
+                       if (!strcmp(lgopts[opt_idx].name, "txrst")) {
+                               n = atoi(optarg);
+                               if (n >= 0)
+                                       tx_rs_thresh = (uint16_t)n;
+                               else
+                                       rte_exit(EXIT_FAILURE, "txrst must be >= 0\n");
+                       }
+                       if (!strcmp(lgopts[opt_idx].name, "rxpt")) {
+                               n = atoi(optarg);
+                               if (n >= 0)
+                                       rx_thresh.pthresh = (uint8_t)n;
+                               else
+                                       rte_exit(EXIT_FAILURE, "rxpt must be >= 0\n");
+                       }
+                       if (!strcmp(lgopts[opt_idx].name, "rxht")) {
+                               n = atoi(optarg);
+                               if (n >= 0)
+                                       rx_thresh.hthresh = (uint8_t)n;
+                               else
+                                       rte_exit(EXIT_FAILURE, "rxht must be >= 0\n");
+                       }
+                       if (!strcmp(lgopts[opt_idx].name, "rxwt")) {
+                               n = atoi(optarg);
+                               if (n >= 0)
+                                       rx_thresh.wthresh = (uint8_t)n;
+                               else
+                                       rte_exit(EXIT_FAILURE, "rxwt must be >= 0\n");
+                       }
+                       if (!strcmp(lgopts[opt_idx].name, "rxfreet")) {
+                               n = atoi(optarg);
+                               if (n >= 0)
+                                       rx_free_thresh = (uint16_t)n;
+                               else
+                                       rte_exit(EXIT_FAILURE, "rxfreet must be >= 0\n");
+                       }
+                       break;
+               case 'h':
+                       usage(argv[0]);
+                       rte_exit(EXIT_SUCCESS, "Displayed help\n");
+                       break;
+               default:
+                       usage(argv[0]);
+                       rte_exit(EXIT_FAILURE,
+                                "Command line is incomplete or incorrect\n");
+                       break;
+               }
+       }
+}
diff --git a/app/test-pmd/rxonly.c b/app/test-pmd/rxonly.c
new file mode 100644 (file)
index 0000000..d1b1289
--- /dev/null
@@ -0,0 +1,194 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <stdarg.h>
+#include <string.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <unistd.h>
+#include <inttypes.h>
+
+#include <sys/queue.h>
+#include <sys/stat.h>
+
+#include <rte_common.h>
+#include <rte_byteorder.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_cycles.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_memzone.h>
+#include <rte_launch.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_ring.h>
+#include <rte_memory.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_string_fns.h>
+
+#include "testpmd.h"
+
+#define MAX_PKT_RX_FLAGS 11
+static const char *pkt_rx_flag_names[MAX_PKT_RX_FLAGS] = {
+       "VLAN_PKT",
+       "RSS_HASH",
+       "PKT_RX_FDIR",
+       "IP_CKSUM",
+       "IP_CKSUM_BAD",
+
+       "IPV4_HDR",
+       "IPV4_HDR_EXT",
+       "IPV6_HDR",
+       "IPV6_HDR_EXT",
+
+       "IEEE1588_PTP",
+       "IEEE1588_TMST",
+};
+
+static inline void
+print_ether_addr(const char *what, struct ether_addr *eth_addr)
+{
+       printf("%s%02X:%02X:%02X:%02X:%02X:%02X",
+              what,
+              eth_addr->addr_bytes[0],
+              eth_addr->addr_bytes[1],
+              eth_addr->addr_bytes[2],
+              eth_addr->addr_bytes[3],
+              eth_addr->addr_bytes[4],
+              eth_addr->addr_bytes[5]);
+}
+
+/*
+ * Receive a burst of packets and, when verbose, dump each one.
+ */
+static void
+pkt_burst_receive(struct fwd_stream *fs)
+{
+       struct rte_mbuf  *pkts_burst[MAX_PKT_BURST];
+       struct rte_mbuf  *mb;
+       struct ether_hdr *eth_hdr;
+       uint16_t eth_type;
+       uint16_t ol_flags;
+       uint16_t nb_rx;
+       uint16_t i;
+#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
+       uint64_t start_tsc;
+       uint64_t end_tsc;
+       uint64_t core_cycles;
+#endif
+
+#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
+       start_tsc = rte_rdtsc();
+#endif
+
+       /*
+        * Receive a burst of packets.
+        */
+       nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue, pkts_burst,
+                                nb_pkt_per_burst);
+       if (unlikely(nb_rx == 0))
+               return;
+
+#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
+       fs->rx_burst_stats.pkt_burst_spread[nb_rx]++;
+#endif
+       fs->rx_packets += nb_rx;
+
+       /*
+        * Dump each received packet if verbose_level > 0.
+        */
+       if (verbose_level > 0)
+               printf("port %u/queue %u: received %u packets\n",
+                      (unsigned) fs->rx_port,
+                      (unsigned) fs->rx_queue,
+                      (unsigned) nb_rx);
+       for (i = 0; i < nb_rx; i++) {
+               mb = pkts_burst[i];
+               if (verbose_level == 0) {
+                       rte_pktmbuf_free(mb);
+                       continue;
+               }
+               eth_hdr = (struct ether_hdr *) mb->pkt.data;
+               eth_type = RTE_BE_TO_CPU_16(eth_hdr->ether_type);
+               ol_flags = mb->ol_flags;
+               print_ether_addr("  src=", &eth_hdr->s_addr);
+               print_ether_addr(" - dst=", &eth_hdr->d_addr);
+               printf(" - type=0x%04x - length=%u - nb_segs=%d",
+                      eth_type, (unsigned) mb->pkt.pkt_len,
+                      (int)mb->pkt.nb_segs);
+               if (ol_flags & PKT_RX_RSS_HASH)
+                       printf(" - RSS hash=0x%x", (unsigned) mb->pkt.hash.rss);
+               else if (ol_flags & PKT_RX_FDIR)
+                       printf(" - FDIR hash=0x%x - FDIR id=0x%x ",
+                              mb->pkt.hash.fdir.hash, mb->pkt.hash.fdir.id);
+               if (ol_flags & PKT_RX_VLAN_PKT)
+                       printf(" - VLAN tci=0x%x", mb->pkt.vlan_tci);
+               printf("\n");
+               if (ol_flags != 0) {
+                       int rxf;
+
+                       for (rxf = 0; rxf < MAX_PKT_RX_FLAGS; rxf++) {
+                               if (ol_flags & (1 << rxf))
+                                       printf("  PKT_RX_%s\n",
+                                              pkt_rx_flag_names[rxf]);
+                       }
+               }
+               rte_pktmbuf_free(mb);
+       }
+
+#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
+       end_tsc = rte_rdtsc();
+       core_cycles = (end_tsc - start_tsc);
+       fs->core_cycles = (uint64_t) (fs->core_cycles + core_cycles);
+#endif
+}
+
+struct fwd_engine rx_only_engine = {
+       .fwd_mode_name  = "rxonly",
+       .port_fwd_begin = NULL,
+       .port_fwd_end   = NULL,
+       .packet_fwd     = pkt_burst_receive,
+};
diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
new file mode 100644 (file)
index 0000000..6813b66
--- /dev/null
@@ -0,0 +1,1105 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <signal.h>
+#include <string.h>
+#include <time.h>
+#include <fcntl.h>
+#include <sys/types.h>
+#include <errno.h>
+
+#include <sys/queue.h>
+#include <sys/stat.h>
+
+#include <stdint.h>
+#include <unistd.h>
+#include <inttypes.h>
+
+#include <rte_common.h>
+#include <rte_byteorder.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_cycles.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_memzone.h>
+#include <rte_launch.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_ring.h>
+#include <rte_mempool.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_string_fns.h>
+
+#include "testpmd.h"
+
+uint16_t verbose_level = 0; /**< Silent by default. */
+
+/* Use the master lcore for the command line? */
+uint8_t interactive = 0;
+
+/*
+ * NUMA support configuration.
+ * When set, the NUMA support attempts to dispatch the allocation of the
+ * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
+ * probed ports among the CPU sockets 0 and 1.
+ * Otherwise, all memory is allocated from CPU socket 0.
+ */
+uint8_t numa_support = 0; /**< No numa support by default */
+
+/*
+ * Record the Ethernet address of peer target ports to which packets are
+ * forwarded.
+ * Must be instantiated with the Ethernet addresses of peer traffic generator
+ * ports.
+ */
+struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
+portid_t nb_peer_eth_addrs = 0;
+
+/*
+ * Probed Target Environment.
+ */
+struct rte_port *ports;               /**< For all probed ethernet ports. */
+portid_t nb_ports;             /**< Number of probed ethernet ports. */
+struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
+lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
+
+/*
+ * Test Forwarding Configuration.
+ *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
+ *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
+ */
+lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
+lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
+portid_t  nb_cfg_ports;  /**< Number of configured ports. */
+portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */
+
+unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
+portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */
+
+struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
+streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
+
+/*
+ * Forwarding engines.
+ */
+struct fwd_engine * fwd_engines[] = {
+       &io_fwd_engine,
+       &mac_fwd_engine,
+       &rx_only_engine,
+       &tx_only_engine,
+       &csum_fwd_engine,
+#ifdef RTE_LIBRTE_IEEE1588
+       &ieee1588_fwd_engine,
+#endif
+       NULL,
+};
+
+struct fwd_config cur_fwd_config;
+struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
+
+uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
+
+/*
+ * Configuration of packet segments used by the "txonly" processing engine.
+ */
+uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
+uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
+       TXONLY_DEF_PACKET_LEN,
+};
+uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
+
+uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
+uint16_t mb_mempool_cache = DEF_PKT_BURST; /**< Size of mbuf mempool cache. */
+
+/*
+ * Ethernet Ports Configuration.
+ */
+int promiscuous_on = 1; /**< Ports set in promiscuous mode by default. */
+
+/*
+ * Configurable number of RX/TX queues.
+ */
+queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
+queueid_t nb_txq = 1; /**< Number of TX queues per port. */
+
+/*
+ * Configurable number of RX/TX ring descriptors.
+ */
+#define RTE_TEST_RX_DESC_DEFAULT 128
+#define RTE_TEST_TX_DESC_DEFAULT 512
+uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
+uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
+
+/*
+ * Configurable values of RX and TX ring threshold registers.
+ */
+#define RX_PTHRESH 8 /**< Default value of RX prefetch threshold register. */
+#define RX_HTHRESH 8 /**< Default value of RX host threshold register. */
+#define RX_WTHRESH 4 /**< Default value of RX write-back threshold register. */
+
+#define TX_PTHRESH 36 /**< Default value of TX prefetch threshold register. */
+#define TX_HTHRESH 0 /**< Default value of TX host threshold register. */
+#define TX_WTHRESH 0 /**< Default value of TX write-back threshold register. */
+
+struct rte_eth_thresh rx_thresh = {
+       .pthresh = RX_PTHRESH,
+       .hthresh = RX_HTHRESH,
+       .wthresh = RX_WTHRESH,
+};
+
+struct rte_eth_thresh tx_thresh = {
+       .pthresh = TX_PTHRESH,
+       .hthresh = TX_HTHRESH,
+       .wthresh = TX_WTHRESH,
+};
+
+/*
+ * Configurable value of RX free threshold.
+ */
+uint16_t rx_free_thresh = 0; /* Immediately free RX descriptors by default. */
+
+/*
+ * Configurable value of TX free threshold.
+ */
+uint16_t tx_free_thresh = 0; /* Use default values. */
+
+/*
+ * Configurable value of TX RS bit threshold.
+ */
+uint16_t tx_rs_thresh = 0; /* Use default values. */
+
+/*
+ * Receive Side Scaling (RSS) configuration.
+ */
+uint16_t rss_hf = ETH_RSS_IPV4 | ETH_RSS_IPV6; /* RSS IP by default. */
+
+/*
+ * Port topology configuration
+ */
+uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
+
+/*
+ * Ethernet device configuration.
+ */
+struct rte_eth_rxmode rx_mode = {
+       .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
+       .split_hdr_size = 0,
+       .header_split   = 0, /**< Header Split disabled. */
+       .hw_ip_checksum = 0, /**< IP checksum offload disabled. */
+       .hw_vlan_filter = 1, /**< VLAN filtering enabled. */
+       .jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
+       .hw_strip_crc   = 0, /**< CRC stripping by hardware disabled. */
+};
+
+struct rte_fdir_conf fdir_conf = {
+       .mode = RTE_FDIR_MODE_NONE,
+       .pballoc = RTE_FDIR_PBALLOC_64K,
+       .status = RTE_FDIR_REPORT_STATUS,
+       .flexbytes_offset = 0x6,
+       .drop_queue = 127,
+};
+
+static volatile int test_done = 1; /* stop packet forwarding when set to 1. */
+
+/*
+ * Setup default configuration.
+ */
+static void
+set_default_fwd_lcores_config(void)
+{
+       unsigned int i;
+       unsigned int nb_lc;
+
+       nb_lc = 0;
+       for (i = 0; i < RTE_MAX_LCORE; i++) {
+               if (! rte_lcore_is_enabled(i))
+                       continue;
+               if (i == rte_get_master_lcore())
+                       continue;
+               fwd_lcores_cpuids[nb_lc++] = i;
+       }
+       nb_lcores = (lcoreid_t) nb_lc;
+       nb_cfg_lcores = nb_lcores;
+       nb_fwd_lcores = 1;
+}
+
+static void
+set_def_peer_eth_addrs(void)
+{
+       portid_t i;
+
+       for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
+               peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
+               peer_eth_addrs[i].addr_bytes[5] = i;
+       }
+}
+
+static void
+set_default_fwd_ports_config(void)
+{
+       portid_t pt_id;
+
+       for (pt_id = 0; pt_id < nb_ports; pt_id++)
+               fwd_ports_ids[pt_id] = pt_id;
+
+       nb_cfg_ports = nb_ports;
+       nb_fwd_ports = nb_ports;
+}
+
+void
+set_def_fwd_config(void)
+{
+       set_default_fwd_lcores_config();
+       set_def_peer_eth_addrs();
+       set_default_fwd_ports_config();
+}
+
+/*
+ * Configuration initialisation done once at init time.
+ */
+struct mbuf_ctor_arg {
+       uint16_t seg_buf_offset; /**< offset of data in data segment of mbuf. */
+       uint16_t seg_buf_size;   /**< size of data segment in mbuf. */
+};
+
+struct mbuf_pool_ctor_arg {
+       uint16_t seg_buf_size; /**< size of data segment in mbuf. */
+};
+
+static void
+testpmd_mbuf_ctor(struct rte_mempool *mp,
+                 void *opaque_arg,
+                 void *raw_mbuf,
+                 __attribute__((unused)) unsigned i)
+{
+       struct mbuf_ctor_arg *mb_ctor_arg;
+       struct rte_mbuf    *mb;
+
+       mb_ctor_arg = (struct mbuf_ctor_arg *) opaque_arg;
+       mb = (struct rte_mbuf *) raw_mbuf;
+
+       mb->pool         = mp;
+       mb->buf_addr     = (void *) ((char *)mb + mb_ctor_arg->seg_buf_offset);
+       mb->buf_physaddr = (uint64_t) (rte_mempool_virt2phy(mp, mb) +
+                       mb_ctor_arg->seg_buf_offset);
+       mb->buf_len      = mb_ctor_arg->seg_buf_size;
+       mb->type         = RTE_MBUF_PKT;
+       mb->ol_flags     = 0;
+       mb->pkt.data     = (char *) mb->buf_addr + RTE_PKTMBUF_HEADROOM;
+       mb->pkt.nb_segs  = 1;
+       mb->pkt.l2_len = 0;
+       mb->pkt.l3_len = 0;
+       mb->pkt.vlan_tci = 0;
+       mb->pkt.hash.rss = 0;
+}
+
+static void
+testpmd_mbuf_pool_ctor(struct rte_mempool *mp,
+                      void *opaque_arg)
+{
+       struct mbuf_pool_ctor_arg      *mbp_ctor_arg;
+       struct rte_pktmbuf_pool_private *mbp_priv;
+
+       if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
+               printf("%s(%s) private_data_size %d < %d\n",
+                      __func__, mp->name, (int) mp->private_data_size,
+                      (int) sizeof(struct rte_pktmbuf_pool_private));
+               return;
+       }
+       mbp_ctor_arg = (struct mbuf_pool_ctor_arg *) opaque_arg;
+       mbp_priv = (struct rte_pktmbuf_pool_private *)
+               ((char *)mp + sizeof(struct rte_mempool));
+       mbp_priv->mbuf_data_room_size = mbp_ctor_arg->seg_buf_size;
+}
+
+static void
+mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
+                unsigned int socket_id)
+{
+       char pool_name[RTE_MEMPOOL_NAMESIZE];
+       struct rte_mempool *rte_mp;
+       struct mbuf_pool_ctor_arg mbp_ctor_arg;
+       struct mbuf_ctor_arg mb_ctor_arg;
+       uint32_t mb_size;
+
+       mbp_ctor_arg.seg_buf_size = (uint16_t) (RTE_PKTMBUF_HEADROOM +
+                                               mbuf_seg_size);
+       mb_ctor_arg.seg_buf_offset =
+               (uint16_t) CACHE_LINE_ROUNDUP(sizeof(struct rte_mbuf));
+       mb_ctor_arg.seg_buf_size = mbp_ctor_arg.seg_buf_size;
+       mb_size = mb_ctor_arg.seg_buf_offset + mb_ctor_arg.seg_buf_size;
+       mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
+       rte_mp = rte_mempool_create(pool_name, nb_mbuf, (unsigned) mb_size,
+                                   (unsigned) mb_mempool_cache,
+                                   sizeof(struct rte_pktmbuf_pool_private),
+                                   testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
+                                   testpmd_mbuf_ctor, &mb_ctor_arg,
+                                   socket_id, 0);
+       if (rte_mp == NULL) {
+               rte_exit(EXIT_FAILURE, "Creation of mbuf pool for socket %u failed\n",
+                      socket_id);
+       }
+}
+
+static void
+init_config(void)
+{
+       struct rte_port *port;
+       struct rte_mempool *mbp;
+       unsigned int nb_mbuf_per_pool;
+       streamid_t sm_id;
+       lcoreid_t  lc_id;
+       portid_t   pt_id;
+
+       /* Configuration of logical cores. */
+       fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
+                               sizeof(struct fwd_lcore *) * nb_lcores,
+                               CACHE_LINE_SIZE);
+       if (fwd_lcores == NULL) {
+               rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) failed\n",
+                      nb_lcores);
+       }
+       for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
+               fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
+                                              sizeof(struct fwd_lcore),
+                                              CACHE_LINE_SIZE);
+               if (fwd_lcores[lc_id] == NULL) {
+                       rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) failed\n");
+               }
+               fwd_lcores[lc_id]->cpuid_idx = lc_id;
+       }
+
+       /*
+        * Create pools of mbuf.
+        * If NUMA support is disabled, create a single pool of mbuf in
+        * socket 0 memory.
+        * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
+        */
+       nb_mbuf_per_pool = nb_rxd + (nb_lcores * mb_mempool_cache) +
+               nb_txd + MAX_PKT_BURST;
+       if (numa_support) {
+               nb_mbuf_per_pool = nb_mbuf_per_pool * (nb_ports >> 1);
+               mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
+               mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 1);
+       } else {
+               nb_mbuf_per_pool = (nb_mbuf_per_pool * nb_ports);
+               mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
+       }
+
+       /*
+        * Records which Mbuf pool to use by each logical core, if needed.
+        */
+       for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
+               mbp = mbuf_pool_find(rte_lcore_to_socket_id(lc_id));
+               if (mbp == NULL)
+                       mbp = mbuf_pool_find(0);
+               fwd_lcores[lc_id]->mbp = mbp;
+       }
+
+       /* Configuration of Ethernet ports. */
+       ports = rte_zmalloc("testpmd: ports",
+                           sizeof(struct rte_port) * nb_ports,
+                           CACHE_LINE_SIZE);
+       if (ports == NULL) {
+               rte_exit(EXIT_FAILURE, "rte_zmalloc(%d struct rte_port) failed\n",
+                      nb_ports);
+       }
+       port = ports;
+       for (pt_id = 0; pt_id < nb_ports; pt_id++, port++) {
+               rte_eth_dev_info_get(pt_id, &port->dev_info);
+               if (nb_rxq > port->dev_info.max_rx_queues) {
+                       rte_exit(EXIT_FAILURE, "Port %d: max RX queues %d < nb_rxq %d\n",
+                              (int) pt_id,
+                              (int) port->dev_info.max_rx_queues,
+                              (int) nb_rxq);
+               }
+               if (nb_txq > port->dev_info.max_tx_queues) {
+                       rte_exit(EXIT_FAILURE, "Port %d: max TX queues %d < nb_txq %d\n",
+                              (int) pt_id,
+                              (int) port->dev_info.max_tx_queues,
+                              (int) nb_txq);
+               }
+
+               if (numa_support)
+                       port->socket_id = (pt_id < (nb_ports >> 1)) ? 0 : 1;
+               else
+                       port->socket_id = 0;
+       }
+
+       /* Configuration of packet forwarding streams. */
+       nb_fwd_streams = (streamid_t) (nb_ports * nb_rxq);
+       fwd_streams = rte_zmalloc("testpmd: fwd_streams",
+                                 sizeof(struct fwd_stream *) * nb_fwd_streams,
+                                 CACHE_LINE_SIZE);
+       if (fwd_streams == NULL) {
+               rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) failed\n",
+                      nb_fwd_streams);
+       }
+       for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
+               fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
+                                                sizeof(struct fwd_stream),
+                                                CACHE_LINE_SIZE);
+               if (fwd_streams[sm_id] == NULL) {
+                       rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream) failed\n");
+               }
+       }
+}
+
+#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
+static void
+pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
+{
+       unsigned int total_burst;
+       unsigned int nb_burst;
+       unsigned int burst_stats[3];
+       uint16_t pktnb_stats[3];
+       uint16_t nb_pkt;
+       int burst_percent[3];
+
+       /*
+        * First compute the total number of packet bursts and the
+        * two highest numbers of bursts of the same number of packets.
+        */
+       total_burst = 0;
+       burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
+       pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
+       for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
+               nb_burst = pbs->pkt_burst_spread[nb_pkt];
+               if (nb_burst == 0)
+                       continue;
+               total_burst += nb_burst;
+               if (nb_burst > burst_stats[0]) {
+                       burst_stats[1] = burst_stats[0];
+                       pktnb_stats[1] = pktnb_stats[0];
+                       burst_stats[0] = nb_burst;
+                       pktnb_stats[0] = nb_pkt;
+               }
+       }
+       if (total_burst == 0)
+               return;
+       burst_percent[0] = (burst_stats[0] * 100) / total_burst;
+       printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
+              burst_percent[0], (int) pktnb_stats[0]);
+       if (burst_stats[0] == total_burst) {
+               printf("]\n");
+               return;
+       }
+       if (burst_stats[0] + burst_stats[1] == total_burst) {
+               printf(" + %d%% of %d pkts]\n",
+                      100 - burst_percent[0], pktnb_stats[1]);
+               return;
+       }
+       burst_percent[1] = (burst_stats[1] * 100) / total_burst;
+       burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
+       if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
+               printf(" + %d%% of others]\n", 100 - burst_percent[0]);
+               return;
+       }
+       printf(" + %d%% of %d pkts + %d%% of others]\n",
+              burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
+}
+#endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
+
+static void
+fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
+{
+       struct rte_port *port;
+
+       static const char *fwd_stats_border = "----------------------";
+
+       port = &ports[port_id];
+       printf("\n  %s Forward statistics for port %-2d %s\n",
+               fwd_stats_border, port_id, fwd_stats_border);
+       printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
+              "%-"PRIu64"\n",
+              stats->ipackets, stats->ierrors,
+              (uint64_t) (stats->ipackets + stats->ierrors));
+
+       if (cur_fwd_eng == &csum_fwd_engine)
+               printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
+                               port->rx_bad_ip_csum, port->rx_bad_l4_csum);
+
+       printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
+              "%-"PRIu64"\n",
+              stats->opackets, port->tx_dropped,
+              (uint64_t) (stats->opackets + port->tx_dropped));
+
+       if (stats->rx_nombuf > 0)
+               printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
+#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
+       if (port->rx_stream)
+               pkt_burst_stats_display("RX", &port->rx_stream->rx_burst_stats);
+       if (port->tx_stream)
+               pkt_burst_stats_display("TX", &port->tx_stream->tx_burst_stats);
+#endif
+       /* stats fdir */
+       if (fdir_conf.mode != RTE_FDIR_MODE_NONE)
+               printf("  Fdirmiss: %-14"PRIu64"   Fdirmatch: %-14"PRIu64"\n",
+                      stats->fdirmiss,
+                      stats->fdirmatch);
+
+       printf("  %s--------------------------------%s\n",
+              fwd_stats_border, fwd_stats_border);
+}
+
+static void
+fwd_stream_stats_display(streamid_t stream_id)
+{
+       struct fwd_stream *fs;
+       static const char *fwd_top_stats_border = "-------";
+
+       fs = fwd_streams[stream_id];
+       if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
+           (fs->fwd_dropped == 0))
+               return;
+       printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
+              "TX Port=%2d/Queue=%2d %s\n",
+              fwd_top_stats_border, fs->rx_port, fs->rx_queue,
+              fs->tx_port, fs->tx_queue, fwd_top_stats_border);
+       printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
+              fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
+
+       /* if checksum mode */
+       if (cur_fwd_eng == &csum_fwd_engine) {
+              printf("  RX- bad IP checksum: %-14u  Rx- bad L4 checksum: %-14u\n",
+              fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
+       }
+
+#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
+       pkt_burst_stats_display("RX", &fs->rx_burst_stats);
+       pkt_burst_stats_display("TX", &fs->tx_burst_stats);
+#endif
+}
+
+static void
+flush_all_rx_queues(void)
+{
+       struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
+       portid_t  rxp;
+       queueid_t rxq;
+       uint16_t  nb_rx;
+       uint16_t  i;
+       uint8_t   j;
+
+       for (j = 0; j < 2; j++) {
+               for (rxp = 0; rxp < nb_ports; rxp++) {
+                       for (rxq = 0; rxq < nb_rxq; rxq++) {
+                               do {
+                                       nb_rx = rte_eth_rx_burst(rxp, rxq,
+                                                                pkts_burst,
+                                                                MAX_PKT_BURST);
+                                       for (i = 0; i < nb_rx; i++)
+                                               rte_pktmbuf_free(pkts_burst[i]);
+                               } while (nb_rx > 0);
+                       }
+               }
+               rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
+       }
+}
+
+static void
+run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
+{
+       struct fwd_stream **fsm;
+       streamid_t nb_fs;
+       streamid_t sm_id;
+
+       fsm = &fwd_streams[fc->stream_idx];
+       nb_fs = fc->stream_nb;
+       do {
+               for (sm_id = 0; sm_id < nb_fs; sm_id++)
+                       (*pkt_fwd)(fsm[sm_id]);
+       } while (! fc->stopped);
+}
+
+static int
+start_pkt_forward_on_core(void *fwd_arg)
+{
+       run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
+                            cur_fwd_config.fwd_eng->packet_fwd);
+       return 0;
+}
+
+/*
+ * Run the TXONLY packet forwarding engine to send a single burst of packets.
+ * Used to start communication flows in network loopback test configurations.
+ */
+static int
+run_one_txonly_burst_on_core(void *fwd_arg)
+{
+       struct fwd_lcore *fwd_lc;
+       struct fwd_lcore tmp_lcore;
+
+       fwd_lc = (struct fwd_lcore *) fwd_arg;
+       tmp_lcore = *fwd_lc;
+       tmp_lcore.stopped = 1;
+       run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
+       return 0;
+}
+
+/*
+ * Launch packet forwarding:
+ *     - Setup per-port forwarding context.
+ *     - launch logical cores with their forwarding configuration.
+ *
+ * Invokes the current engine's optional per-port "begin" hook on every
+ * forwarding port, then remote-launches "pkt_fwd_on_lcore" on every
+ * forwarding lcore.  In interactive mode the lcore running the command
+ * prompt (rte_lcore_id()) is skipped so the CLI stays responsive.
+ * A failed launch is reported but does not abort the other lcores.
+ */
+static void
+launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
+{
+       port_fwd_begin_t port_fwd_begin;
+       unsigned int i;
+       unsigned int lc_id;
+       int diag;
+
+       port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
+       if (port_fwd_begin != NULL) {
+               for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
+                       (*port_fwd_begin)(fwd_ports_ids[i]);
+       }
+       for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
+               lc_id = fwd_lcores_cpuids[i];
+               if ((interactive == 0) || (lc_id != rte_lcore_id())) {
+                       fwd_lcores[i]->stopped = 0;
+                       diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
+                                                    fwd_lcores[i], lc_id);
+                       if (diag != 0)
+                               printf("launch lcore %u failed - diag=%d\n",
+                                      lc_id, diag);
+               }
+       }
+}
+
+/*
+ * Launch packet forwarding configuration.
+ *
+ * Guarded by the global "test_done" flag so forwarding cannot be started
+ * twice.  Before launching, drains stale packets from all RX queues,
+ * rebuilds the forwarding configuration, snapshots per-port HW stats
+ * (used later as a baseline by stop_packet_forwarding()) and zeroes all
+ * per-stream software counters.  If "with_tx_first" is non-zero, a single
+ * TXONLY burst is sent on all cores first (and waited for) to prime
+ * loopback flows before the real engine is launched.
+ */
+void
+start_packet_forwarding(int with_tx_first)
+{
+       port_fwd_begin_t port_fwd_begin;
+       port_fwd_end_t  port_fwd_end;
+       struct rte_port *port;
+       unsigned int i;
+       portid_t   pt_id;
+       streamid_t sm_id;
+
+       if (test_done == 0) {
+               printf("Packet forwarding already started\n");
+               return;
+       }
+       test_done = 0;
+       flush_all_rx_queues();
+       fwd_config_setup();
+       rxtx_config_display();
+
+       /* Snapshot current HW stats as the baseline for this run. */
+       for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
+               pt_id = fwd_ports_ids[i];
+               port = &ports[pt_id];
+               rte_eth_stats_get(pt_id, &port->stats);
+               port->tx_dropped = 0;
+       }
+       /* Reset all per-stream software counters. */
+       for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
+               fwd_streams[sm_id]->rx_packets = 0;
+               fwd_streams[sm_id]->tx_packets = 0;
+               fwd_streams[sm_id]->fwd_dropped = 0;
+               fwd_streams[sm_id]->rx_bad_ip_csum = 0;
+               fwd_streams[sm_id]->rx_bad_l4_csum = 0;
+
+#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
+               memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
+                      sizeof(fwd_streams[sm_id]->rx_burst_stats));
+               memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
+                      sizeof(fwd_streams[sm_id]->tx_burst_stats));
+#endif
+#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
+               fwd_streams[sm_id]->core_cycles = 0;
+#endif
+       }
+       if (with_tx_first) {
+               /* One-shot TXONLY pass to start loopback flows.
+                * NOTE(review): if the current engine is txonly itself,
+                * its port_fwd_begin hook also runs again inside
+                * launch_packet_forwarding() below -- confirm intended. */
+               port_fwd_begin = tx_only_engine.port_fwd_begin;
+               if (port_fwd_begin != NULL) {
+                       for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
+                               (*port_fwd_begin)(fwd_ports_ids[i]);
+               }
+               launch_packet_forwarding(run_one_txonly_burst_on_core);
+               rte_eal_mp_wait_lcore();
+               port_fwd_end = tx_only_engine.port_fwd_end;
+               if (port_fwd_end != NULL) {
+                       for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
+                               (*port_fwd_end)(fwd_ports_ids[i]);
+               }
+       }
+       launch_packet_forwarding(start_pkt_forward_on_core);
+}
+
+void
+stop_packet_forwarding(void)
+{
+       struct rte_eth_stats stats;
+       struct rte_port *port;
+       port_fwd_end_t  port_fwd_end;
+       int i;
+       portid_t   pt_id;
+       streamid_t sm_id;
+       lcoreid_t  lc_id;
+       uint64_t total_recv;
+       uint64_t total_xmit;
+       uint64_t total_rx_dropped;
+       uint64_t total_tx_dropped;
+       uint64_t total_rx_nombuf;
+       uint64_t tx_dropped;
+       uint64_t rx_bad_ip_csum;
+       uint64_t rx_bad_l4_csum;
+#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
+       uint64_t fwd_cycles;
+#endif
+       static const char *acc_stats_border = "+++++++++++++++";
+
+       if (test_done) {
+               printf("Packet forwarding not started\n");
+               return;
+       }
+       printf("Telling cores to stop...");
+       for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
+               fwd_lcores[lc_id]->stopped = 1;
+       printf("\nWaiting for lcores to finish...\n");
+       rte_eal_mp_wait_lcore();
+       port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
+       if (port_fwd_end != NULL) {
+               for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
+                       pt_id = fwd_ports_ids[i];
+                       (*port_fwd_end)(pt_id);
+               }
+       }
+#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
+       fwd_cycles = 0;
+#endif
+       for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
+               if (cur_fwd_config.nb_fwd_streams >
+                   cur_fwd_config.nb_fwd_ports) {
+                       fwd_stream_stats_display(sm_id);
+                       ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
+                       ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
+               } else {
+                       ports[fwd_streams[sm_id]->tx_port].tx_stream =
+                               fwd_streams[sm_id];
+                       ports[fwd_streams[sm_id]->rx_port].rx_stream =
+                               fwd_streams[sm_id];
+               }
+               tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
+               tx_dropped = (uint64_t) (tx_dropped +
+                                        fwd_streams[sm_id]->fwd_dropped);
+               ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
+
+               rx_bad_ip_csum = ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
+               rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
+                                        fwd_streams[sm_id]->rx_bad_ip_csum);
+               ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum = rx_bad_ip_csum;
+
+               rx_bad_l4_csum = ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
+               rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
+                                        fwd_streams[sm_id]->rx_bad_l4_csum);
+               ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum = rx_bad_l4_csum;
+
+#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
+               fwd_cycles = (uint64_t) (fwd_cycles +
+                                        fwd_streams[sm_id]->core_cycles);
+#endif
+       }
+       total_recv = 0;
+       total_xmit = 0;
+       total_rx_dropped = 0;
+       total_tx_dropped = 0;
+       total_rx_nombuf  = 0;
+       for (i = 0; i < ((cur_fwd_config.nb_fwd_ports + 1) & ~0x1); i++) {
+               pt_id = fwd_ports_ids[i];
+
+               port = &ports[pt_id];
+               rte_eth_stats_get(pt_id, &stats);
+               stats.ipackets -= port->stats.ipackets;
+               port->stats.ipackets = 0;
+               stats.opackets -= port->stats.opackets;
+               port->stats.opackets = 0;
+               stats.ibytes   -= port->stats.ibytes;
+               port->stats.ibytes = 0;
+               stats.obytes   -= port->stats.obytes;
+               port->stats.obytes = 0;
+               stats.ierrors  -= port->stats.ierrors;
+               port->stats.ierrors = 0;
+               stats.oerrors  -= port->stats.oerrors;
+               port->stats.oerrors = 0;
+               stats.rx_nombuf -= port->stats.rx_nombuf;
+               port->stats.rx_nombuf = 0;
+               stats.fdirmatch -= port->stats.fdirmatch;
+               port->stats.rx_nombuf = 0;
+               stats.fdirmiss -= port->stats.fdirmiss;
+               port->stats.rx_nombuf = 0;
+
+               total_recv += stats.ipackets;
+               total_xmit += stats.opackets;
+               total_rx_dropped += stats.ierrors;
+               total_tx_dropped += port->tx_dropped;
+               total_rx_nombuf  += stats.rx_nombuf;
+
+               fwd_port_stats_display(pt_id, &stats);
+       }
+       printf("\n  %s Accumulated forward statistics for all ports"
+              "%s\n",
+              acc_stats_border, acc_stats_border);
+       printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
+              "%-"PRIu64"\n"
+              "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
+              "%-"PRIu64"\n",
+              total_recv, total_rx_dropped, total_recv + total_rx_dropped,
+              total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
+       if (total_rx_nombuf > 0)
+               printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
+       printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
+              "%s\n",
+              acc_stats_border, acc_stats_border);
+#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
+       if (total_recv > 0)
+               printf("\n  CPU cycles/packet=%u (total cycles="
+                      "%"PRIu64" / total RX packets=%"PRIu64")\n",
+                      (unsigned int)(fwd_cycles / total_recv),
+                      fwd_cycles, total_recv);
+#endif
+       printf("\nDone.\n");
+       test_done = 1;
+}
+
+/*
+ * Application exit hook: close every probed Ethernet device in turn.
+ * fflush() keeps the "Stopping port N..." progress visible even if
+ * rte_eth_dev_close() blocks.
+ */
+void
+pmd_test_exit(void)
+{
+       portid_t pt_id;
+
+       for (pt_id = 0; pt_id < nb_ports; pt_id++) {
+               printf("Stopping port %d...", pt_id);
+               fflush(stdout);
+               rte_eth_dev_close(pt_id);
+               printf("done\n");
+       }
+       printf("bye...\n");
+}
+
+/* A named command bound to a parameterless handler function. */
+typedef void (*cmd_func_t)(void);
+struct pmd_test_command {
+       const char *cmd_name;
+       cmd_func_t cmd_func;
+};
+
+/* Number of entries in pmd_test_menu (defined elsewhere in this file). */
+#define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
+
+/*
+ * Abort the application (via rte_panic, which does not return) with a
+ * uniform message when an ethdev initialization call fails for a port.
+ */
+static void
+fatal_init_error(const char *func_name, uint8_t port_id, int diag)
+{
+       rte_panic("%s(port_id=%d) failed - diag=%d\n",
+                 func_name, port_id, diag);
+}
+
+/*
+ * Configure and start every probed Ethernet port:
+ *   - apply the global RX mode / flow-director configuration,
+ *   - enable RSS (default hash key) when RX queues are requested,
+ *   - set up nb_txq TX and nb_rxq RX queues on the port's NUMA socket,
+ *   - start the device and report link status.
+ * Any ethdev failure aborts via fatal_init_error() (rte_panic).
+ */
+static void
+init_ports(void)
+{
+       struct rte_eth_link   link;
+       struct rte_eth_conf   port_conf = {
+               .intr_conf = {
+                       .lsc = 0, /* no link-state-change interrupt */
+               },
+       };
+       struct rte_eth_rxconf rx_conf;
+       struct rte_eth_txconf tx_conf;
+       struct rte_port *port;
+       unsigned int sock_id;
+       portid_t  pi;
+       queueid_t qi;
+       int diag;
+
+       port_conf.rxmode = rx_mode;
+       port_conf.fdir_conf = fdir_conf;
+
+       if (nb_rxq > 0) { /* configure RSS */
+               port_conf.rx_adv_conf.rss_conf.rss_key = NULL;
+               /* use default hash key */
+               port_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
+       } else
+               port_conf.rx_adv_conf.rss_conf.rss_hf = 0;
+       /* NOTE(review): rx_conf/tx_conf are only partially initialized;
+        * assumes the threshold fields set below are the only members --
+        * confirm against struct rte_eth_rxconf/txconf. */
+       rx_conf.rx_thresh = rx_thresh;
+       rx_conf.rx_free_thresh = rx_free_thresh;
+       tx_conf.tx_thresh = tx_thresh;
+       tx_conf.tx_rs_thresh = tx_rs_thresh;
+       tx_conf.tx_free_thresh = tx_free_thresh;
+
+       for (pi = 0; pi < nb_ports; pi++) {
+               port = &ports[pi];
+               memcpy(&port->dev_conf, &port_conf, sizeof(port_conf));
+               sock_id = port->socket_id;
+               printf("Initializing port %d... ", pi);
+               fflush(stdout);
+               diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq, &port_conf);
+               if (diag != 0) {
+                       fatal_init_error("rte_eth_dev_configure", pi, diag);
+                       /* NOT REACHED */
+               }
+               rte_eth_macaddr_get(pi, &port->eth_addr);
+               for (qi = 0; qi < nb_txq; qi++) {
+                       diag = rte_eth_tx_queue_setup(pi, qi, nb_txd,
+                                                     sock_id,
+                                                     &tx_conf);
+                       if (diag != 0) {
+                               fatal_init_error("rte_eth_tx_queue_setup",
+                                                pi, diag);
+                               /* NOT REACHED */
+                       }
+               }
+               for (qi = 0; qi < nb_rxq; qi++) {
+                       diag = rte_eth_rx_queue_setup(pi, qi, nb_rxd, sock_id,
+                                                     &rx_conf,
+                                                     mbuf_pool_find(sock_id));
+                       if (diag != 0) {
+                               fatal_init_error("rte_eth_rx_queue_setup",
+                                                pi, diag);
+                               /* NOT REACHED */
+                       }
+               }
+
+               /* Start device */
+               diag = rte_eth_dev_start(pi);
+               if (diag != 0) {
+                       fatal_init_error("rte_eth_dev_start", pi, diag);
+                       /* NOT REACHED */
+               }
+               printf("done: ");
+               rte_eth_link_get(pi, &link);
+               if (link.link_status) {
+                       /* Fixed: the half-duplex literal carried a stray
+                        * "\n" although the format string already ends
+                        * with one, printing a spurious blank line. */
+                       printf(" Link Up - speed %u Mbps - %s\n",
+                              (unsigned) link.link_speed,
+                              (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
+                              ("full-duplex") : ("half-duplex"));
+               } else {
+                       printf(" Link Down\n");
+               }
+
+               /*
+                * If enabled, put device in promiscuous mode.
+                * This allows the PMD test in IO forwarding mode to forward
+                * packets to itself through 2 cross-connected  ports of the
+                * target machine.
+                */
+               if (promiscuous_on)
+                       rte_eth_promiscuous_enable(pi);
+       }
+}
+
+/* icc on baremetal gives us troubles with a function named 'main'. */
+#ifdef RTE_EXEC_ENV_BAREMETAL
+#define main _main
+#endif
+
+/*
+ * testpmd entry point:
+ *   - initialize the EAL and the compiled-in PMDs, probe PCI devices,
+ *   - build the default forwarding configuration,
+ *   - parse the application arguments left after EAL consumed its own
+ *     (rte_eal_init() returns the number of args it consumed),
+ *   - configure and start the ports, then either enter the interactive
+ *     prompt or start forwarding and wait for a keypress.
+ */
+int
+main(int argc, char** argv)
+{
+       int  diag;
+
+       diag = rte_eal_init(argc, argv);
+       if (diag < 0)
+               rte_panic("Cannot init EAL\n");
+
+#ifdef RTE_LIBRTE_IGB_PMD
+       if (rte_igb_pmd_init())
+               rte_panic("Cannot init igb PMD\n");
+#endif
+#ifdef RTE_LIBRTE_IXGBE_PMD
+       if (rte_ixgbe_pmd_init())
+               rte_panic("Cannot init ixgbe PMD\n");
+
+       if (rte_ixgbevf_pmd_init())
+               rte_panic("Cannot init ixgbevf PMD\n");
+#endif
+
+       if (rte_eal_pci_probe())
+               rte_panic("Cannot probe PCI\n");
+
+       nb_ports = (portid_t) rte_eth_dev_count();
+       if (nb_ports == 0)
+               rte_exit(EXIT_FAILURE, "No probed ethernet devices - check that "
+                         "CONFIG_RTE_LIBRTE_IGB_PMD=y and that "
+                         "CONFIG_RTE_LIBRTE_IXGBE_PMD=y in your "
+                         "configuration file\n");
+
+       set_def_fwd_config();
+       if (nb_lcores == 0)
+               rte_panic("Empty set of forwarding logical cores - check the "
+                         "core mask supplied in the command parameters\n");
+
+       /* Skip the EAL-consumed arguments before app-level parsing. */
+       argc -= diag;
+       argv += diag;
+       if (argc > 1)
+               launch_args_parse(argc, argv);
+
+       if (nb_rxq > nb_txq)
+               printf("Warning: nb_rxq=%d enables RSS configuration, "
+                      "but nb_txq=%d will prevent to fully test it.\n",
+                      nb_rxq, nb_txq);
+
+       init_config();
+
+       init_ports();
+
+       if (interactive == 1)
+               prompt();
+       else {
+               char c;
+               int rc;
+
+               printf("No commandline core given, start packet forwarding\n");
+               start_packet_forwarding(0);
+               printf("Press enter to exit\n");
+               rc = read(0, &c, 1);
+               if (rc < 0)
+                       return 1;
+       }
+
+       return 0;
+}
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
new file mode 100644 (file)
index 0000000..cc4a0fd
--- /dev/null
@@ -0,0 +1,413 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#ifndef _TESTPMD_H_
+#define _TESTPMD_H_
+
+/* icc on baremetal gives us troubles with function named 'main' */
+#ifdef RTE_EXEC_ENV_BAREMETAL
+#define main _main
+int main(int argc, char **argv);
+#endif
+
+/*
+ * Default size of the mbuf data buffer to receive standard 1518-byte
+ * Ethernet frames in a mono-segment memory buffer.
+ */
+#define DEFAULT_MBUF_DATA_SIZE 2048 /**< Default size of mbuf data buffer. */
+
+/*
+ * The maximum number of segments per packet is used when creating
+ * scattered transmit packets composed of a list of mbufs.
+ */
+#define RTE_MAX_SEGS_PER_PKT 255 /**< pkt.nb_segs is a 8-bit unsigned char. */
+
+/* Upper/default bounds on packets handled per RX/TX burst. */
+#define MAX_PKT_BURST 512
+#define DEF_PKT_BURST 16
+
+/* Round "size" up to the next multiple of the cache line size. */
+#define CACHE_LINE_SIZE_ROUNDUP(size) \
+       (CACHE_LINE_SIZE * ((size + CACHE_LINE_SIZE - 1) / CACHE_LINE_SIZE))
+
+/* Narrow index types; their widths bound the number of lcores, ports,
+ * queues and streams testpmd can address. */
+typedef uint8_t  lcoreid_t;
+typedef uint8_t  portid_t;
+typedef uint16_t queueid_t;
+typedef uint16_t streamid_t;
+
+#define MAX_QUEUE_ID ((1 << (sizeof(queueid_t) * 8)) - 1)
+
+/* Port wiring models: paired (0<->1, 2<->3, ...) or chained (0->1->...->0). */
+enum {
+       PORT_TOPOLOGY_PAIRED,
+       PORT_TOPOLOGY_CHAINED
+};
+
+#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
+/**
+ * The data structure associated with RX and TX packet burst statistics
+ * that are recorded for each forwarding stream.
+ * NOTE(review): presumably pkt_burst_spread[n] counts how many bursts
+ * contained exactly n packets -- confirm against the recording code.
+ */
+struct pkt_burst_stats {
+       unsigned int pkt_burst_spread[MAX_PKT_BURST];
+};
+#endif
+
+/**
+ * The data structure associated with a forwarding stream between a receive
+ * port/queue and a transmit port/queue.
+ * The "read-only" part is fixed at configuration time; the "read-write"
+ * counters are updated by the forwarding engine on the owning lcore.
+ */
+struct fwd_stream {
+       /* "read-only" data */
+       portid_t   rx_port;   /**< port to poll for received packets */
+       queueid_t  rx_queue;  /**< RX queue to poll on "rx_port" */
+       portid_t   tx_port;   /**< forwarding port of received packets */
+       queueid_t  tx_queue;  /**< TX queue to send forwarded packets */
+       streamid_t peer_addr; /**< index of peer ethernet address of packets */
+
+       /* "read-write" results */
+       unsigned int rx_packets;  /**< received packets */
+       unsigned int tx_packets;  /**< received packets transmitted */
+       unsigned int fwd_dropped; /**< received packets not forwarded */
+       unsigned int rx_bad_ip_csum ; /**< received packets has bad ip checksum */
+       unsigned int rx_bad_l4_csum ; /**< received packets has bad l4 checksum */
+#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
+       uint64_t     core_cycles; /**< used for RX and TX processing */
+#endif
+#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
+       struct pkt_burst_stats rx_burst_stats;
+       struct pkt_burst_stats tx_burst_stats;
+#endif
+};
+
+/**
+ * The data structure associated with each port.
+ * tx_ol_flags is slightly different from ol_flags of rte_mbuf.
+ *      Bit  0: Insert IP checksum
+ *   Bit  1: Insert UDP checksum
+ *   Bit  2: Insert TCP checksum
+ *   Bit  3: Insert SCTP checksum
+ *   Bit 11: Insert VLAN Label
+ *
+ * "stats" holds the HW-counter snapshot taken when forwarding starts,
+ * so stop_packet_forwarding() can report per-run deltas.
+ */
+struct rte_port {
+       struct rte_eth_dev_info dev_info;   /**< PCI info + driver name */
+       struct rte_eth_conf     dev_conf;   /**< Port configuration. */
+       struct ether_addr       eth_addr;   /**< Port ethernet address */
+       struct rte_eth_stats    stats;      /**< Last port statistics */
+       uint64_t                tx_dropped; /**< If no descriptor in TX ring */
+       struct fwd_stream       *rx_stream; /**< Port RX stream, if unique */
+       struct fwd_stream       *tx_stream; /**< Port TX stream, if unique */
+       unsigned int            socket_id;  /**< For NUMA support */
+       uint16_t                tx_ol_flags;/**< Offload Flags of TX packets. */
+       uint16_t                tx_vlan_id; /**< Tag Id. in TX VLAN packets. */
+       void                    *fwd_ctx;   /**< Forwarding mode context */
+       uint64_t                rx_bad_ip_csum; /**< rx pkts with bad ip checksum  */
+       uint64_t                rx_bad_l4_csum; /**< rx pkts with bad l4 checksum */
+};
+
+/**
+ * The data structure associated with each forwarding logical core.
+ * The logical cores are internally numbered by a core index from 0 to
+ * the maximum number of logical cores - 1.
+ * The system CPU identifier of all logical cores are setup in a global
+ * CPU id. configuration table.
+ */
+struct fwd_lcore {
+       struct rte_mempool *mbp; /**< The mbuf pool to use by this core */
+       streamid_t stream_idx;   /**< index of 1st stream in "fwd_streams" */
+       streamid_t stream_nb;    /**< number of streams in "fwd_streams" */
+       lcoreid_t  cpuid_idx;    /**< index of logical core in CPU id table */
+       queueid_t  tx_queue;     /**< TX queue to send forwarded packets */
+       /* Written by the control lcore, polled by the forwarding lcore;
+        * "volatile" forces a fresh read on each loop iteration. */
+       volatile char stopped;   /**< stop forwarding when set */
+};
+
+/*
+ * Forwarding mode operations:
+ *   - IO forwarding mode (default mode)
+ *     Forwards packets unchanged.
+ *
+ *   - MAC forwarding mode
+ *     Set the source and the destination Ethernet addresses of packets
+ *     before forwarding them.
+ *
+ *   - IEEE1588 forwarding mode
+ *     Check that received IEEE1588 Precise Time Protocol (PTP) packets are
+ *     filtered and timestamped by the hardware.
+ *     Forwards packets unchanged on the same port.
+ *     Check that sent IEEE1588 PTP packets are timestamped by the hardware.
+ */
+typedef void (*port_fwd_begin_t)(portid_t pi); /**< per-port setup hook */
+typedef void (*port_fwd_end_t)(portid_t pi);   /**< per-port teardown hook */
+typedef void (*packet_fwd_t)(struct fwd_stream *fs); /**< per-stream work */
+
+/* A forwarding engine: optional begin/end hooks plus the mandatory
+ * per-stream forwarding callback. */
+struct fwd_engine {
+       const char       *fwd_mode_name; /**< Forwarding mode name. */
+       port_fwd_begin_t port_fwd_begin; /**< NULL if nothing special to do. */
+       port_fwd_end_t   port_fwd_end;   /**< NULL if nothing special to do. */
+       packet_fwd_t     packet_fwd;     /**< Mandatory. */
+};
+
+/* Built-in forwarding engines (defined in their respective .c files). */
+extern struct fwd_engine io_fwd_engine;
+extern struct fwd_engine mac_fwd_engine;
+extern struct fwd_engine rx_only_engine;
+extern struct fwd_engine tx_only_engine;
+extern struct fwd_engine csum_fwd_engine;
+#ifdef RTE_LIBRTE_IEEE1588
+extern struct fwd_engine ieee1588_fwd_engine;
+#endif
+
+extern struct fwd_engine * fwd_engines[]; /**< NULL terminated array. */
+
+/**
+ * Forwarding Configuration
+ *
+ */
+struct fwd_config {
+       struct fwd_engine *fwd_eng; /**< Packet forwarding mode. */
+       streamid_t nb_fwd_streams;  /**< Nb. of forward streams to process. */
+       lcoreid_t  nb_fwd_lcores;   /**< Nb. of logical cores to launch. */
+       portid_t   nb_fwd_ports;    /**< Nb. of ports involved. */
+};
+
+/* globals used for configuration */
+extern uint16_t verbose_level; /**< Drives messages being displayed, if any. */
+extern uint8_t  interactive;
+extern uint8_t  numa_support; /**< set by "--numa" parameter */
+extern uint16_t port_topology; /**< set by "--port-topology" parameter */
+
+/*
+ * Configuration of logical cores:
+ * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
+ */
+extern lcoreid_t nb_lcores; /**< Number of logical cores probed at init time. */
+extern lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
+extern lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
+extern unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE];
+
+/*
+ * Configuration of Ethernet ports:
+ * nb_fwd_ports <= nb_cfg_ports <= nb_ports
+ */
+extern portid_t nb_ports; /**< Number of ethernet ports probed at init time. */
+extern portid_t nb_cfg_ports; /**< Number of configured ports. */
+extern portid_t nb_fwd_ports; /**< Number of forwarding ports. */
+extern portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];
+extern struct rte_port *ports;
+
+/* RX mode and RSS hash functions shared by all ports. */
+extern struct rte_eth_rxmode rx_mode;
+extern uint16_t rss_hf;
+
+/* Number of RX/TX queues per port and descriptors per queue. */
+extern queueid_t nb_rxq;
+extern queueid_t nb_txq;
+
+extern uint16_t nb_rxd;
+extern uint16_t nb_txd;
+
+/* RX/TX queue threshold parameters passed to queue setup. */
+extern uint16_t rx_free_thresh;
+extern uint16_t tx_free_thresh;
+extern uint16_t tx_rs_thresh;
+
+extern uint16_t mbuf_data_size; /**< Mbuf data space size. */
+
+extern struct rte_fdir_conf fdir_conf;
+
+/*
+ * Configuration of packet segments used by the "txonly" processing engine.
+ */
+#define TXONLY_DEF_PACKET_LEN 64
+extern uint16_t tx_pkt_length; /**< Length of TXONLY packet */
+extern uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT]; /**< Seg. lengths */
+extern uint8_t  tx_pkt_nb_segs; /**< Number of segments in TX packets */
+
+extern uint16_t nb_pkt_per_burst;
+extern uint16_t mb_mempool_cache;
+extern struct rte_eth_thresh rx_thresh;
+extern struct rte_eth_thresh tx_thresh;
+
+/* Current forwarding state: active configuration, engine, lcores, streams. */
+extern struct fwd_config cur_fwd_config;
+extern struct fwd_engine *cur_fwd_eng;
+extern struct fwd_lcore  **fwd_lcores;
+extern struct fwd_stream **fwd_streams;
+
+extern portid_t nb_peer_eth_addrs; /**< Number of peer ethernet addresses. */
+extern struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
+
+/*
+ * Return the internal forwarding-core index of the calling lcore by
+ * scanning fwd_lcores_cpuids[] for rte_lcore_id().  Panics (no return)
+ * if the calling lcore is not a configured forwarding lcore.
+ */
+static inline unsigned int
+lcore_num(void)
+{
+       unsigned int i;
+
+       for (i = 0; i < RTE_MAX_LCORE; ++i)
+               if (fwd_lcores_cpuids[i] == rte_lcore_id())
+                       return i;
+
+       rte_panic("lcore_id of current thread not found in fwd_lcores_cpuids\n");
+}
+
+/* Return the forwarding-lcore context of the calling lcore. */
+static inline struct fwd_lcore *
+current_fwd_lcore(void)
+{
+       return fwd_lcores[lcore_num()];
+}
+
+/* Mbuf Pools */
+
+/* Build the canonical per-NUMA-socket mbuf pool name into mp_name. */
+static inline void
+mbuf_poolname_build(unsigned int sock_id, char* mp_name, int name_size)
+{
+       rte_snprintf(mp_name, name_size, "mbuf_pool_socket_%u", sock_id);
+}
+
+/*
+ * Look up the mbuf pool created for NUMA socket "sock_id" by its
+ * canonical name.  Returns NULL if no such pool exists.
+ */
+static inline struct rte_mempool *
+mbuf_pool_find(unsigned int sock_id)
+{
+       char pool_name[RTE_MEMPOOL_NAMESIZE];
+
+       mbuf_poolname_build(sock_id, pool_name, sizeof(pool_name));
+       return (rte_mempool_lookup((const char *)pool_name));
+}
+
+/**
+ * Read/Write operations on a PCI register of a port.
+ * Registers are accessed through the BAR mapped at
+ * dev_info.pci_dev->mem_resource.addr; values are converted between
+ * little-endian (device byte order) and CPU byte order.
+ */
+static inline uint32_t
+port_pci_reg_read(struct rte_port *port, uint32_t reg_off)
+{
+       void *reg_addr;
+       uint32_t reg_v;
+
+       reg_addr = (void *)((char *)port->dev_info.pci_dev->mem_resource.addr +
+                           reg_off);
+       /* volatile: force an actual MMIO load, not a cached value */
+       reg_v = *((volatile uint32_t *)reg_addr);
+       return rte_le_to_cpu_32(reg_v);
+}
+
+#define port_id_pci_reg_read(pt_id, reg_off) \
+       port_pci_reg_read(&ports[(pt_id)], (reg_off))
+
+/* Write CPU-order value "reg_v" to device register at "reg_off". */
+static inline void
+port_pci_reg_write(struct rte_port *port, uint32_t reg_off, uint32_t reg_v)
+{
+       void *reg_addr;
+
+       reg_addr = (void *)((char *)port->dev_info.pci_dev->mem_resource.addr +
+                           reg_off);
+       *((volatile uint32_t *)reg_addr) = rte_cpu_to_le_32(reg_v);
+}
+
+#define port_id_pci_reg_write(pt_id, reg_off, reg_value) \
+       port_pci_reg_write(&ports[(pt_id)], (reg_off), (reg_value))
+
+/* Prototypes */
+
+/* Command-line / CLI entry points and configuration display. */
+void launch_args_parse(int argc, char** argv);
+void prompt(void);
+void nic_stats_display(portid_t port_id);
+void nic_stats_clear(portid_t port_id);
+void port_infos_display(portid_t port_id);
+void fwd_lcores_config_display(void);
+void fwd_config_display(void);
+void rxtx_config_display(void);
+void fwd_config_setup(void);
+void set_def_fwd_config(void);
+
+/* Raw PCI register inspection/manipulation commands. */
+void port_reg_bit_display(portid_t port_id, uint32_t reg_off, uint8_t bit_pos);
+void port_reg_bit_set(portid_t port_id, uint32_t reg_off, uint8_t bit_pos,
+                     uint8_t bit_v);
+void port_reg_bit_field_display(portid_t port_id, uint32_t reg_off,
+                               uint8_t bit1_pos, uint8_t bit2_pos);
+void port_reg_bit_field_set(portid_t port_id, uint32_t reg_off,
+                           uint8_t bit1_pos, uint8_t bit2_pos, uint32_t value);
+void port_reg_display(portid_t port_id, uint32_t reg_off);
+void port_reg_set(portid_t port_id, uint32_t reg_off, uint32_t value);
+
+/* Ring descriptor dump commands. */
+void rx_ring_desc_display(portid_t port_id, queueid_t rxq_id, uint16_t rxd_id);
+void tx_ring_desc_display(portid_t port_id, queueid_t txq_id, uint16_t txd_id);
+
+/* Selection of forwarding lcores and ports. */
+void set_fwd_lcores_list(unsigned int *lcorelist, unsigned int nb_lc);
+void set_fwd_lcores_mask(uint64_t lcoremask);
+void set_fwd_lcores_number(uint16_t nb_lc);
+
+void set_fwd_ports_list(unsigned int *portlist, unsigned int nb_pt);
+void set_fwd_ports_mask(uint64_t portmask);
+void set_fwd_ports_number(uint16_t nb_pt);
+
+/* VLAN filtering / insertion commands. */
+void rx_vlan_filter_set(portid_t port_id, uint16_t vlan_id, int on);
+void rx_vlan_all_filter_set(portid_t port_id, int on);
+void tx_vlan_set(portid_t port_id, uint16_t vlan_id);
+void tx_vlan_reset(portid_t port_id);
+
+void tx_cksum_set(portid_t port_id, uint8_t cksum_mask);
+
+/* Forwarding run control. */
+void set_verbose_level(uint16_t vb_level);
+void set_tx_pkt_segments(unsigned *seg_lengths, unsigned nb_segs);
+void set_nb_pkt_per_burst(uint16_t pkt_burst);
+void set_pkt_forwarding_mode(const char *fwd_mode);
+void start_packet_forwarding(int with_tx_first);
+void stop_packet_forwarding(void);
+void pmd_test_exit(void);
+
+/* Flow director (fdir) filter management. */
+void fdir_add_signature_filter(portid_t port_id, uint8_t queue_id,
+                              struct rte_fdir_filter *fdir_filter);
+void fdir_update_signature_filter(portid_t port_id, uint8_t queue_id,
+                                 struct rte_fdir_filter *fdir_filter);
+void fdir_remove_signature_filter(portid_t port_id,
+                                 struct rte_fdir_filter *fdir_filter);
+void fdir_get_infos(portid_t port_id);
+void fdir_add_perfect_filter(portid_t port_id, uint16_t soft_id,
+                            uint8_t queue_id, uint8_t drop,
+                            struct rte_fdir_filter *fdir_filter);
+void fdir_update_perfect_filter(portid_t port_id, uint16_t soft_id,
+                               uint8_t queue_id, uint8_t drop,
+                               struct rte_fdir_filter *fdir_filter);
+void fdir_remove_perfect_filter(portid_t port_id, uint16_t soft_id,
+                               struct rte_fdir_filter *fdir_filter);
+void fdir_set_masks(portid_t port_id, struct rte_fdir_masks *fdir_masks);
+
+/*
+ * Work-around of a compilation error with ICC on invocations of the
+ * rte_be_to_cpu_16() function.
+ *
+ * NOTE(review): GCC predefines __GNUC__, not __GCC__, so the first
+ * branch below is never taken and every compiler uses the open-coded
+ * swap (or identity on big-endian).  This may be deliberate -- ICC also
+ * defines __GNUC__, and the point of this block is to avoid
+ * rte_be_to_cpu_16() under ICC -- confirm before changing the spelling.
+ */
+#ifdef __GCC__
+#define RTE_BE_TO_CPU_16(be_16_v)  rte_be_to_cpu_16((be_16_v))
+#define RTE_CPU_TO_BE_16(cpu_16_v) rte_cpu_to_be_16((cpu_16_v))
+#else
+#ifdef __big_endian__
+/* Big-endian CPU: network byte order is already CPU byte order. */
+#define RTE_BE_TO_CPU_16(be_16_v)  (be_16_v)
+#define RTE_CPU_TO_BE_16(cpu_16_v) (cpu_16_v)
+#else
+/* Little-endian CPU: swap the two bytes of the 16-bit value. */
+#define RTE_BE_TO_CPU_16(be_16_v) \
+       (uint16_t) ((((be_16_v) & 0xFF) << 8) | ((be_16_v) >> 8))
+#define RTE_CPU_TO_BE_16(cpu_16_v) \
+       (uint16_t) ((((cpu_16_v) & 0xFF) << 8) | ((cpu_16_v) >> 8))
+#endif
+#endif /* __GCC__ */
+
+#endif /* _TESTPMD_H_ */
diff --git a/app/test-pmd/txonly.c b/app/test-pmd/txonly.c
new file mode 100644 (file)
index 0000000..bf0a3e2
--- /dev/null
@@ -0,0 +1,317 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <stdarg.h>
+#include <string.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <unistd.h>
+#include <inttypes.h>
+
+#include <sys/queue.h>
+#include <sys/stat.h>
+
+#include <rte_common.h>
+#include <rte_byteorder.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_cycles.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_memzone.h>
+#include <rte_launch.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_ring.h>
+#include <rte_memory.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_memcpy.h>
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_ip.h>
+#include <rte_tcp.h>
+#include <rte_udp.h>
+#include <rte_string_fns.h>
+
+#include "testpmd.h"
+
+#define UDP_SRC_PORT 1024
+#define UDP_DST_PORT 1024
+
+#define IP_SRC_ADDR ((192 << 24) | (168 << 16) | (0 << 8) | 1)
+#define IP_DST_ADDR ((192 << 24) | (168 << 16) | (0 << 8) | 2)
+
+#define IP_DEFTTL  64   /* from RFC 1340. */
+#define IP_VERSION 0x40
+#define IP_HDRLEN  0x05 /* default IP header length == five 32-bits words. */
+#define IP_VHL_DEF (IP_VERSION | IP_HDRLEN)
+
+static struct ipv4_hdr  pkt_ip_hdr;  /**< IP header of transmitted packets. */
+static struct udp_hdr pkt_udp_hdr; /**< UDP header of transmitted packets. */
+
+/*
+ * Allocate one mbuf directly from mempool <mp>.
+ * Returns NULL when the pool is exhausted; the caller must handle it.
+ * Uses a raw rte_mempool_get() rather than the pktmbuf allocator --
+ * the packet header fields are filled in by the caller.
+ */
+static inline struct rte_mbuf *
+tx_mbuf_alloc(struct rte_mempool *mp)
+{
+       struct rte_mbuf *m;
+       void *mb;
+
+       if (rte_mempool_get(mp, &mb) < 0)
+               return NULL;
+       m = (struct rte_mbuf *)mb;
+       /* Debug-build consistency check on the freshly taken mbuf. */
+       __rte_mbuf_sanity_check(m, RTE_MBUF_PKT, 1);
+       return m;
+}
+
+/*
+ * Copy <len> bytes from <buf> into the chained segments of <pkt>,
+ * starting at byte <offset> from the beginning of the packet data.
+ * Assumes the segment chain holds at least offset+len bytes: the
+ * chain is walked without NULL checks -- TODO confirm callers
+ * guarantee this.
+ */
+static void
+copy_buf_to_pkt_segs(void* buf, unsigned len, struct rte_mbuf *pkt,
+                    unsigned offset)
+{
+       struct rte_mbuf *seg;
+       void *seg_buf;
+       unsigned copy_len;
+
+       /* Skip whole segments until <offset> falls inside <seg>. */
+       seg = pkt;
+       while (offset >= seg->pkt.data_len) {
+               offset -= seg->pkt.data_len;
+               seg = seg->pkt.next;
+       }
+       /* Fill each segment in turn until <len> bytes are written. */
+       copy_len = seg->pkt.data_len - offset;
+       seg_buf = ((char *) seg->pkt.data + offset);
+       while (len > copy_len) {
+               rte_memcpy(seg_buf, buf, (size_t) copy_len);
+               len -= copy_len;
+               buf = ((char*) buf + copy_len);
+               seg = seg->pkt.next;
+               seg_buf = seg->pkt.data;
+               /*
+                * Fix: refresh the copy quantum for the new segment.
+                * The stale value from the first segment could overrun
+                * (or undercopy) follow-on segments of a different size.
+                */
+               copy_len = seg->pkt.data_len;
+       }
+       rte_memcpy(seg_buf, buf, (size_t) len);
+}
+
+/*
+ * Copy <len> bytes from <buf> into packet <pkt> at byte <offset>.
+ * Fast path: the whole copy fits in the first segment; otherwise
+ * fall back to the segment-walking helper.
+ */
+static inline void
+copy_buf_to_pkt(void* buf, unsigned len, struct rte_mbuf *pkt, unsigned offset)
+{
+       if (offset + len <= pkt->pkt.data_len) {
+               rte_memcpy(((char *) pkt->pkt.data + offset), buf, (size_t) len);
+               return;
+       }
+       copy_buf_to_pkt_segs(buf, len, pkt, offset);
+}
+
+/*
+ * Initialize the template UDP and IPv4 headers used for every
+ * transmitted packet.  <pkt_data_len> is the UDP payload length in
+ * bytes; the UDP and IP length fields are derived from it.
+ */
+static void
+setup_pkt_udp_ip_headers(struct ipv4_hdr *ip_hdr,
+                        struct udp_hdr *udp_hdr,
+                        uint16_t pkt_data_len)
+{
+       uint16_t *ptr16;
+       uint32_t ip_cksum;
+       uint16_t pkt_len;
+
+       /*
+        * Initialize UDP header.
+        */
+       pkt_len = (uint16_t) (pkt_data_len + sizeof(struct udp_hdr));
+       udp_hdr->src_port = rte_cpu_to_be_16(UDP_SRC_PORT);
+       udp_hdr->dst_port = rte_cpu_to_be_16(UDP_DST_PORT);
+       udp_hdr->dgram_len      = RTE_CPU_TO_BE_16(pkt_len);
+       udp_hdr->dgram_cksum    = 0; /* No UDP checksum. */
+
+       /*
+        * Initialize IP header.
+        */
+       pkt_len = (uint16_t) (pkt_len + sizeof(struct ipv4_hdr));
+       ip_hdr->version_ihl   = IP_VHL_DEF;
+       ip_hdr->type_of_service   = 0;
+       ip_hdr->fragment_offset = 0;
+       ip_hdr->time_to_live   = IP_DEFTTL;
+       ip_hdr->next_proto_id = IPPROTO_UDP;
+       ip_hdr->packet_id = 0;
+       ip_hdr->total_length   = RTE_CPU_TO_BE_16(pkt_len);
+       ip_hdr->src_addr = rte_cpu_to_be_32(IP_SRC_ADDR);
+       ip_hdr->dst_addr = rte_cpu_to_be_32(IP_DST_ADDR);
+
+       /*
+        * Compute IP header checksum.
+        * Sum the ten 16-bit words of the header, skipping word 5
+        * (the hdr_checksum field itself, treated as zero).
+        */
+       ptr16 = (uint16_t*) ip_hdr;
+       ip_cksum = 0;
+       ip_cksum += ptr16[0]; ip_cksum += ptr16[1];
+       ip_cksum += ptr16[2]; ip_cksum += ptr16[3];
+       ip_cksum += ptr16[4];
+       ip_cksum += ptr16[6]; ip_cksum += ptr16[7];
+       ip_cksum += ptr16[8]; ip_cksum += ptr16[9];
+
+       /*
+        * Reduce 32 bit checksum to 16 bits and complement it.
+        * The fold is one's-complement carry propagation; summing the
+        * raw in-memory words means no byte swap is needed on store.
+        */
+       ip_cksum = ((ip_cksum & 0xFFFF0000) >> 16) +
+               (ip_cksum & 0x0000FFFF);
+       if (ip_cksum > 65535)
+               ip_cksum -= 65535;
+       ip_cksum = (~ip_cksum) & 0x0000FFFF;
+       /* 0 -> 0xFFFF is harmless: 0xFFFF is +0 in one's complement. */
+       if (ip_cksum == 0)
+               ip_cksum = 0xFFFF;
+       ip_hdr->hdr_checksum = (uint16_t) ip_cksum;
+}
+
+/*
+ * Transmit a burst of multi-segments packets: allocate the segment
+ * chains, prepend Ethernet + precomputed IP/UDP header templates,
+ * hand the burst to the PMD and free whatever was not transmitted.
+ */
+static void
+pkt_burst_transmit(struct fwd_stream *fs)
+{
+       struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
+       struct rte_mbuf *pkt;
+       struct rte_mbuf *pkt_seg;
+       struct rte_mempool *mbp;
+       struct ether_hdr eth_hdr;
+       uint16_t nb_tx;
+       uint16_t nb_pkt;
+       uint16_t vlan_tci;
+       uint16_t ol_flags;
+       uint8_t  i; /* segment index; presumably tx_pkt_nb_segs <= 255 -- TODO confirm */
+#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
+       uint64_t start_tsc;
+       uint64_t end_tsc;
+       uint64_t core_cycles;
+#endif
+
+#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
+       start_tsc = rte_rdtsc();
+#endif
+
+       /* Per-lcore mempool, per-port VLAN tag and TX offload flags. */
+       mbp = current_fwd_lcore()->mbp;
+       vlan_tci = ports[fs->tx_port].tx_vlan_id;
+       ol_flags = ports[fs->tx_port].tx_ol_flags;
+       /* Build nb_pkt_per_burst packets of tx_pkt_nb_segs segments each. */
+       for (nb_pkt = 0; nb_pkt < nb_pkt_per_burst; nb_pkt++) {
+               pkt = tx_mbuf_alloc(mbp);
+               if (pkt == NULL) {
+               nomore_mbuf:
+                       /* Pool exhausted: send what was built so far. */
+                       if (nb_pkt == 0)
+                               return;
+                       break;
+               }
+               pkt->pkt.data_len = tx_pkt_seg_lengths[0];
+               pkt_seg = pkt;
+               /* Chain the remaining segments behind the head mbuf. */
+               for (i = 1; i < tx_pkt_nb_segs; i++) {
+                       pkt_seg->pkt.next = tx_mbuf_alloc(mbp);
+                       if (pkt_seg->pkt.next == NULL) {
+                               /* Frees the whole partial chain. */
+                               rte_pktmbuf_free(pkt);
+                               goto nomore_mbuf;
+                       }
+                       pkt_seg = pkt_seg->pkt.next;
+                       pkt_seg->pkt.data_len = tx_pkt_seg_lengths[i];
+               }
+               pkt_seg->pkt.next = NULL; /* Last segment of packet. */
+
+               /*
+                * Initialize Ethernet header.
+                */
+               ether_addr_copy(&peer_eth_addrs[fs->peer_addr],&eth_hdr.d_addr);
+               ether_addr_copy(&ports[fs->tx_port].eth_addr, &eth_hdr.s_addr);
+               eth_hdr.ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+
+               /*
+                * Copy headers in first packet segment(s).
+                */
+               copy_buf_to_pkt(&eth_hdr, sizeof(eth_hdr), pkt, 0);
+               copy_buf_to_pkt(&pkt_ip_hdr, sizeof(pkt_ip_hdr), pkt,
+                               sizeof(struct ether_hdr));
+               copy_buf_to_pkt(&pkt_udp_hdr, sizeof(pkt_udp_hdr), pkt,
+                               sizeof(struct ether_hdr) +
+                               sizeof(struct ipv4_hdr));
+
+               /*
+                * Complete first mbuf of packet and append it to the
+                * burst of packets to be transmitted.
+                */
+               pkt->pkt.nb_segs = tx_pkt_nb_segs;
+               pkt->pkt.pkt_len = tx_pkt_length;
+               pkt->ol_flags = ol_flags;
+               pkt->pkt.vlan_tci  = vlan_tci;
+               pkt->pkt.l2_len = sizeof(struct ether_hdr);
+               pkt->pkt.l3_len = sizeof(struct ipv4_hdr);
+               pkts_burst[nb_pkt] = pkt;
+       }
+       nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_pkt);
+       fs->tx_packets += nb_tx;
+
+#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
+       fs->tx_burst_stats.pkt_burst_spread[nb_tx]++;
+#endif
+       /* PMD queue full: log once (on the first drop), account the
+        * drop and free the untransmitted mbufs to avoid leaking them. */
+       if (unlikely(nb_tx < nb_pkt)) {
+               if (verbose_level > 0 && fs->fwd_dropped == 0)
+                       printf("port %d tx_queue %d - drop "
+                              "(nb_pkt:%u - nb_tx:%u)=%u packets\n",
+                              fs->tx_port, fs->tx_queue,
+                              (unsigned) nb_pkt, (unsigned) nb_tx,
+                              (unsigned) (nb_pkt - nb_tx));
+               fs->fwd_dropped += (nb_pkt - nb_tx);
+               do {
+                       rte_pktmbuf_free(pkts_burst[nb_tx]);
+               } while (++nb_tx < nb_pkt);
+       }
+
+#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
+       end_tsc = rte_rdtsc();
+       core_cycles = (end_tsc - start_tsc);
+       fs->core_cycles = (uint64_t) (fs->core_cycles + core_cycles);
+#endif
+}
+
+/*
+ * Per-port initialization of the "txonly" engine: precompute the
+ * IP/UDP header templates from the configured total packet length.
+ * The port id is unused because the templates are global.
+ */
+static void
+tx_only_begin(__attribute__((unused)) portid_t pi)
+{
+       uint16_t pkt_data_len;
+
+       /* UDP payload = total length minus Ethernet/IP/UDP headers. */
+       pkt_data_len = (uint16_t) (tx_pkt_length - (sizeof(struct ether_hdr) +
+                                                   sizeof(struct ipv4_hdr) +
+                                                   sizeof(struct udp_hdr)));
+       setup_pkt_udp_ip_headers(&pkt_ip_hdr, &pkt_udp_hdr, pkt_data_len);
+}
+
+/* Forwarding engine descriptor: "txonly" generates packets instead of
+ * forwarding received ones; no per-port cleanup callback is needed. */
+struct fwd_engine tx_only_engine = {
+       .fwd_mode_name  = "txonly",
+       .port_fwd_begin = tx_only_begin,
+       .port_fwd_end   = NULL,
+       .packet_fwd     = pkt_burst_transmit,
+};
diff --git a/app/test/Makefile b/app/test/Makefile
new file mode 100644 (file)
index 0000000..80d210d
--- /dev/null
@@ -0,0 +1,82 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# application name (this Makefile builds the "test" app, not a library)
+#
+APP = test
+
+#
+# all sources are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_APP_TEST) := commands.c
+SRCS-$(CONFIG_RTE_APP_TEST) += test.c
+SRCS-$(CONFIG_RTE_APP_TEST) += test_pci.c
+SRCS-$(CONFIG_RTE_APP_TEST) += test_prefetch.c
+SRCS-$(CONFIG_RTE_APP_TEST) += test_byteorder.c
+SRCS-$(CONFIG_RTE_APP_TEST) += test_per_lcore.c
+SRCS-$(CONFIG_RTE_APP_TEST) += test_atomic.c
+SRCS-$(CONFIG_RTE_APP_TEST) += test_malloc.c
+SRCS-$(CONFIG_RTE_APP_TEST) += test_cycles.c
+SRCS-$(CONFIG_RTE_APP_TEST) += test_spinlock.c
+SRCS-$(CONFIG_RTE_APP_TEST) += test_memory.c
+SRCS-$(CONFIG_RTE_APP_TEST) += test_memzone.c
+SRCS-$(CONFIG_RTE_APP_TEST) += test_ring.c
+SRCS-$(CONFIG_RTE_APP_TEST) += test_rwlock.c
+SRCS-$(CONFIG_RTE_APP_TEST) += test_timer.c
+SRCS-$(CONFIG_RTE_APP_TEST) += test_mempool.c
+SRCS-$(CONFIG_RTE_APP_TEST) += test_mbuf.c
+SRCS-$(CONFIG_RTE_APP_TEST) += test_logs.c
+SRCS-$(CONFIG_RTE_APP_TEST) += test_memcpy.c
+SRCS-$(CONFIG_RTE_APP_TEST) += test_hash.c
+SRCS-$(CONFIG_RTE_APP_TEST) += test_lpm.c
+SRCS-$(CONFIG_RTE_APP_TEST) += test_debug.c
+SRCS-$(CONFIG_RTE_APP_TEST) += test_errno.c
+SRCS-$(CONFIG_RTE_APP_TEST) += test_tailq.c
+SRCS-$(CONFIG_RTE_APP_TEST) += test_string_fns.c
+SRCS-$(CONFIG_RTE_APP_TEST) += test_mp_secondary.c
+SRCS-$(CONFIG_RTE_APP_TEST) += test_cpuflags.c
+SRCS-$(CONFIG_RTE_APP_TEST) += test_eal_flags.c
+SRCS-$(CONFIG_RTE_APP_TEST) += test_alarm.c
+SRCS-$(CONFIG_RTE_APP_TEST) += test_interrupts.c
+SRCS-$(CONFIG_RTE_APP_TEST) += test_version.c
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+# this application needs libraries first
+DEPDIRS-$(CONFIG_RTE_APP_TEST) += lib
+
+include $(RTE_SDK)/mk/rte.app.mk
diff --git a/app/test/autotest.py b/app/test/autotest.py
new file mode 100755 (executable)
index 0000000..2609142
--- /dev/null
@@ -0,0 +1,664 @@
+#!/usr/bin/python
+
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+# Script that uses qemu controlled by python-pexpect to check that
+# all autotests are working in the baremetal environment.
+#
+# Usage: autotest.py <binary-or-iso-basename> <directory> <target>
+# NOTE(review): sys.argv is indexed without a length check; fewer than
+# 4 arguments raises IndexError.  argv[2] (directory) is not used in
+# this part of the script -- presumably used further down; verify.
+
+import sys, pexpect, time, os, re
+
+directory = sys.argv[2]
+target = sys.argv[3]
+log_file = "%s.txt"%(target)
+
+# "baremetal" targets boot an ISO under qemu; otherwise argv[1] is run
+# directly as an EAL binary (-c f -n 4).
+if "baremetal" in target:
+    cmdline  = "qemu-system-x86_64 -cdrom %s.iso -boot d "%(sys.argv[1])
+    cmdline    += "-m 2000 -smp 4 -nographic -net nic,model=e1000"
+    platform = "QEMU x86_64"
+else:
+    cmdline  = "%s -c f -n 4"%(sys.argv[1])
+    # best-effort: the platform description file may not exist
+    try:
+        platform = open("/root/rte_platform_model.txt").read()
+    except:
+        platform = "unknown"
+
+print cmdline
+
+# header prepended to every generated restructured-text report
+report_hdr=""".. <COPYRIGHT_TAG>
+
+"""
+
+# optional title filters applied in AutoTest.start() (None = no filter)
+test_whitelist=None
+test_blacklist=None
+
+class SubTest:
+    "Defines a subtest"
+    def __init__(self, title, function, command=None, timeout=10, genreport=None):
+        # title: subtest name shown in reports and white/black lists
+        # function: callable(child, command=..., timeout=...) -> (rc, msg)
+        # command: command line sent to the target (None if unused)
+        # timeout: seconds allowed for the subtest
+        # genreport: optional callable returning extra report text
+        self.title = title
+        self.function = function
+        self.command = command
+        self.timeout = timeout
+        self.genreport = genreport
+
+class AutoTest:
+    """This class contains all methods needed to launch several
+    automatic tests, archive test results, log, and generate a nice
+    test report in restructured text"""
+
+    # NOTE(review): these are class attributes, so test_list and
+    # report_list are shared by every AutoTest instance; acceptable
+    # here because the script creates a single instance.
+    title = "new"
+    mainlog = None
+    logbuf = None
+    literal = 0
+    test_list = []
+    report_list = []
+    child = None
+
+    def __init__(self, pexpectchild, filename, mode):
+        "Init the Autotest class"
+        self.mainlog = file(filename, mode)
+        self.child = pexpectchild
+        # The instance itself acts as the pexpect log file (see the
+        # write/flush/close file API at the bottom of the class).
+        # NOTE(review): self.reportbuf is only created in start(); any
+        # output logged before start() runs would raise AttributeError
+        # in write() -- confirm nothing is logged that early.
+        pexpectchild.logfile = self
+    def register(self, filename, title, subtest_list):
+        "Register a test with a list of subtests"
+        test = {}
+        test["filename"] = filename
+        test["title"] = title
+        test["subtest_list"] = subtest_list
+        self.test_list.append(test)
+
+    def start(self):
+        "start the tests, and fill the internal report_list field"
+        for t in self.test_list:
+            report = {}
+            report["date"] = time.asctime()
+            report["title"] = t["title"]
+            report["filename"] = t["filename"]
+            report["subreport_list"] = []
+            report["fails"] = 0
+            report["success"] = 0
+            report["subreport_list"] = []
+            for st in t["subtest_list"]:
+                # honor the global white/black lists (None = no filter)
+                if test_whitelist is not None and st.title not in test_whitelist:
+                    continue
+                if test_blacklist is not None and st.title in test_blacklist:
+                    continue
+                subreport = {}
+                # reset the per-subtest log capture (filled by write())
+                self.reportbuf = ""
+                subreport["title"] = st.title
+                subreport["func"] = st.function
+                subreport["command"] = st.command
+                subreport["timeout"] = st.timeout
+                subreport["genreport"] = st.genreport
+
+                # launch subtest
+                print "%s (%s): "%(subreport["title"], subreport["command"]),
+                sys.stdout.flush()
+                start = time.time()
+                res = subreport["func"](self.child,
+                                        command = subreport["command"],
+                                        timeout = subreport["timeout"])
+                # NOTE(review): rebinds the outer loop variable 't' (the
+                # test dict) to the elapsed time; harmless because 't'
+                # is not read again in this iteration, but fragile.
+                t = int(time.time() - start)
+
+                subreport["time"] = "%dmn%d"%(t/60, t%60)
+                subreport["result"] = res[0] # 0 or -1
+                subreport["result_str"] = res[1] # cause of fail
+                subreport["logs"] = self.reportbuf
+                print "%s [%s]"%(subreport["result_str"], subreport["time"])
+                if subreport["result"] == 0:
+                    report["success"] += 1
+                else:
+                    report["fails"] += 1
+                report["subreport_list"].append(subreport)
+            self.report_list.append(report)
+
+    def gen_report(self):
+        # Write one restructured-text report file per registered test.
+        for report in self.report_list:
+            # main report header and stats
+            self.literal = 0
+            reportlog = file(report["filename"], "w")
+            reportlog.write(report_hdr)
+            reportlog.write(report["title"] + "\n")
+            reportlog.write(re.sub(".", "=", report["title"]) + "\n\n")
+            reportlog.write("Autogenerated test report:\n\n" )
+            reportlog.write("- date: **%s**\n"%(report["date"]))
+            reportlog.write("- target: **%s**\n"%(target))
+            reportlog.write("- success: **%d**\n"%(report["success"]))
+            reportlog.write("- fails: **%d**\n"%(report["fails"]))
+            reportlog.write("- platform: **%s**\n\n"%(platform))
+
+            # summary
+            reportlog.write(".. csv-table:: Test results summary\n")
+            reportlog.write('   :header: "Name", "Result"\n\n')
+            for subreport in report["subreport_list"]:
+                if subreport["result"] == 0:
+                    res_str = "Success"
+                else:
+                    res_str = "Failure"
+                reportlog.write('   "%s", "%s"\n'%(subreport["title"], res_str))
+            reportlog.write('\n')
+
+            # subreports
+            for subreport in report["subreport_list"]:
+                # print subtitle
+                reportlog.write(subreport["title"] + "\n")
+                reportlog.write(re.sub(".", "-", subreport["title"]) + "\n\n")
+                # print logs
+                reportlog.write("::\n  \n  ")
+                s = subreport["logs"].replace("\n", "\n  ")
+                reportlog.write(s)
+                # print result
+                reportlog.write("\n\n")
+                reportlog.write("**" + subreport["result_str"] + "**\n\n")
+                # custom genreport
+                if subreport["genreport"] != None:
+                    s = subreport["genreport"]()
+                    reportlog.write(s)
+
+            reportlog.close()
+
+        # displayed on console
+        # NOTE(review): 'report' leaks from the loop above, so this
+        # summary reflects only the LAST registered test file.
+        print
+        print "-------------------------"
+        print
+        if report["fails"] == 0:
+            print "All test OK"
+        else:
+            print "%s test(s) failed"%(report["fails"])
+
+    # file API, to store logs from pexpect
+    def write(self, buf):
+        # strip carriage returns, then mirror into the main log and
+        # the current per-subtest buffer
+        s = buf[:]
+        s = s.replace("\r", "")
+        self.mainlog.write(s)
+        self.reportbuf += s
+    def flush(self):
+        self.mainlog.flush()
+    def close(self):
+        self.mainlog.close()
+
+
+# Try to match prompt: return 0 on success, else return -1
+def wait_prompt(child):
+    """Poke the command line up to 3 times (1 second each) looking for
+    the "RTE>>" prompt; an empty line is sent after each attempt."""
+    for i in range(3):
+        index = child.expect(["RTE>>", pexpect.TIMEOUT], timeout = 1)
+        child.sendline("")
+        if index == 0:
+            return 0
+    print "Cannot find prompt"
+    return -1
+
+# Try to match prompt after boot: return 0 on success, else return -1
+def wait_boot(child):
+    """Wait up to 2 minutes for the first "RTE>>" prompt after boot,
+    then fall back to the short wait_prompt() retries."""
+    index = child.expect(["RTE>>", pexpect.TIMEOUT],
+                         timeout = 120)
+    if index == 0:
+        return 0
+    if (wait_prompt(child) == -1):
+        print "Target did not boot, failed"
+        return -1
+    return 0
+
+# quit RTE
+def quit(child):
+    """Cleanly exit the RTE command line; returns (rc, message)."""
+    if wait_boot(child) != 0:
+        return -1, "Cannot find prompt"
+    child.sendline("quit")
+    return 0, "Success"
+
+# Default function to launch an autotest that does not need to
+# interact with the user. Basically, this function calls the autotest
+# function through command line interface, then check that it displays
+# "Test OK" or "Test Failed".
+def default_autotest(child, command, timeout=10):
+    """Send <command> and wait for the standard "Test OK"/"Test Failed"
+    verdict; returns (0, "Success") or (-1, reason)."""
+    if wait_prompt(child) != 0:
+        return -1, "Failed: cannot find prompt"
+    child.sendline(command)
+    index = child.expect(["Test OK", "Test Failed",
+                          pexpect.TIMEOUT], timeout = timeout)
+    if index == 1:
+        return -1, "Failed"
+    elif index == 2:
+        return -1, "Failed [Timeout]"
+    return 0, "Success"
+
+# wait boot
+def boot_autotest(child, **kargs):
+    """Pseudo-subtest that only checks the target booted to a prompt."""
+    if wait_boot(child) != 0:
+        return -1, "Cannot find prompt"
+    return 0, "Success"
+
+# Test memory dump. We need to check that at least one memory zone is
+# displayed.
+def memory_autotest(child, command, **kargs):
+    """Run the memory autotest and verify that at least one memory
+    segment line with a strictly positive size is printed."""
+    if wait_prompt(child) != 0:
+        return -1, "Failed: cannot find prompt"
+    child.sendline(command)
+    # matches the first dumped segment only; its len is captured in hex
+    regexp = "phys:0x[0-9a-f]*, len:0x([0-9a-f]*), virt:0x[0-9a-f]*, socket_id:[0-9]*"
+    index = child.expect([regexp, pexpect.TIMEOUT], timeout = 180)
+    if index != 0:
+        return -1, "Failed: timeout"
+    size = int(child.match.groups()[0], 16)
+    if size <= 0:
+        return -1, "Failed: bad size"
+    index = child.expect(["Test OK", "Test Failed",
+                          pexpect.TIMEOUT], timeout = 10)
+    if index == 1:
+        return -1, "Failed: C code returned an error"
+    elif index == 2:
+        return -1, "Failed: timeout"
+    return 0, "Success"
+
+# Test some libc functions including scanf. This requires a
+# interaction with the user (simulated in expect), so we cannot use
+# default_autotest() here.
+def string_autotest(child, command, **kargs):
+    """Run the string autotest, answering the scanf prompt with a
+    known number and checking it is echoed back."""
+    if wait_prompt(child) != 0:
+        return -1, "Failed: cannot find prompt"
+    child.sendline(command)
+    index = child.expect(["Now, test scanf, enter this number",
+                          pexpect.TIMEOUT], timeout = 10)
+    if index != 0:
+        return -1, "Failed: timeout"
+    child.sendline("123456")
+    index = child.expect(["number=123456", pexpect.TIMEOUT], timeout = 10)
+    if index != 0:
+        return -1, "Failed: timeout (2)"
+    index = child.expect(["Test OK", "Test Failed",
+                          pexpect.TIMEOUT], timeout = 10)
+    # NOTE(review): a timeout here is also reported as a C error
+    if index != 0:
+        return -1, "Failed: C code returned an error"
+    return 0, "Success"
+
+# Test spinlock. This requires to check the order of displayed lines:
+# we cannot use default_autotest() here.
+def spinlock_autotest(child, command, **kargs):
+    """Run the spinlock autotest and check that the per-core hello
+    messages (plain and recursive) appear in non-decreasing core
+    order."""
+    i = 0     # highest core id seen in plain messages
+    ir = 0    # highest core id seen in recursive-lock messages
+    if wait_prompt(child) != 0:
+        return -1, "Failed: cannot find prompt"
+    child.sendline(command)
+    while True:
+        index = child.expect(["Test OK",
+                              "Test Failed",
+                              "Hello from core ([0-9]*) !",
+                              "Hello from within recursive locks from ([0-9]*) !",
+                              pexpect.TIMEOUT], timeout = 20)
+        # ok
+        if index == 0:
+            break
+
+        # message, check ordering
+        elif index == 2:
+            if int(child.match.groups()[0]) < i:
+                return -1, "Failed: bad order"
+            i = int(child.match.groups()[0])
+        elif index == 3:
+            if int(child.match.groups()[0]) < ir:
+                return -1, "Failed: bad order"
+            ir = int(child.match.groups()[0])
+
+        # fail
+        else:
+            return -1, "Failed: timeout or error"
+
+    return 0, "Success"
+
+
+# Test rwlock. This requires to check the order of displayed lines:
+# we cannot use default_autotest() here.
+def rwlock_autotest(child, command, **kargs):
+    """Run the rwlock autotest: per-core hello messages must arrive in
+    non-decreasing core order, and the master write-lock message must
+    be the last one (flagged with the 0xffff sentinel)."""
+    i = 0
+    if wait_prompt(child) != 0:
+        return -1, "Failed: cannot find prompt"
+    child.sendline(command)
+    while True:
+        index = child.expect(["Test OK",
+                              "Test Failed",
+                              "Hello from core ([0-9]*) !",
+                              "Global write lock taken on master core ([0-9]*)",
+                              pexpect.TIMEOUT], timeout = 10)
+        # ok
+        if index == 0:
+            if i != 0xffff:
+                return -1, "Failed: a message is missing"
+            break
+
+        # message, check ordering
+        elif index == 2:
+            if int(child.match.groups()[0]) < i:
+                return -1, "Failed: bad order"
+            i = int(child.match.groups()[0])
+
+        # must be the last message, check ordering
+        elif index == 3:
+            i = 0xffff
+
+        # fail
+        else:
+            return -1, "Failed: timeout or error"
+
+    return 0, "Success"
+
+# Test logs. This requires to check the order of displayed lines:
+# we cannot use default_autotest() here.
+def logs_autotest(child, command, **kargs):
+    i = 0
+    if wait_prompt(child) != 0:
+        return -1, "Failed: cannot find prompt"
+    child.sendline(command)
+
+    log_list = [
+        "TESTAPP1: this is a debug level message",
+        "TESTAPP1: this is a info level message",
+        "TESTAPP1: this is a warning level message",
+        "TESTAPP2: this is a info level message",
+        "TESTAPP2: this is a warning level message",
+        "TESTAPP1: this is a debug level message",
+        "TESTAPP1: this is a debug level message",
+        "TESTAPP1: this is a info level message",
+        "TESTAPP1: this is a warning level message",
+        "TESTAPP2: this is a info level message",
+        "TESTAPP2: this is a warning level message",
+        "TESTAPP1: this is a debug level message",
+        ]
+
+    for log_msg in log_list:
+        index = child.expect([log_msg,
+                              "Test OK",
+                              "Test Failed",
+                              pexpect.TIMEOUT], timeout = 10)
+
+        # not ok
+        if index != 0:
+            return -1, "Failed: timeout or error"
+
+    index = child.expect(["Test OK",
+                          "Test Failed",
+                          pexpect.TIMEOUT], timeout = 10)
+
+    return 0, "Success"
+
# Test timers. This requires to check the order of displayed lines:
# we cannot use default_autotest() here.
def timer_autotest(child, command, **kargs):
    """Run "timer_autotest" and validate the per-timer constraints.

    child: pexpect spawn object attached to the test application.
    command: command line to send (i.e. "timer_autotest").
    Returns (0, "Success") on success or (-1, reason) on failure.
    """
    if wait_prompt(child) != 0:
        return -1, "Failed: cannot find prompt"
    child.sendline(command)

    index = child.expect(["Start timer stress tests \(30 seconds\)",
                          "Test Failed",
                          pexpect.TIMEOUT], timeout = 10)

    # not ok
    if index != 0:
        return -1, "Failed: timeout or error"

    index = child.expect(["Start timer basic tests \(30 seconds\)",
                          "Test Failed",
                          pexpect.TIMEOUT], timeout = 40)

    # not ok
    if index != 0:
        return -1, "Failed: timeout or error (2)"

    # last lcore on which each of the 4 timers was seen (-1 = not yet seen)
    lcore_tim0 = -1
    lcore_tim1 = -1
    lcore_tim2 = -1
    lcore_tim3 = -1

    while True:
        index = child.expect(["TESTTIMER: ([0-9]*): callback id=([0-9]*) count=([0-9]*) on core ([0-9]*)",
                              "Test OK",
                              "Test Failed",
                              pexpect.TIMEOUT], timeout = 10)

        # "Test OK": all callback lines have been checked
        if index == 1:
            break

        if index != 0:
            return -1, "Failed: timeout or error (3)"

        try:
            # t is the timestamp printed by the test (currently unused)
            t = int(child.match.groups()[0])
            timer_id = int(child.match.groups()[1])
            cnt = int(child.match.groups()[2])
            lcore = int(child.match.groups()[3])
        except:
            return -1, "Failed: cannot parse output"

        # timer0 always expires on the same core when cnt < 20
        if timer_id == 0:
            if lcore_tim0 == -1:
                lcore_tim0 = lcore
            elif lcore != lcore_tim0 and cnt < 20:
                return -1, "Failed: lcore != lcore_tim0 (%d, %d)"%(lcore, lcore_tim0)
            if cnt > 21:
                return -1, "Failed: tim0 cnt > 21"

        # timer1 each time expires on a different core
        if timer_id == 1:
            if lcore == lcore_tim1:
                return -1, "Failed: lcore == lcore_tim1 (%d, %d)"%(lcore, lcore_tim1)
            lcore_tim1 = lcore
            if cnt > 10:
                return -1, "Failed: tim1 cnt > 10"

        # timer2 always expires on the same core
        if timer_id == 2:
            if lcore_tim2 == -1:
                lcore_tim2 = lcore
            elif lcore != lcore_tim2:
                return -1, "Failed: lcore != lcore_tim2 (%d, %d)"%(lcore, lcore_tim2)
            if cnt > 30:
                return -1, "Failed: tim2 cnt > 30"

        # timer3 always expires on the same core
        if timer_id == 3:
            if lcore_tim3 == -1:
                lcore_tim3 = lcore
            elif lcore != lcore_tim3:
                return -1, "Failed: lcore_tim3 changed (%d -> %d)"%(lcore_tim3, lcore)
            if cnt > 30:
                return -1, "Failed: tim3 cnt > 30"

    # must be 2 different cores
    if lcore_tim0 == lcore_tim3:
        return -1, "Failed: lcore_tim0 (%d) == lcore_tim3 (%d)"%(lcore_tim0, lcore_tim3)

    return 0, "Success"
+
# Ring autotest
def ring_autotest(child, command, timeout=10):
    """Run "ring_autotest", then exercise set_watermark/set_quota and
    verify via "dump_ring" that both values were applied.

    child: pexpect spawn object attached to the test application.
    command: command line to send (i.e. "ring_autotest").
    timeout: seconds to wait for the "Test OK" result.
    Returns (0, "Success") on success or (-1, reason) on failure.
    """
    if wait_prompt(child) != 0:
        return -1, "Failed: cannot find prompt"
    child.sendline(command)
    index = child.expect(["Test OK", "Test Failed",
                          pexpect.TIMEOUT], timeout = timeout)
    if index != 0:
        return -1, "Failed"

    # tweak the ring named "test" (created by the autotest), then dump
    # it and check that both new values are reported back
    child.sendline("set_watermark test 100")
    child.sendline("set_quota test 16")
    child.sendline("dump_ring test")
    index = child.expect(["  watermark=100",
                          pexpect.TIMEOUT], timeout = 1)
    if index != 0:
        return -1, "Failed: bad watermark"

    # the quota is displayed as "bulk_default" by dump_ring
    index = child.expect(["  bulk_default=16",
                          pexpect.TIMEOUT], timeout = 1)
    if index != 0:
        return -1, "Failed: bad quota"

    return 0, "Success"
+
def ring_genreport():
    """Build the reST "Performance curves" section of the ring report.

    Runs graph_ring.py on the current target's result file (which
    produces .svg graphs in the working directory), then returns a reST
    snippet referencing each relevant graph.

    NOTE(review): assumes the RTE_SDK environment variable is set;
    os.path.join() would raise if os.getenv() returns None -- confirm
    callers guarantee it. Also relies on the global 'target'.
    """
    s  = "Performance curves\n"
    s += "------------------\n\n"
    sdk = os.getenv("RTE_SDK")
    script = os.path.join(sdk, "app/test/graph_ring.py")
    title ='"Autotest %s %s"'%(target, time.asctime())
    filename = target + ".txt"
    os.system("/usr/bin/python %s %s %s"%(script, filename, title))
    for f in os.listdir("."):
        if not f.startswith("ring"):
            continue
        if not f.endswith(".svg"):
            continue
        # skip single producer/consumer
        if "_sc" in f:
            continue
        if "_sp" in f:
            continue
        # reference the .png name: the .svg files are presumably
        # converted to .png before publication -- TODO confirm
        f = f[:-4] + ".png"
        s += ".. figure:: ../../images/autotests/%s/%s\n"%(target, f)
        s += "   :width: 50%\n\n"
        s += "   %s\n\n"%(f)
    return s
+
def mempool_genreport():
    """Build the reST "Performance curves" section of the mempool report.

    Invokes graph_mempool.py on the current target's result file (which
    generates .svg graphs in the working directory) and returns a reST
    snippet referencing each relevant graph.
    """
    parts = ["Performance curves\n",
             "------------------\n\n"]
    sdk = os.getenv("RTE_SDK")
    script = os.path.join(sdk, "app/test/graph_mempool.py")
    title ='"Autotest %s %s"'%(target, time.asctime())
    filename = target + ".txt"
    os.system("/usr/bin/python %s %s %s"%(script, filename, title))
    for entry in os.listdir("."):
        if not (entry.startswith("mempool") and entry.endswith(".svg")):
            continue
        # skip when n_keep = 128
        if "_128." in entry:
            continue
        png = entry[:-4] + ".png"
        parts.append(".. figure:: ../../images/autotests/%s/%s\n"%(target, png))
        parts.append("   :width: 50%\n\n")
        parts.append("   %s\n\n"%(png))
    return "".join(parts)
+
#
# main
#

# Optional 4th command-line argument: comma-separated list of tests.
# A leading '-' turns it into a blacklist, otherwise it is a whitelist.
# NOTE(review): test_whitelist/test_blacklist are presumably consumed by
# the AutoTest/SubTest machinery defined earlier in this file -- confirm.
if len(sys.argv) > 4:
    testlist=sys.argv[4].split(',')
    if testlist[0].startswith('-'):
        testlist[0]=testlist[0].lstrip('-')
        test_blacklist=testlist
    else:
        test_whitelist=testlist

# spawn the test application and attach the report generator to it
child = pexpect.spawn(cmdline)
autotest = AutoTest(child, log_file,'w')

# timeout for memcpy and hash test
# (baremetal targets are much slower, hence the larger value)
if "baremetal" in target:
    timeout = 60*180
else:
    timeout = 180

autotest.register("eal_report.rst", "EAL-%s"%(target),
                  [ SubTest("Boot", boot_autotest, "boot_autotest"),
                    SubTest("EAL Flags", default_autotest, "eal_flags_autotest"),
                    SubTest("Version", default_autotest, "version_autotest"),
                    SubTest("PCI", default_autotest, "pci_autotest"),
                    SubTest("Memory", memory_autotest, "memory_autotest"),
                    SubTest("Lcore launch", default_autotest, "per_lcore_autotest"),
                    SubTest("Spinlock", spinlock_autotest, "spinlock_autotest"),
                    SubTest("Rwlock", rwlock_autotest, "rwlock_autotest"),
                    SubTest("Atomic", default_autotest, "atomic_autotest"),
                    SubTest("Byte order", default_autotest, "byteorder_autotest"),
                    SubTest("Prefetch", default_autotest, "prefetch_autotest"),
                    SubTest("Debug", default_autotest, "debug_autotest"),
                    SubTest("Cycles", default_autotest, "cycles_autotest"),
                    SubTest("Logs", logs_autotest, "logs_autotest"),
                    SubTest("Memzone", default_autotest, "memzone_autotest"),
                    SubTest("Cpu flags", default_autotest, "cpuflags_autotest"),
                    SubTest("Memcpy", default_autotest, "memcpy_autotest", timeout),
                    SubTest("String Functions", default_autotest, "string_autotest"),
                    SubTest("Alarm", default_autotest, "alarm_autotest", 30),
                    SubTest("Interrupt", default_autotest, "interrupt_autotest"),
                    ])

# the ring report also generates performance graphs (ring_genreport)
autotest.register("ring_report.rst", "Ring-%s"%(target),
                  [ SubTest("Ring", ring_autotest, "ring_autotest", 30*60,
                            ring_genreport)
                    ])

# longer timeout for the mempool/mbuf/hash/lpm tests below
if "baremetal" in target:
    timeout = 60*60*3
else:
    timeout = 60*30

autotest.register("mempool_report.rst", "Mempool-%s"%(target),
                  [ SubTest("Mempool", default_autotest, "mempool_autotest",
                            timeout, mempool_genreport)
                    ])
autotest.register("mbuf_report.rst", "Mbuf-%s"%(target),
                  [ SubTest("Mbuf", default_autotest, "mbuf_autotest", timeout=120)
                    ])
autotest.register("timer_report.rst", "Timer-%s"%(target),
                  [ SubTest("Timer", timer_autotest, "timer_autotest")
                    ])
autotest.register("malloc_report.rst", "Malloc-%s"%(target),
                  [ SubTest("Malloc", default_autotest, "malloc_autotest")
                    ])

# only do the hash autotest if supported by the platform
if not (platform.startswith("Intel(R) Core(TM)2 Quad CPU") or
        platform.startswith("QEMU")):
    autotest.register("hash_report.rst", "Hash-%s"%(target),
                      [ SubTest("Hash", default_autotest, "hash_autotest", timeout)
                        ])

autotest.register("lpm_report.rst", "LPM-%s"%(target),
                  [ SubTest("Lpm", default_autotest, "lpm_autotest", timeout)
                    ])
autotest.register("eal2_report.rst", "EAL2-%s"%(target),
                  [ SubTest("TailQ", default_autotest, "tailq_autotest"),
                   SubTest("Errno", default_autotest, "errno_autotest"),
                   SubTest("Multiprocess", default_autotest, "multiprocess_autotest")
                    ])

# run every registered test, then emit the .rst reports
autotest.start()
autotest.gen_report()

# NOTE(review): quit() is presumably a helper defined earlier that asks
# the application to exit cleanly before the child is terminated -- confirm.
quit(child)
child.terminate()
sys.exit(0)
diff --git a/app/test/commands.c b/app/test/commands.c
new file mode 100644 (file)
index 0000000..a1d23d8
--- /dev/null
@@ -0,0 +1,391 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <stdint.h>
+#include <string.h>
+#include <stdlib.h>
+#include <netinet/in.h>
+#include <termios.h>
+#ifndef __linux__
+#include <net/socket.h>
+#endif
+#include <inttypes.h>
+#include <errno.h>
+#include <sys/queue.h>
+
+#include <rte_common.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_memzone.h>
+#include <rte_launch.h>
+#include <rte_cycles.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_ring.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_timer.h>
+
+#include <cmdline_rdline.h>
+#include <cmdline_parse.h>
+#include <cmdline_parse_ipaddr.h>
+#include <cmdline_parse_num.h>
+#include <cmdline_parse_string.h>
+#include <cmdline.h>
+
+#include "test.h"
+
+/****************/
+
/* Parsed result of the autotest command: name of the test to run. */
struct cmd_autotest_result {
	cmdline_fixed_string_t autotest;
};

/*
 * Run the selected autotest (or every test for "all_autotests") and
 * print "Test OK" or "Test Failed" -- the exact strings the python
 * autotest framework waits for on the console.
 */
static void cmd_autotest_parsed(void *parsed_result,
				__attribute__((unused)) struct cmdline *cl,
				__attribute__((unused)) void *data)
{
	struct cmd_autotest_result *res = parsed_result;
	int ret = 0;  /* OR-ed result of all executed tests */
	int all = 0;  /* non-zero when "all_autotests" was requested */

	if (!strcmp(res->autotest, "all_autotests"))
		all = 1;

	if (all || !strcmp(res->autotest, "version_autotest"))
		ret |= test_version();
	if (all || !strcmp(res->autotest, "debug_autotest"))
		ret |= test_debug();
	if (all || !strcmp(res->autotest, "pci_autotest"))
		ret |= test_pci();
	if (all || !strcmp(res->autotest, "prefetch_autotest"))
		ret |= test_prefetch();
	if (all || !strcmp(res->autotest, "byteorder_autotest"))
		ret |= test_byteorder();
	if (all || !strcmp(res->autotest, "per_lcore_autotest"))
		ret |= test_per_lcore();
	if (all || !strcmp(res->autotest, "atomic_autotest"))
		ret |= test_atomic();
	if (all || !strcmp(res->autotest, "malloc_autotest"))
		ret |= test_malloc();
	if (all || !strcmp(res->autotest, "spinlock_autotest"))
		ret |= test_spinlock();
	if (all || !strcmp(res->autotest, "memory_autotest"))
		ret |= test_memory();
	if (all || !strcmp(res->autotest, "memzone_autotest"))
		ret |= test_memzone();
	if (all || !strcmp(res->autotest, "rwlock_autotest"))
		ret |= test_rwlock();
	if (all || !strcmp(res->autotest, "mbuf_autotest"))
		ret |= test_mbuf();
	if (all || !strcmp(res->autotest, "logs_autotest"))
		ret |= test_logs();
	if (all || !strcmp(res->autotest, "errno_autotest"))
		ret |= test_errno();
	if (all || !strcmp(res->autotest, "hash_autotest"))
		ret |= test_hash();
	if (all || !strcmp(res->autotest, "lpm_autotest"))
		ret |= test_lpm();
	if (all || !strcmp(res->autotest, "cpuflags_autotest"))
		ret |= test_cpuflags();
	/* tailq autotest must go after all lpm and hash tests or any other
	 * tests which need to create tailq objects (ring and mempool are implicitly
	 * created in earlier tests so can go later)
	 */
	if (all || !strcmp(res->autotest, "tailq_autotest"))
		ret |= test_tailq();
	if (all || !strcmp(res->autotest, "multiprocess_autotest"))
		ret |= test_mp_secondary();
	if (all || !strcmp(res->autotest, "memcpy_autotest"))
		ret |= test_memcpy();
	if (all || !strcmp(res->autotest, "string_autotest"))
		ret |= test_string_fns();
	if (all || !strcmp(res->autotest, "eal_flags_autotest"))
		ret |= test_eal_flags();
	if (all || !strcmp(res->autotest, "alarm_autotest"))
		ret |= test_alarm();
	if (all || !strcmp(res->autotest, "interrupt_autotest"))
		ret |= test_interrupt();
	if (all || !strcmp(res->autotest, "cycles_autotest"))
		ret |= test_cycles();
	if (all || !strcmp(res->autotest, "ring_autotest"))
		ret |= test_ring();
	if (all || !strcmp(res->autotest, "timer_autotest"))
		ret |= test_timer();
	if (all || !strcmp(res->autotest, "mempool_autotest"))
		ret |= test_mempool();

	if (ret == 0)
		printf("Test OK\n");
	else
		printf("Test Failed\n");
	/* flush so the controlling pexpect script sees the verdict now */
	fflush(stdout);
}
+
/* token: every valid autotest name, '#'-separated alternatives */
cmdline_parse_token_string_t cmd_autotest_autotest =
	TOKEN_STRING_INITIALIZER(struct cmd_autotest_result, autotest,
			"pci_autotest#memory_autotest#"
			"per_lcore_autotest#spinlock_autotest#"
			"rwlock_autotest#atomic_autotest#"
			"byteorder_autotest#prefetch_autotest#"
			"cycles_autotest#logs_autotest#"
			"memzone_autotest#ring_autotest#"
			"mempool_autotest#mbuf_autotest#"
			"timer_autotest#malloc_autotest#"
			"memcpy_autotest#hash_autotest#"
			"lpm_autotest#debug_autotest#"
			"errno_autotest#tailq_autotest#"
			"string_autotest#multiprocess_autotest#"
			"cpuflags_autotest#eal_flags_autotest#"
			"alarm_autotest#interrupt_autotest#"
			"version_autotest#"
			"all_autotests");

/* command instance: "<name>_autotest" (single-token command) */
cmdline_parse_inst_t cmd_autotest = {
	.f = cmd_autotest_parsed,  /* function to call */
	.data = NULL,      /* 2nd arg of func */
	.help_str = "launch autotest",
	.tokens = {        /* token list, NULL terminated */
		(void *)&cmd_autotest_autotest,
		NULL,
	},
};
+
+/****************/
+
/* Parsed result of the dump commands: the dump keyword itself. */
struct cmd_dump_result {
	cmdline_fixed_string_t dump;
};
+
+static void
+dump_struct_sizes(void)
+{
+#define DUMP_SIZE(t) printf("sizeof(" #t ") = %u\n", (unsigned)sizeof(t));
+       DUMP_SIZE(struct rte_mbuf);
+       DUMP_SIZE(struct rte_pktmbuf);
+       DUMP_SIZE(struct rte_ctrlmbuf);
+       DUMP_SIZE(struct rte_mempool);
+       DUMP_SIZE(struct rte_ring);
+#undef DUMP_SIZE
+}
+
+static void cmd_dump_parsed(void *parsed_result,
+                           __attribute__((unused)) struct cmdline *cl,
+                           __attribute__((unused)) void *data)
+{
+       struct cmd_dump_result *res = parsed_result;
+
+       if (!strcmp(res->dump, "dump_physmem"))
+               rte_dump_physmem_layout();
+       else if (!strcmp(res->dump, "dump_memzone"))
+               rte_memzone_dump();
+       else if (!strcmp(res->dump, "dump_log_history"))
+               rte_log_dump_history();
+       else if (!strcmp(res->dump, "dump_struct_sizes"))
+               dump_struct_sizes();
+       else if (!strcmp(res->dump, "dump_ring"))
+               rte_ring_list_dump();
+       else if (!strcmp(res->dump, "dump_mempool"))
+               rte_mempool_list_dump();
+}
+
/* token: the dump keyword, '#'-separated alternatives */
cmdline_parse_token_string_t cmd_dump_dump =
	TOKEN_STRING_INITIALIZER(struct cmd_dump_result, dump,
				 "dump_physmem#dump_memzone#dump_log_history#"
				 "dump_struct_sizes#dump_ring#dump_mempool");

/* command instance: "dump_xxx" (no argument) */
cmdline_parse_inst_t cmd_dump = {
	.f = cmd_dump_parsed,  /* function to call */
	.data = NULL,      /* 2nd arg of func */
	.help_str = "dump status",
	.tokens = {        /* token list, NULL terminated */
		(void *)&cmd_dump_dump,
		NULL,
	},
};
+
+/****************/
+
/* Parsed result of "dump_ring|dump_mempool <name>". */
struct cmd_dump_one_result {
	cmdline_fixed_string_t dump;  /* dump keyword */
	cmdline_fixed_string_t name;  /* ring/mempool name to look up */
};
+
+static void cmd_dump_one_parsed(void *parsed_result, struct cmdline *cl,
+                               __attribute__((unused)) void *data)
+{
+       struct cmd_dump_one_result *res = parsed_result;
+
+       if (!strcmp(res->dump, "dump_ring")) {
+               struct rte_ring *r;
+               r = rte_ring_lookup(res->name);
+               if (r == NULL) {
+                       cmdline_printf(cl, "Cannot find ring\n");
+                       return;
+               }
+               rte_ring_dump(r);
+       }
+       else if (!strcmp(res->dump, "dump_mempool")) {
+               struct rte_mempool *mp;
+               mp = rte_mempool_lookup(res->name);
+               if (mp == NULL) {
+                       cmdline_printf(cl, "Cannot find mempool\n");
+                       return;
+               }
+               rte_mempool_dump(mp);
+       }
+}
+
/* token: which object kind to dump */
cmdline_parse_token_string_t cmd_dump_one_dump =
	TOKEN_STRING_INITIALIZER(struct cmd_dump_one_result, dump,
				 "dump_ring#dump_mempool");

/* token: free-form object name (NULL = any string accepted) */
cmdline_parse_token_string_t cmd_dump_one_name =
	TOKEN_STRING_INITIALIZER(struct cmd_dump_one_result, name, NULL);

cmdline_parse_inst_t cmd_dump_one = {
	.f = cmd_dump_one_parsed,  /* function to call */
	.data = NULL,      /* 2nd arg of func */
	.help_str = "dump one ring/mempool: dump_ring|dump_mempool <name>",
	.tokens = {        /* token list, NULL terminated */
		(void *)&cmd_dump_one_dump,
		(void *)&cmd_dump_one_name,
		NULL,
	},
};
+
+/****************/
+
/* Parsed result of "set_quota|set_watermark <ring_name> <value>". */
struct cmd_set_ring_result {
	cmdline_fixed_string_t set;   /* set keyword */
	cmdline_fixed_string_t name;  /* ring name to look up */
	uint32_t value;               /* quota or watermark value */
};
+
+static void cmd_set_ring_parsed(void *parsed_result, struct cmdline *cl,
+                               __attribute__((unused)) void *data)
+{
+       struct cmd_set_ring_result *res = parsed_result;
+       struct rte_ring *r;
+       int ret;
+
+       r = rte_ring_lookup(res->name);
+       if (r == NULL) {
+               cmdline_printf(cl, "Cannot find ring\n");
+               return;
+       }
+
+       if (!strcmp(res->set, "set_quota")) {
+               ret = rte_ring_set_bulk_count(r, res->value);
+               if (ret != 0)
+                       cmdline_printf(cl, "Cannot set quota\n");
+       }
+       else if (!strcmp(res->set, "set_watermark")) {
+               ret = rte_ring_set_water_mark(r, res->value);
+               if (ret != 0)
+                       cmdline_printf(cl, "Cannot set water mark\n");
+       }
+}
+
/* token: which ring attribute to set */
cmdline_parse_token_string_t cmd_set_ring_set =
	TOKEN_STRING_INITIALIZER(struct cmd_set_ring_result, set,
				 "set_quota#set_watermark");

/* token: free-form ring name (NULL = any string accepted) */
cmdline_parse_token_string_t cmd_set_ring_name =
	TOKEN_STRING_INITIALIZER(struct cmd_set_ring_result, name, NULL);

/* token: numeric value, parsed as uint32 */
cmdline_parse_token_num_t cmd_set_ring_value =
	TOKEN_NUM_INITIALIZER(struct cmd_set_ring_result, value, UINT32);

cmdline_parse_inst_t cmd_set_ring = {
	.f = cmd_set_ring_parsed,  /* function to call */
	.data = NULL,      /* 2nd arg of func */
	.help_str = "set quota/watermark: "
			"set_quota|set_watermark <ring_name> <value>",
	.tokens = {        /* token list, NULL terminated */
		(void *)&cmd_set_ring_set,
		(void *)&cmd_set_ring_name,
		(void *)&cmd_set_ring_value,
		NULL,
	},
};
+
+/****************/
+
/* Parsed result of "quit" (keyword only). */
struct cmd_quit_result {
	cmdline_fixed_string_t quit;
};

/* Leave the interactive command line loop. */
static void
cmd_quit_parsed(__attribute__((unused)) void *parsed_result,
		struct cmdline *cl,
		__attribute__((unused)) void *data)
{
	cmdline_quit(cl);
}

cmdline_parse_token_string_t cmd_quit_quit =
	TOKEN_STRING_INITIALIZER(struct cmd_quit_result, quit,
				 "quit");

/* command instance: "quit" */
cmdline_parse_inst_t cmd_quit = {
	.f = cmd_quit_parsed,  /* function to call */
	.data = NULL,      /* 2nd arg of func */
	.help_str = "exit application",
	.tokens = {        /* token list, NULL terminated */
		(void *)&cmd_quit_quit,
		NULL,
	},
};
+
+/****************/
+
/* Command context handed to the cmdline library: every command the
 * test application understands, NULL terminated. */
cmdline_parse_ctx_t main_ctx[] = {
	(cmdline_parse_inst_t *)&cmd_autotest,
	(cmdline_parse_inst_t *)&cmd_dump,
	(cmdline_parse_inst_t *)&cmd_dump_one,
	(cmdline_parse_inst_t *)&cmd_set_ring,
	(cmdline_parse_inst_t *)&cmd_quit,
	NULL,
};
+
diff --git a/app/test/graph_mempool.py b/app/test/graph_mempool.py
new file mode 100755 (executable)
index 0000000..46e3e7b
--- /dev/null
@@ -0,0 +1,193 @@
+#!/usr/bin/env python
+
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+import sys, re
+import numpy as np
+import matplotlib
+matplotlib.use('Agg') # we don't want to use X11
+import matplotlib.pyplot as plt
+from matplotlib.ticker import FuncFormatter
+
+INT = "([-+]?[0-9][0-9]*)"
+
class MempoolTest:
    """Accumulates mempool_autotest result lines and provides sorted,
    filtered access to them.

    Each test case is a dict with the keys: cache, cores, n_get_bulk,
    n_put_bulk, n_keep, rate.
    """

    def __init__(self):
        # Per-instance storage. The previous code used a class attribute
        # ("l = []"), which would have been shared by every instance.
        self.l = []

    # sort a test case list (legacy cmp-style comparator, kept for
    # backward compatibility; sorting now goes through _sort_key which
    # also works on Python 3, where sort(cmp=...) no longer exists)
    def sort(self, x, y):
        for t in [ "cache", "cores", "n_get_bulk", "n_put_bulk",
                   "n_keep", "rate" ]:
            if x[t] > y[t]:
                return 1
            if x[t] < y[t]:
                return -1
        return 0

    # key function giving the same ordering as sort()
    def _sort_key(self, t):
        return (t["cache"], t["cores"], t["n_get_bulk"],
                t["n_put_bulk"], t["n_keep"], t["rate"])

    # add a test case
    def add(self, **args):
        self.l.append(args)

    # get an ordered list matching parameters
    # ex: r.get(enq_core=1, deq_core=1)
    def get(self, **args):
        retlist = []
        for t in self.l:
            add_it = 1
            for a in args:
                if args[a] != t[a]:
                    add_it = 0
                    break
            if add_it:
                retlist.append(t)
        retlist.sort(key=self._sort_key)
        return retlist

    # return an ordered list of all values for this param or param list
    # ex: r.get_value_list("enq_core")
    def get_value_list(self, param):
        retlist = []
        if type(param) is not list:
            param = [param]
        for t in self.l:
            entry = []
            for p in param:
                entry.append(t[p])
            # a single requested param yields scalars, several yield tuples
            if len(entry) == 1:
                entry = entry[0]
            else:
                entry = tuple(entry)
            if not entry in retlist:
                retlist.append(entry)
        retlist.sort()
        return retlist
+
# read the file and return a MempoolTest object containing all data
def read_data_from_file(filename):
    """Parse mempool_autotest output lines from 'filename'.

    Non-matching lines are skipped; each matching line becomes one test
    case in the returned MempoolTest object.
    """
    mempool_test = MempoolTest()

    # build the line regexp once, outside the loop (it was previously
    # reassembled for every line of the file)
    regexp  = "mempool_autotest "
    regexp += "cache=%s cores=%s "%(INT, INT)
    regexp += "n_get_bulk=%s n_put_bulk=%s "%(INT, INT)
    regexp += "n_keep=%s rate_persec=%s"%(INT, INT)

    f = open(filename)
    try:
        for l in f:
            m = re.match(regexp, l)
            if m is None:
                continue

            mempool_test.add(cache = int(m.groups()[0]),
                             cores = int(m.groups()[1]),
                             n_get_bulk = int(m.groups()[2]),
                             n_put_bulk = int(m.groups()[3]),
                             n_keep = int(m.groups()[4]),
                             rate = int(m.groups()[5]))
    finally:
        # close the file even if parsing raises
        f.close()
    return mempool_test
+
def millions(x, pos):
    """Matplotlib FuncFormatter callback: render a tick value in
    millions, e.g. 2500000 -> "2.5M". 'pos' is unused but required by
    the FuncFormatter interface."""
    scaled = x * 1e-6
    return '%1.1fM' % scaled
+
# graph one, with specific parameters -> generate a .svg file
def graph_one(str, mempool_test, cache, cores, n_keep):
    """Plot a grouped bar chart of mempool rates for one (cache, cores,
    n_keep) combination, saved as mempool_<cache>_<cores>_<n_keep>.svg.

    str: title prefix for the figure (note: shadows the builtin 'str').
    mempool_test: MempoolTest instance holding the parsed results.
    """
    filename = "mempool_%d_%d_%d.svg"%(cache, cores, n_keep)

    # bars are grouped by n_get_bulk; within each group there is one bar
    # per n_put_bulk value, plus an empty separator bar
    n_get_bulk_list = mempool_test.get_value_list("n_get_bulk")
    N_n_get_bulk = len(n_get_bulk_list)
    get_names = map(lambda x:"get=%d"%x, n_get_bulk_list)

    n_put_bulk_list = mempool_test.get_value_list("n_put_bulk")
    N_n_put_bulk = len(n_put_bulk_list)
    put_names = map(lambda x:"put=%d"%x, n_put_bulk_list)

    N = N_n_get_bulk * (N_n_put_bulk + 1)
    rates = []

    # color shades encode the n_put_bulk value within a group
    colors = []
    for n_get_bulk in mempool_test.get_value_list("n_get_bulk"):
        col = 0.
        for n_put_bulk in mempool_test.get_value_list("n_put_bulk"):
            col += 0.9 / len(mempool_test.get_value_list("n_put_bulk"))
            r = mempool_test.get(cache=cache, cores=cores,
                                 n_get_bulk=n_get_bulk,
                                 n_put_bulk=n_put_bulk, n_keep=n_keep)
            if len(r) != 0:
                r = r[0]["rate"]
                rates.append(r)
            # NOTE(review): colors is appended even when no matching rate
            # was found above, so rates and colors can get out of step if
            # a parameter combination is missing -- confirm intended.
            colors.append((1. - col, 0.2, col, 1.)) # rgba

        # empty separator bar between n_get_bulk groups
        rates.append(0)
        colors.append((0.,0.,0.,0.))

    ind = np.arange(N)  # the x locations for the groups
    width = 1           # the width of the bars: can also be len(x) sequence


    # render rates with the "millions" tick formatter and save to .svg
    formatter = FuncFormatter(millions)
    fig = plt.figure()
    p = plt.bar(ind, tuple(rates), width, color=tuple(colors))
    fig.axes[0].yaxis.set_major_formatter(formatter)

    plt.ylabel('Obj/sec')
    #plt.ylim(0, 400000000.)
    title  = "Mempool autotest \"%s\"\n"%(str)
    title += "cache=%d, core(s)=%d, n_keep=%d"%(cache, cores, n_keep)
    plt.title(title)
    # one x label per n_get_bulk group, centered under the group
    ind_names = np.arange(N_n_get_bulk) * (N_n_put_bulk+1) + (N_n_put_bulk+1) / 2
    plt.xticks(ind_names, tuple(get_names))
    plt.legend(tuple([p[i] for i in range(N_n_put_bulk)]), tuple(put_names),
               loc="upper left")
    plt.savefig(filename)
+
# command line: graph_mempool.py <result-file> <title>
# (Python 2 script: uses the print statement)
if len(sys.argv) != 3:
    print "usage: graph_mempool.py file title"
    sys.exit(1)

mempool_test = read_data_from_file(sys.argv[1])

# one graph per (cache, cores, n_keep) combination found in the file
for cache, cores, n_keep in mempool_test.get_value_list(["cache", "cores",
                                                         "n_keep"]):
    graph_one(sys.argv[2], mempool_test, cache, cores, n_keep)
diff --git a/app/test/graph_ring.py b/app/test/graph_ring.py
new file mode 100755 (executable)
index 0000000..02c4228
--- /dev/null
@@ -0,0 +1,201 @@
+#!/usr/bin/env python
+
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+import sys, re
+import numpy as np
+import matplotlib
+matplotlib.use('Agg') # we don't want to use X11
+import matplotlib.pyplot as plt
+from matplotlib.ticker import FuncFormatter
+
+INT = "([-+]?[0-9][0-9]*)"
+
+class RingTest:
+    l = []
+
+    def __init__(self):
+        pass
+
+    # sort a test case list
+    def sort(self, x, y):
+        for t in [ "enq_core", "deq_core", "enq_bulk", "deq_bulk", "rate" ]:
+            if x[t] > y[t]:
+                return 1
+            if x[t] < y[t]:
+                return -1
+        return 0
+
+    # add a test case
+    def add(self, **args):
+        self.l.append(args)
+
+    # get an ordered list matching parameters
+    # ex: r.get(enq_core=1, deq_core=1)
+    def get(self, **args):
+        retlist = []
+        for t in self.l:
+            add_it = 1
+            for a in args:
+                if args[a] != t[a]:
+                    add_it = 0
+                    break
+            if add_it:
+                retlist.append(t)
+        retlist.sort(cmp=self.sort)
+        return retlist
+
+    # return an ordered list of all values for this param or param list
+    # ex: r.get_value_list("enq_core")
+    def get_value_list(self, param):
+        retlist = []
+        if type(param) is not list:
+            param = [param]
+        for t in self.l:
+            entry = []
+            for p in param:
+                entry.append(t[p])
+            if len(entry) == 1:
+                entry = entry[0]
+            else:
+                entry = tuple(entry)
+            if not entry in retlist:
+                retlist.append(entry)
+        retlist.sort()
+        return retlist
+
+# read the file and return a RingTest object containing all data
+def read_data_from_file(filename):
+
+    ring_test = RingTest()
+
+    # parse the file: it produces a list of dict containing the data for
+    # each test case (each dict in the list corresponds to a line)
+    f = open(filename)
+    while True:
+        l = f.readline()
+
+        if l == "":
+            break
+
+        regexp  = "ring_autotest "
+        regexp += "e/d_core=%s,%s e/d_bulk=%s,%s "%(INT, INT, INT, INT)
+        regexp += "sp=%s sc=%s "%(INT, INT)
+        regexp += "rate_persec=%s"%(INT)
+        m = re.match(regexp, l)
+        if m == None:
+            continue
+
+        ring_test.add(enq_core = int(m.groups()[0]),
+                      deq_core = int(m.groups()[1]),
+                      enq_bulk = int(m.groups()[2]),
+                      deq_bulk = int(m.groups()[3]),
+                      sp = int(m.groups()[4]),
+                      sc = int(m.groups()[5]),
+                      rate = int(m.groups()[6]))
+
+    f.close()
+    return ring_test
+
+def millions(x, pos):
+    return '%1.1fM' % (x*1e-6)
+
+# graph one, with specific parameters -> generate a .svg file
+def graph_one(str, ring_test, enq_core, deq_core, sp, sc):
+    filename = "ring_%d_%d"%(enq_core, deq_core)
+    if sp:
+        sp_str = "sp"
+    else:
+        sp_str = "mp"
+    if sc:
+        sc_str = "sc"
+    else:
+        sc_str = "mc"
+    filename += "_%s_%s.svg"%(sp_str, sc_str)
+
+
+    enq_bulk_list = ring_test.get_value_list("enq_bulk")
+    N_enq_bulk = len(enq_bulk_list)
+    enq_names = map(lambda x:"enq=%d"%x, enq_bulk_list)
+
+    deq_bulk_list = ring_test.get_value_list("deq_bulk")
+    N_deq_bulk = len(deq_bulk_list)
+    deq_names = map(lambda x:"deq=%d"%x, deq_bulk_list)
+
+    N = N_enq_bulk * (N_deq_bulk + 1)
+    rates = []
+
+    colors = []
+    for enq_bulk in ring_test.get_value_list("enq_bulk"):
+        col = 0.
+        for deq_bulk in ring_test.get_value_list("deq_bulk"):
+            col += 0.9 / len(ring_test.get_value_list("deq_bulk"))
+            r = ring_test.get(enq_core=enq_core, deq_core=deq_core,
+                              enq_bulk=enq_bulk, deq_bulk=deq_bulk,
+                              sp=sp, sc=sc)
+            r = r[0]["rate"]
+            rates.append(r)
+            colors.append((1. - col, 0.2, col, 1.)) # rgba
+
+        rates.append(0)
+        colors.append((0.,0.,0.,0.))
+
+    ind = np.arange(N)  # the x locations for the groups
+    width = 1           # the width of the bars: can also be len(x) sequence
+
+
+    formatter = FuncFormatter(millions)
+    fig = plt.figure()
+    p = plt.bar(ind, tuple(rates), width, color=tuple(colors))
+    fig.axes[0].yaxis.set_major_formatter(formatter)
+
+    plt.ylabel('Obj/sec')
+    #plt.ylim(0, 400000000.)
+    plt.title("Ring autotest \"%s\"\nenq core(s)=%d, deq core(s)=%d, %s, %s"\
+                  %(str, enq_core, deq_core, sp_str, sc_str))
+    ind_names = np.arange(N_enq_bulk) * (N_deq_bulk+1) + (N_deq_bulk+1) / 2
+    plt.xticks(ind_names, tuple(enq_names))
+    plt.legend(tuple([p[i] for i in range(N_deq_bulk)]), tuple(deq_names),
+               loc="upper left")
+    plt.savefig(filename)
+
+if len(sys.argv) != 3:
+    print "usage: graph_ring.py file title"
+    sys.exit(1)
+
+ring_test = read_data_from_file(sys.argv[1])
+
+for enq_core, deq_core, sp, sc in \
+        ring_test.get_value_list(["enq_core", "deq_core", "sp", "sc"]):
+    graph_one(sys.argv[2], ring_test, enq_core, deq_core, sp, sc)
diff --git a/app/test/process.h b/app/test/process.h
new file mode 100644 (file)
index 0000000..0dbc898
--- /dev/null
@@ -0,0 +1,89 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#ifndef _PROCESS_H_
+#define _PROCESS_H_
+
+#ifndef RTE_EXEC_ENV_BAREMETAL
+
+/*
+ * launches a second copy of the test process using the given argv parameters,
+ * which should include argv[0] as the process name. To identify in the
+ * subprocess the source of the call, the env_value parameter is set in the
+ * environment as $RTE_TEST_RECURSIVE (see RECURSIVE_ENV_VAR in test.h)
+ */
+static inline int
+process_dup(const char *const argv[], int numargs, const char *env_value)
+{
+       char *argv_cpy[numargs + 1];
+       int i, fd, status;
+       char path[32];
+
+       pid_t pid = fork();
+       if (pid < 0)
+               return -1;
+       else if (pid == 0) {
+               /* make a copy of the arguments to be passed to exec */
+               for (i = 0; i < numargs; i++)
+                       argv_cpy[i] = strdup(argv[i]);
+               argv_cpy[i] = NULL;
+
+               /* close all open file descriptors, check /proc/self/fd to only
+                * call close on open fds. Exclude fds 0, 1 and 2*/
+               for (fd = getdtablesize(); fd > 2; fd-- ) {
+                       rte_snprintf(path, sizeof(path), "/proc/self/fd/%d", fd);
+                       if (access(path, F_OK) == 0)
+                               close(fd);
+               }
+               printf("Running binary with argv[]:");
+               for (i = 0; i < numargs; i++)
+                       printf("'%s' ", argv_cpy[i]);
+               printf("\n");
+
+               /* set the environment variable */
+               if (setenv(RECURSIVE_ENV_VAR, env_value, 1) != 0)
+                       rte_panic("Cannot export environment variable\n");
+               if (execv("/proc/self/exe", argv_cpy) < 0)
+                       rte_panic("Cannot exec\n");
+       }
+       /* parent process does a wait */
+       while (wait(&status) != pid)
+                       ;
+       return status;
+}
+
+#endif /* not baremetal */
+
+#endif /* _PROCESS_H_ */
diff --git a/app/test/test.c b/app/test/test.c
new file mode 100644 (file)
index 0000000..f98656c
--- /dev/null
@@ -0,0 +1,153 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <string.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <stdarg.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <termios.h>
+#include <ctype.h>
+#include <sys/queue.h>
+
+#include <cmdline_rdline.h>
+#include <cmdline_parse.h>
+#include <cmdline_socket.h>
+#include <cmdline.h>
+
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_timer.h>
+#include <rte_string_fns.h>
+
+#include "test.h"
+
+const char *prgname; /* to be set to argv[0] */
+
+#ifndef RTE_EXEC_ENV_BAREMETAL
+static const char *recursive_call; /* used in linuxapp for MP and other tests */
+
+static int
+no_action(void){ return 0; }
+
+static int
+do_recursive_call(void)
+{
+       unsigned i;
+       struct {
+               const char *env_var;
+               int (*action_fn)(void);
+       } actions[] =  {
+                       { "run_secondary_instances", test_mp_secondary },
+                       { "test_missing_c_flag", no_action },
+                       { "test_missing_n_flag", no_action },
+                       { "test_no_hpet_flag", no_action },
+                       { "test_invalid_b_flag", no_action },
+                       { "test_invalid_r_flag", no_action },
+                       { "test_misc_flags", no_action },
+       };
+
+       if (recursive_call == NULL)
+               return -1;
+       for (i = 0; i < sizeof(actions)/sizeof(actions[0]); i++) {
+               if (strcmp(actions[i].env_var, recursive_call) == 0)
+                       return (actions[i].action_fn)();
+       }
+       return -1;
+}
+#endif
+
+void
+test_hexdump(const char *title, const void *buf, unsigned int len)
+{
+       unsigned int i, out, ofs;
+       const unsigned char *data = buf;
+#define LINE_LEN 80
+       char line[LINE_LEN];    /* space needed 8+16*3+3+16 == 75 */
+
+       printf("%s at [%p], len=%u\n", title, data, len);
+       ofs = 0;
+       while (ofs < len) {
+               /* format 1 line in the buffer, then use printf to print them */
+               out = rte_snprintf(line, LINE_LEN, "%08X", ofs);
+               for (i = 0; ofs+i < len && i < 16; i++)
+                       out += rte_snprintf(line+out, LINE_LEN - out, " %02X",
+                                       data[ofs+i]&0xff);
+               for(; i <= 16; i++)
+                       out += rte_snprintf(line+out, LINE_LEN - out, "   ");
+               for(i = 0; ofs < len && i < 16; i++, ofs++) {
+                       unsigned char c = data[ofs];
+                       if (!isascii(c) || !isprint(c))
+                               c = '.';
+                       out += rte_snprintf(line+out, LINE_LEN - out, "%c", c);
+               }
+               printf("%s\n", line);
+       }
+}
+
+int
+main(int argc, char **argv)
+{
+       struct cmdline *cl;
+       int ret;
+
+       ret = rte_eal_init(argc, argv);
+       if (ret < 0)
+               return -1;
+
+       rte_timer_subsystem_init();
+
+       argc -= ret;
+       argv += ret;
+
+       prgname = argv[0];
+
+#ifndef RTE_EXEC_ENV_BAREMETAL
+       if ((recursive_call = getenv(RECURSIVE_ENV_VAR)) != NULL)
+               return do_recursive_call();
+#endif
+
+       cl = cmdline_stdin_new(main_ctx, "RTE>>");
+       if (cl == NULL) {
+               return -1;
+       }
+       cmdline_interact(cl);
+       cmdline_stdin_exit(cl);
+
+       return 0;
+}
diff --git a/app/test/test.h b/app/test/test.h
new file mode 100644 (file)
index 0000000..3c927d2
--- /dev/null
@@ -0,0 +1,85 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#ifndef _TEST_H_
+#define _TEST_H_
+
+/* icc on baremetal gives us troubles with function named 'main' */
+#ifdef RTE_EXEC_ENV_BAREMETAL
+#define main _main
+#endif
+
+#define RECURSIVE_ENV_VAR "RTE_TEST_RECURSIVE"
+
+extern const char *prgname;
+
+extern cmdline_parse_ctx_t main_ctx[];
+
+void test_hexdump(const char *title, const void *buf, unsigned int len);
+
+int main(int argc, char **argv);
+
+int test_pci(void);
+int test_memory(void);
+int test_per_lcore(void);
+int test_spinlock(void);
+int test_rwlock(void);
+int test_atomic(void);
+int test_byteorder(void);
+int test_prefetch(void);
+int test_cycles(void);
+int test_logs(void);
+int test_memzone(void);
+int test_ring(void);
+int test_mempool(void);
+int test_mbuf(void);
+int test_timer(void);
+int test_malloc(void);
+int test_memcpy(void);
+int test_hash(void);
+int test_lpm(void);
+int test_debug(void);
+int test_errno(void);
+int test_tailq(void);
+int test_string_fns(void);
+int test_mp_secondary(void);
+int test_cpuflags(void);
+int test_eal_flags(void);
+int test_alarm(void);
+int test_interrupt(void);
+int test_version(void);
+int test_pci_run;
+
+#endif
diff --git a/app/test/test_alarm.c b/app/test/test_alarm.c
new file mode 100644 (file)
index 0000000..5e36a3d
--- /dev/null
@@ -0,0 +1,258 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+
+#include <cmdline_parse.h>
+
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_interrupts.h>
+#include <rte_common.h>
+#include <rte_atomic.h>
+#include <rte_alarm.h>
+
+#include "test.h"
+
+#define US_PER_MS 1000
+
+#define RTE_TEST_ALARM_TIMEOUT 3000 /* ms */
+#define RTE_TEST_CHECK_PERIOD  1000 /* ms */
+
+static volatile int flag;
+
+static void
+test_alarm_callback(void *cb_arg)
+{
+       flag = 1;
+       printf("Callback setting flag - OK. [cb_arg = %p]\n", cb_arg);
+}
+
+static rte_atomic32_t cb_count;
+
+static void
+test_multi_cb(void *arg)
+{
+       rte_atomic32_inc(&cb_count);
+       printf("In %s - arg = %p\n", __func__, arg);
+}
+
+static volatile int recursive_error = 0;
+
+static void
+test_remove_in_callback(void *arg)
+{
+       printf("In %s - arg = %p\n", __func__, arg);
+       if (rte_eal_alarm_cancel(test_remove_in_callback, arg) ||
+                       rte_eal_alarm_cancel(test_remove_in_callback, (void *)-1)) {
+               printf("Error - cancelling callback from within function succeeded!\n");
+               recursive_error = 1;
+       }
+       flag = (int)((uintptr_t)arg);
+}
+
+static volatile int flag_2;
+
+static void
+test_remove_in_callback_2(void *arg)
+{
+       if (rte_eal_alarm_cancel(test_remove_in_callback_2, arg) || rte_eal_alarm_cancel(test_remove_in_callback_2, (void *)-1)) {
+               printf("Error - cancelling callback of test_remove_in_callback_2\n");
+               return;
+       }
+       flag_2 = 1;
+}
+
+static int
+test_multi_alarms(void)
+{
+       int rm_count = 0;
+       cb_count.cnt = 0;
+
+       printf("Expect 6 callbacks in order...\n");
+       /* add two alarms in order */
+       rte_eal_alarm_set(1000 * US_PER_MS, test_multi_cb, (void *)1);
+       rte_eal_alarm_set(2000 * US_PER_MS, test_multi_cb, (void *)2);
+
+       /* now add in reverse order */
+       rte_eal_alarm_set(6000 * US_PER_MS, test_multi_cb, (void *)6);
+       rte_eal_alarm_set(5000 * US_PER_MS, test_multi_cb, (void *)5);
+       rte_eal_alarm_set(4000 * US_PER_MS, test_multi_cb, (void *)4);
+       rte_eal_alarm_set(3000 * US_PER_MS, test_multi_cb, (void *)3);
+
+       /* wait for expiry */
+       rte_delay_ms(6500);
+       if (cb_count.cnt != 6) {
+               printf("Missing callbacks\n");
+               /* remove any callbacks that might remain */
+               rte_eal_alarm_cancel(test_multi_cb, (void *)-1);
+               return -1;
+       }
+
+       cb_count.cnt = 0;
+       printf("Expect only callbacks with args 1 and 3...\n");
+       /* Add 3 flags, then delete one */
+       rte_eal_alarm_set(3000 * US_PER_MS, test_multi_cb, (void *)3);
+       rte_eal_alarm_set(2000 * US_PER_MS, test_multi_cb, (void *)2);
+       rte_eal_alarm_set(1000 * US_PER_MS, test_multi_cb, (void *)1);
+       rm_count = rte_eal_alarm_cancel(test_multi_cb, (void *)2);
+
+       rte_delay_ms(3500);
+       if (cb_count.cnt != 2 || rm_count != 1) {
+               printf("Error: invalid flags count or alarm removal failure"
+                               " -  flags value = %d, expected = %d\n", cb_count.cnt, 2);
+               /* remove any callbacks that might remain */
+               rte_eal_alarm_cancel(test_multi_cb, (void *)-1);
+               return -1;
+       }
+
+       printf("Testing adding and then removing multiple alarms\n");
+       /* finally test that no callbacks are called if we delete them all*/
+       rte_eal_alarm_set(1000 * US_PER_MS, test_multi_cb, (void *)1);
+       rte_eal_alarm_set(1000 * US_PER_MS, test_multi_cb, (void *)2);
+       rte_eal_alarm_set(1000 * US_PER_MS, test_multi_cb, (void *)3);
+       rm_count = rte_eal_alarm_cancel(test_alarm_callback, (void *)-1);
+       if (rm_count != 0) {
+               printf("Error removing non-existent alarm succeeded\n");
+               rte_eal_alarm_cancel(test_multi_cb, (void *) -1);
+               return -1;
+       }
+       rm_count = rte_eal_alarm_cancel(test_multi_cb, (void *) -1);
+       if (rm_count != 3) {
+               printf("Error removing all pending alarm callbacks\n");
+               return -1;
+       }
+
+       /* Test that we cannot cancel an alarm from within the callback itself
+        * Also test that we can cancel head-of-line callbacks ok.*/
+       flag = 0;
+       recursive_error = 0;
+       rte_eal_alarm_set(1000 * US_PER_MS, test_remove_in_callback, (void *)1);
+       rte_eal_alarm_set(2000 * US_PER_MS, test_remove_in_callback, (void *)2);
+       rm_count = rte_eal_alarm_cancel(test_remove_in_callback, (void *)1);
+       if (rm_count != 1) {
+               printf("Error cancelling head-of-list callback\n");
+               return -1;
+       }
+       rte_delay_ms(1500);
+       if (flag != 0) {
+               printf("Error, cancelling head-of-list leads to premature callback\n");
+               return -1;
+       }
+       rte_delay_ms(1000);
+       if (flag != 2) {
+               printf("Error - expected callback not called\n");
+               rte_eal_alarm_cancel(test_remove_in_callback, (void *)-1);
+               return -1;
+       }
+       if (recursive_error == 1)
+               return -1;
+
+       /* Check if it can cancel all for the same callback */
+       printf("Testing cancelling all for the same callback\n");
+       flag_2 = 0;
+       rte_eal_alarm_set(1000 * US_PER_MS, test_remove_in_callback, (void *)1);
+       rte_eal_alarm_set(2000 * US_PER_MS, test_remove_in_callback_2, (void *)2);
+       rte_eal_alarm_set(3000 * US_PER_MS, test_remove_in_callback_2, (void *)3);
+       rte_eal_alarm_set(4000 * US_PER_MS, test_remove_in_callback, (void *)4);
+       rm_count = rte_eal_alarm_cancel(test_remove_in_callback_2, (void *)-1);
+       if (rm_count != 2) {
+               printf("Error, cannot cancel all for the same callback\n");
+               return -1;
+       }
+       rm_count = rte_eal_alarm_cancel(test_remove_in_callback, (void *)-1);
+       if (rm_count != 2) {
+               printf("Error, cannot cancel all for the same callback\n");
+               return -1;
+       }
+
+       return 0;
+}
+
+int
+test_alarm(void)
+{
+       int count = 0;
+
+       /* check if the callback will be called */
+       printf("check if the callback will be called\n");
+       flag = 0;
+       if (rte_eal_alarm_set(RTE_TEST_ALARM_TIMEOUT * US_PER_MS,
+                       test_alarm_callback, NULL) < 0) {
+               printf("fail to set alarm callback\n");
+               return -1;
+       }
+       while (flag == 0 && count ++ < 6)
+               rte_delay_ms(RTE_TEST_CHECK_PERIOD);
+
+       if (flag == 0){
+               printf("Callback not called\n");
+               return -1;
+       }
+
+       /* check if it will fail to set alarm with wrong us value */
+       printf("check if it will fail to set alarm with wrong ms values\n");
+       if (rte_eal_alarm_set(0, test_alarm_callback,
+                                               NULL) >= 0) {
+               printf("should not be successful with 0 us value\n");
+               return -1;
+       }
+       if (rte_eal_alarm_set(UINT64_MAX - 1, test_alarm_callback,
+                                               NULL) >= 0) {
+               printf("should not be successful with (UINT64_MAX-1) us value\n");
+               return -1;
+       }
+
+       /* check if it will fail to set alarm with null callback parameter */
+       printf("check if it will fail to set alarm with null callback parameter\n");
+       if (rte_eal_alarm_set(RTE_TEST_ALARM_TIMEOUT, NULL, NULL) >= 0) {
+               printf("should not be successful to set alarm with null callback parameter\n");
+               return -1;
+       }
+
+       /* check if it will fail to remove alarm with null callback parameter */
+       printf("check if it will fail to remove alarm with null callback parameter\n");
+       if (rte_eal_alarm_cancel(NULL, NULL) == 0) {
+               printf("should not be successful to remove alarm with null callback parameter");
+               return -1;
+       }
+
+       if (test_multi_alarms() != 0)
+               return -1;
+
+       return 0;
+}
+
diff --git a/app/test/test_atomic.c b/app/test/test_atomic.c
new file mode 100644 (file)
index 0000000..b64f361
--- /dev/null
@@ -0,0 +1,381 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <unistd.h>
+#include <sys/queue.h>
+
+#include <cmdline_parse.h>
+
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_per_lcore.h>
+#include <rte_launch.h>
+#include <rte_atomic.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+
+#include "test.h"
+
+/*
+ * Atomic Variables
+ * ================
+ *
+ * - The main test function performs three subtests. The first test
+ *   checks that the usual inc/dec/add/sub functions are working
+ *   correctly:
+ *
+ *   - Initialize 16-bit, 32-bit and 64-bit atomic variables to specific
+ *     values.
+ *
+ *   - These variables are incremented and decremented on each core at
+ *     the same time in ``test_atomic_usual()``.
+ *
+ *   - The function checks that once all lcores finish their function,
+ *     the values of the atomic variables are still the same.
+ *
+ * - The second test verifies the behavior of "test and set" functions.
+ *
+ *   - Initialize 16-bit, 32-bit and 64-bit atomic variables to zero.
+ *
+ *   - Invoke ``test_atomic_tas()`` on each lcore. Before doing anything
+ *     else, the cores wait on a synchronization flag using ``while
+ *     (rte_atomic32_read(&val) == 0)``, which is triggered by the main test
+ *     function. Then all cores do a
+ *     ``rte_atomicXX_test_and_set()`` at the same time. If it is successful,
+ *     it increments another atomic counter.
+ *
+ *   - The main function checks that the atomic counter was incremented
+ *     three times only (once each for the 16-bit, 32-bit and 64-bit values).
+ *
+ * - Test "add/sub and return"
+ *
+ *   - Initialize 16-bit, 32-bit and 64-bit atomic variables to zero.
+ *
+ *   - Invoke ``test_atomic_addsub_return()`` on each lcore. Before doing
+ *     anything else, the cores wait on a synchronization flag. Each lcore does
+ *     this operation several times::
+ *
+ *       tmp = rte_atomicXX_add_return(&a, 1);
+ *       atomic_add(&count, tmp);
+ *       tmp = rte_atomicXX_sub_return(&a, 1);
+ *       atomic_sub(&count, tmp+1);
+ *
+ *   - At the end of the test, the *count* value must be 0.
+ */
+
+#define NUM_ATOMIC_TYPES 3
+
+#define N 10000
+
+static rte_atomic16_t a16;
+static rte_atomic32_t a32;
+static rte_atomic64_t a64;
+static rte_atomic32_t count;
+static rte_atomic32_t synchro;
+
+static int
+test_atomic_usual(__attribute__((unused)) void *arg)
+{
+       unsigned i;
+
+       while (rte_atomic32_read(&synchro) == 0)
+               ;
+
+       for (i = 0; i < N; i++)
+               rte_atomic16_inc(&a16);
+       for (i = 0; i < N; i++)
+               rte_atomic16_dec(&a16);
+       for (i = 0; i < (N / 5); i++)
+               rte_atomic16_add(&a16, 5);
+       for (i = 0; i < (N / 5); i++)
+               rte_atomic16_sub(&a16, 5);
+
+       for (i = 0; i < N; i++)
+               rte_atomic32_inc(&a32);
+       for (i = 0; i < N; i++)
+               rte_atomic32_dec(&a32);
+       for (i = 0; i < (N / 5); i++)
+               rte_atomic32_add(&a32, 5);
+       for (i = 0; i < (N / 5); i++)
+               rte_atomic32_sub(&a32, 5);
+
+       for (i = 0; i < N; i++)
+               rte_atomic64_inc(&a64);
+       for (i = 0; i < N; i++)
+               rte_atomic64_dec(&a64);
+       for (i = 0; i < (N / 5); i++)
+               rte_atomic64_add(&a64, 5);
+       for (i = 0; i < (N / 5); i++)
+               rte_atomic64_sub(&a64, 5);
+
+       return 0;
+}
+
+static int
+test_atomic_tas(__attribute__((unused)) void *arg)
+{
+       while (rte_atomic32_read(&synchro) == 0)
+               ;
+
+       if (rte_atomic16_test_and_set(&a16))
+               rte_atomic32_inc(&count);
+       if (rte_atomic32_test_and_set(&a32))
+               rte_atomic32_inc(&count);
+       if (rte_atomic64_test_and_set(&a64))
+               rte_atomic32_inc(&count);
+
+       return 0;
+}
+
+static int
+test_atomic_addsub_and_return(__attribute__((unused)) void *arg)
+{
+       uint32_t tmp16;
+       uint32_t tmp32;
+       uint64_t tmp64;
+       unsigned i;
+
+       while (rte_atomic32_read(&synchro) == 0)
+               ;
+
+       for (i = 0; i < N; i++) {
+               tmp16 = rte_atomic16_add_return(&a16, 1);
+               rte_atomic32_add(&count, tmp16);
+
+               tmp16 = rte_atomic16_sub_return(&a16, 1);
+               rte_atomic32_sub(&count, tmp16+1);
+
+               tmp32 = rte_atomic32_add_return(&a32, 1);
+               rte_atomic32_add(&count, tmp32);
+
+               tmp32 = rte_atomic32_sub_return(&a32, 1);
+               rte_atomic32_sub(&count, tmp32+1);
+
+               tmp64 = rte_atomic64_add_return(&a64, 1);
+               rte_atomic32_add(&count, tmp64);
+
+               tmp64 = rte_atomic64_sub_return(&a64, 1);
+               rte_atomic32_sub(&count, tmp64+1);
+       }
+
+       return 0;
+}
+
+/*
+ * rte_atomic32_inc_and_test() atomically increments a 32-bit counter by one
+ * and then tests whether it has reached 0, returning true if the counter is 0
+ * and false otherwise. rte_atomic16_inc_and_test() and
+ * rte_atomic64_inc_and_test() do the same for 16-bit and 64-bit counters.
+ * This test checks whether each counter is equal to 0 after being atomically
+ * incremented. If it is, the "count" variable is incremented by one; its
+ * final value is checked as the result later.
+ *
+ */
+static int
+test_atomic_inc_and_test(__attribute__((unused)) void *arg)
+{
+       while (rte_atomic32_read(&synchro) == 0)
+               ;
+
+       if (rte_atomic16_inc_and_test(&a16)) {
+               rte_atomic32_inc(&count);
+       }
+       if (rte_atomic32_inc_and_test(&a32)) {
+               rte_atomic32_inc(&count);
+       }
+       if (rte_atomic64_inc_and_test(&a64)) {
+               rte_atomic32_inc(&count);
+       }
+
+       return 0;
+}
+
+/*
+ * rte_atomicXX_dec_and_test() should atomically decrement a counter by one and
+ * then test whether it has reached 0, returning true if the counter is 0
+ * and false otherwise.
+ * This test checks whether the counter is equal to 0 after being atomically
+ * decremented. If it is, the value of "count" is incremented by one, to
+ * be checked as the result later.
+ */
+static int
+test_atomic_dec_and_test(__attribute__((unused)) void *arg)
+{
+       while (rte_atomic32_read(&synchro) == 0)
+               ;
+
+       if (rte_atomic16_dec_and_test(&a16))
+               rte_atomic32_inc(&count);
+
+       if (rte_atomic32_dec_and_test(&a32))
+               rte_atomic32_inc(&count);
+
+       if (rte_atomic64_dec_and_test(&a64))
+               rte_atomic32_inc(&count);
+
+       return 0;
+}
+
+int
+test_atomic(void)
+{
+       rte_atomic16_init(&a16);
+       rte_atomic32_init(&a32);
+       rte_atomic64_init(&a64);
+       rte_atomic32_init(&count);
+       rte_atomic32_init(&synchro);
+
+       rte_atomic16_set(&a16, 1UL << 10);
+       rte_atomic32_set(&a32, 1UL << 10);
+       rte_atomic64_set(&a64, 1ULL << 33);
+
+       printf("usual inc/dec/add/sub functions\n");
+
+       rte_eal_mp_remote_launch(test_atomic_usual, NULL, SKIP_MASTER);
+       rte_atomic32_set(&synchro, 1);
+       rte_eal_mp_wait_lcore();
+       rte_atomic32_set(&synchro, 0);
+
+       if (rte_atomic16_read(&a16) != 1UL << 10) {
+               printf("Atomic16 usual functions failed\n");
+               return -1;
+       }
+
+       if (rte_atomic32_read(&a32) != 1UL << 10) {
+               printf("Atomic32 usual functions failed\n");
+               return -1;
+       }
+
+       if (rte_atomic64_read(&a64) != 1ULL << 33) {
+               printf("Atomic64 usual functions failed\n");
+               return -1;
+       }
+
+       printf("test and set\n");
+
+       rte_atomic64_set(&a64, 0);
+       rte_atomic32_set(&a32, 0);
+       rte_atomic16_set(&a16, 0);
+       rte_atomic32_set(&count, 0);
+       rte_eal_mp_remote_launch(test_atomic_tas, NULL, SKIP_MASTER);
+       rte_atomic32_set(&synchro, 1);
+       rte_eal_mp_wait_lcore();
+       rte_atomic32_set(&synchro, 0);
+
+       if (rte_atomic32_read(&count) != NUM_ATOMIC_TYPES) {
+               printf("Atomic test and set failed\n");
+               return -1;
+       }
+
+       printf("add/sub and return\n");
+
+       rte_atomic64_set(&a64, 0);
+       rte_atomic32_set(&a32, 0);
+       rte_atomic16_set(&a16, 0);
+       rte_atomic32_set(&count, 0);
+       rte_eal_mp_remote_launch(test_atomic_addsub_and_return, NULL,
+                                SKIP_MASTER);
+       rte_atomic32_set(&synchro, 1);
+       rte_eal_mp_wait_lcore();
+       rte_atomic32_set(&synchro, 0);
+
+       if (rte_atomic32_read(&count) != 0) {
+               printf("Atomic add/sub+return failed\n");
+               return -1;
+       }
+
+       /*
+        * Set a64, a32 and a16 with the same value of minus "number of slave
+        * lcores", launch all slave lcores to atomically increase by one and
+        * test them respectively.
+        * Each lcore should have only one chance to increase a64 by one and
+        * then check if it is equal to 0, but there should be only one lcore
+        * that finds that it is 0. It is similar for a32 and a16.
+        * Then a variable of "count", initialized to zero, is increased by
+        * one if a64, a32 or a16 is 0 after being increased and tested
+        * atomically.
+        * We can check if "count" is finally equal to 3 to see if all slave
+        * lcores performed "atomic inc and test" right.
+        */
+       printf("inc and test\n");
+
+       rte_atomic64_clear(&a64);
+       rte_atomic32_clear(&a32);
+       rte_atomic16_clear(&a16);
+       rte_atomic32_clear(&synchro);
+       rte_atomic32_clear(&count);
+
+       rte_atomic64_set(&a64, (int64_t)(1 - (int64_t)rte_lcore_count()));
+       rte_atomic32_set(&a32, (int32_t)(1 - (int32_t)rte_lcore_count()));
+       rte_atomic16_set(&a16, (int16_t)(1 - (int16_t)rte_lcore_count()));
+       rte_eal_mp_remote_launch(test_atomic_inc_and_test, NULL, SKIP_MASTER);
+       rte_atomic32_set(&synchro, 1);
+       rte_eal_mp_wait_lcore();
+       rte_atomic32_clear(&synchro);
+
+       if (rte_atomic32_read(&count) != NUM_ATOMIC_TYPES) {
+               printf("Atomic inc and test failed %d\n", count.cnt);
+               return -1;
+       }
+
+       /*
+        * Same as above, but this time we set the values to "number of slave
+        * lcores", and decrement instead of increment.
+        */
+       printf("dec and test\n");
+
+       rte_atomic32_clear(&synchro);
+       rte_atomic32_clear(&count);
+
+       rte_atomic64_set(&a64, (int64_t)(rte_lcore_count() - 1));
+       rte_atomic32_set(&a32, (int32_t)(rte_lcore_count() - 1));
+       rte_atomic16_set(&a16, (int16_t)(rte_lcore_count() - 1));
+       rte_eal_mp_remote_launch(test_atomic_dec_and_test, NULL, SKIP_MASTER);
+       rte_atomic32_set(&synchro, 1);
+       rte_eal_mp_wait_lcore();
+       rte_atomic32_clear(&synchro);
+
+       if (rte_atomic32_read(&count) != NUM_ATOMIC_TYPES) {
+               printf("Atomic dec and test failed\n");
+               return -1;
+       }
+
+       return 0;
+}
+
diff --git a/app/test/test_byteorder.c b/app/test/test_byteorder.c
new file mode 100644 (file)
index 0000000..593e26f
--- /dev/null
@@ -0,0 +1,97 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <unistd.h>
+#include <inttypes.h>
+
+#include <cmdline_parse.h>
+
+#include <rte_byteorder.h>
+
+#include "test.h"
+
+static volatile uint16_t u16 = 0x1337;
+static volatile uint32_t u32 = 0xdeadbeefUL;
+static volatile uint64_t u64 = 0xdeadcafebabefaceULL;
+
+/*
+ * Byteorder functions
+ * ===================
+ *
+ * - check that optimized byte swap functions are working for each
+ *   size (16, 32, 64 bits)
+ */
+
+int
+test_byteorder(void)
+{
+       uint16_t res_u16;
+       uint32_t res_u32;
+       uint64_t res_u64;
+
+       res_u16 = rte_bswap16(u16);
+       printf("%"PRIx16" -> %"PRIx16"\n", u16, res_u16);
+       if (res_u16 != 0x3713)
+               return -1;
+
+       res_u32 = rte_bswap32(u32);
+       printf("%"PRIx32" -> %"PRIx32"\n", u32, res_u32);
+       if (res_u32 != 0xefbeaddeUL)
+               return -1;
+
+       res_u64 = rte_bswap64(u64);
+       printf("%"PRIx64" -> %"PRIx64"\n", u64, res_u64);
+       if (res_u64 != 0xcefabebafecaaddeULL)
+               return -1;
+
+       res_u16 = rte_bswap16(0x1337);
+       printf("const %"PRIx16" -> %"PRIx16"\n", 0x1337, res_u16);
+       if (res_u16 != 0x3713)
+               return -1;
+
+       res_u32 = rte_bswap32(0xdeadbeefUL);
+       printf("const %"PRIx32" -> %"PRIx32"\n", (uint32_t) 0xdeadbeef, res_u32);
+       if (res_u32 != 0xefbeaddeUL)
+               return -1;
+
+       res_u64 = rte_bswap64(0xdeadcafebabefaceULL);
+       printf("const %"PRIx64" -> %"PRIx64"\n", (uint64_t) 0xdeadcafebabefaceULL, res_u64);
+       if (res_u64 != 0xcefabebafecaaddeULL)
+               return -1;
+
+       return 0;
+}
diff --git a/app/test/test_cpuflags.c b/app/test/test_cpuflags.c
new file mode 100644 (file)
index 0000000..d15d6e4
--- /dev/null
@@ -0,0 +1,134 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <stdio.h>
+
+#include <cmdline_parse.h>
+#include <errno.h>
+#include <stdint.h>
+#include <rte_cpuflags.h>
+#include <rte_debug.h>
+
+#include "test.h"
+
+
+/* convenience define */
+#define CHECK_FOR_FLAG(x) \
+                       result = rte_cpu_get_flag_enabled(x);    \
+                       printf("%s\n", cpu_flag_result(result)); \
+                       if (result == -ENOENT)                   \
+                               return -1;
+
+/*
+ * Helper function to display result
+ */
+static inline const char *
+cpu_flag_result(int result)
+{
+       switch (result) {
+       case 0:
+               return "NOT PRESENT";
+       case 1:
+               return "OK";
+       default:
+               return "ERROR";
+       }
+}
+
+
+
+/*
+ * CPUID test
+ * ===========
+ *
+ * - Check flags from different registers with rte_cpu_get_flag_enabled()
+ * - Check if register and CPUID functions fail properly
+ */
+
+int
+test_cpuflags(void)
+{
+       int result;
+       printf("\nChecking for flags from different registers...\n");
+
+       printf("Check for SSE:\t\t");
+       CHECK_FOR_FLAG(RTE_CPUFLAG_SSE);
+
+       printf("Check for SSE2:\t\t");
+       CHECK_FOR_FLAG(RTE_CPUFLAG_SSE2);
+
+       printf("Check for SSE3:\t\t");
+       CHECK_FOR_FLAG(RTE_CPUFLAG_SSE3);
+
+       printf("Check for SSE4.1:\t");
+       CHECK_FOR_FLAG(RTE_CPUFLAG_SSE4_1);
+
+       printf("Check for SSE4.2:\t");
+       CHECK_FOR_FLAG(RTE_CPUFLAG_SSE4_2);
+
+       printf("Check for AVX:\t\t");
+       CHECK_FOR_FLAG(RTE_CPUFLAG_AVX);
+
+       printf("Check for AVX2:\t\t");
+       CHECK_FOR_FLAG(RTE_CPUFLAG_AVX2);
+
+       printf("Check for TRBOBST:\t");
+       CHECK_FOR_FLAG(RTE_CPUFLAG_TRBOBST);
+
+       printf("Check for ENERGY_EFF:\t");
+       CHECK_FOR_FLAG(RTE_CPUFLAG_ENERGY_EFF);
+
+       printf("Check for LAHF_SAHF:\t");
+       CHECK_FOR_FLAG(RTE_CPUFLAG_LAHF_SAHF);
+
+       printf("Check for 1GB_PG:\t");
+       CHECK_FOR_FLAG(RTE_CPUFLAG_1GB_PG);
+
+       printf("Check for INVTSC:\t");
+       CHECK_FOR_FLAG(RTE_CPUFLAG_INVTSC);
+
+
+
+       /*
+        * Check if invalid data is handled properly
+        */
+       printf("\nCheck for invalid flag:\t");
+       result = rte_cpu_get_flag_enabled(RTE_CPUFLAG_NUMFLAGS);
+       printf("%s\n", cpu_flag_result(result));
+       if (result != -ENOENT)
+               return -1;
+
+       return 0;
+}
diff --git a/app/test/test_cycles.c b/app/test/test_cycles.c
new file mode 100644 (file)
index 0000000..f480402
--- /dev/null
@@ -0,0 +1,94 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+
+#include <cmdline_parse.h>
+
+#include <rte_common.h>
+#include <rte_cycles.h>
+
+#include "test.h"
+
+#define N 10000
+
+/*
+ * Cycles test
+ * ===========
+ *
+ * - Loop N times and check that the timer always increments and
+ *   never decrements during this loop.
+ *
+ * - Wait one second using rte_delay_us() and check that the increment
+ *   of cycles is correct with regard to the frequency of the timer.
+ */
+
+int
+test_cycles(void)
+{
+       unsigned i;
+       uint64_t start_cycles, cycles, prev_cycles;
+       uint64_t hz = rte_get_hpet_hz();
+       uint64_t max_inc = (hz / 100); /* 10 ms max between 2 reads */
+
+       /* check that the timer is always incrementing */
+       start_cycles = rte_get_hpet_cycles();
+       prev_cycles = start_cycles;
+       for (i=0; i<N; i++) {
+               cycles = rte_get_hpet_cycles();
+               if ((uint64_t)(cycles - prev_cycles) > max_inc) {
+                       printf("increment too high or going backwards\n");
+                       return -1;
+               }
+               prev_cycles = cycles;
+       }
+
+       /* check that waiting 1 second is precise */
+       prev_cycles = rte_get_hpet_cycles();
+       rte_delay_us(1000000);
+       cycles = rte_get_hpet_cycles();
+       if ((uint64_t)(cycles - prev_cycles) > (hz + max_inc)) {
+               printf("delay_us is not accurate\n");
+               return -1;
+       }
+       cycles = rte_get_hpet_cycles();
+       if ((uint64_t)(cycles - prev_cycles) < (hz)) {
+               printf("delay_us is not accurate\n");
+               return -1;
+       }
+
+       return 0;
+}
diff --git a/app/test/test_debug.c b/app/test/test_debug.c
new file mode 100644 (file)
index 0000000..153c562
--- /dev/null
@@ -0,0 +1,150 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#include <cmdline_parse.h>
+
+#include <rte_debug.h>
+#include <rte_common.h>
+
+#include "test.h"
+
+/*
+ * Debug test
+ * ==========
+ *
+ * - Call rte_dump_stack() and rte_dump_registers(). The result is not checked
+ *   currently, as the functions are not implemented on baremetal.
+ * - Check that rte_panic() terminates the program using a non-zero error code.
+ *   (Only implemented on linux, since it requires the fork() system call)
+ */
+
+#ifdef RTE_EXEC_ENV_BAREMETAL
+
+/* baremetal - don't test rte_panic or rte_exit */
+static int
+test_panic(void)
+{
+       return 0;
+}
+
+static int
+test_exit(void)
+{
+       return 0;
+}
+
+#else
+
+/* linuxapp - use fork() to test rte_panic() */
+static int
+test_panic(void)
+{
+       int pid;
+       int status;
+
+       pid = fork();
+
+       if (pid == 0)
+               rte_panic("Test Debug\n");
+       else if (pid < 0){
+               printf("Fork Failed\n");
+               return -1;
+       }
+       wait(&status);
+       if(status == 0){
+               printf("Child process terminated normally!\n");
+               return -1;
+       } else
+               printf("Child process terminated as expected - Test passed!\n");
+
+       return 0;
+}
+
+/* linuxapp - use fork() to test rte_exit() */
+static int
+test_exit_val(int exit_val)
+{
+       int pid;
+       int status;
+
+       pid = fork();
+
+       if (pid == 0)
+               rte_exit(exit_val, __func__);
+       else if (pid < 0){
+               printf("Fork Failed\n");
+               return -1;
+       }
+       wait(&status);
+       printf("Child process status: %d\n", status);
+       if(!WIFEXITED(status) || WEXITSTATUS(status) != (uint8_t)exit_val){
+               printf("Child process terminated with incorrect return code!\n");
+               return -1;
+       }
+
+       return 0;
+}
+
+static int
+test_exit(void)
+{
+       int test_vals[] = { 0, 1, 2, 255, -1 };
+       unsigned i;
+       for (i = 0; i < sizeof(test_vals) / sizeof(test_vals[0]); i++){
+               if (test_exit_val(test_vals[i]) < 0)
+                       return -1;
+       }
+       printf("%s Passed\n", __func__);
+       return 0;
+}
+
+#endif
+
+int
+test_debug(void)
+{
+       rte_dump_stack();
+       rte_dump_registers();
+       if (test_panic() < 0)
+               return -1;
+       if (test_exit() < 0)
+               return -1;
+       return 0;
+}
diff --git a/app/test/test_eal_flags.c b/app/test/test_eal_flags.c
new file mode 100644 (file)
index 0000000..37b9aaf
--- /dev/null
@@ -0,0 +1,303 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+#include <stdio.h>
+
+#include <cmdline_parse.h>
+
+#include "test.h"
+
+#ifndef RTE_EXEC_ENV_BAREMETAL
+#include <string.h>
+#include <stdarg.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <unistd.h>
+#include <sys/wait.h>
+
+#include <rte_debug.h>
+#include <rte_string_fns.h>
+
+#include "process.h"
+
+#define mp_flag "--proc-type=secondary"
+#define no_hpet "--no-hpet"
+#define no_huge "--no-huge"
+#define no_shconf "--no-shconf"
+#define launch_proc(ARGV) process_dup(ARGV, \
+               sizeof(ARGV)/(sizeof(ARGV[0])), __func__)
+
+/*
+ * Test that the app doesn't run with an invalid blacklist (-b) option.
+ * Final test ensures it does run with valid options as sanity check.
+ * launch_proc() returns 0 when the spawned process ran successfully.
+ */
+static int
+test_invalid_b_flag(void)
+{
+       const char *blinval[][8] = {
+               {prgname, mp_flag, "-n", "1", "-c", "1", "-b", "error"},
+               {prgname, mp_flag, "-n", "1", "-c", "1", "-b", "0:0:0"},
+               {prgname, mp_flag, "-n", "1", "-c", "1", "-b", "0:error:0.1"},
+               {prgname, mp_flag, "-n", "1", "-c", "1", "-b", "0:0:0.1error"},
+               {prgname, mp_flag, "-n", "1", "-c", "1", "-b", "error0:0:0.1"},
+               {prgname, mp_flag, "-n", "1", "-c", "1", "-b", "0:0:0.1.2"},
+       };
+       /* Test with valid blacklist option */
+       const char *blval[] = {prgname, mp_flag, "-n", "1", "-c", "1", "-b", "FF:09:0B.3"};
+
+       int i;
+
+       for (i = 0; i != sizeof (blinval) / sizeof (blinval[0]); i++) {
+               if (launch_proc(blinval[i]) == 0) {
+                       printf("Error - process did run ok with invalid "
+                           "blacklist parameter\n");
+                       return -1;
+               }
+       }
+       if (launch_proc(blval) != 0) {
+               printf("Error - process did not run ok with valid blacklist value\n");
+               return -1;
+       }
+       return 0;
+}
+
+
+/*
+ * Test that the app doesn't run with an invalid -r (memory rank) option.
+ * Final test ensures it does run with a valid value as sanity check.
+ */
+static int
+test_invalid_r_flag(void)
+{
+       const char *rinval[][8] = {
+                       {prgname, mp_flag, "-n", "1", "-c", "1", "-r", "error"},
+                       {prgname, mp_flag, "-n", "1", "-c", "1", "-r", "0"},
+                       {prgname, mp_flag, "-n", "1", "-c", "1", "-r", "-1"},
+                       {prgname, mp_flag, "-n", "1", "-c", "1", "-r", "17"},
+       };
+       /* Test with valid -r (rank) option */
+       const char *rval[] = {prgname, mp_flag, "-n", "1", "-c", "1", "-r", "16"};
+
+       int i;
+
+       for (i = 0; i != sizeof (rinval) / sizeof (rinval[0]); i++) {
+               if (launch_proc(rinval[i]) == 0) {
+                       printf("Error - process did run ok with invalid "
+                           "-r (rank) parameter\n");
+                       return -1;
+               }
+       }
+       if (launch_proc(rval) != 0) {
+               printf("Error - process did not run ok with valid -r (rank) value\n");
+               return -1;
+       }
+       return 0;
+}
+
+/*
+ * Test that the app doesn't run without the coremask flag. In all cases
+ * should give an error and fail to run.  A final run with a valid
+ * coremask confirms the process can start at all (sanity check).
+ */
+static int
+test_missing_c_flag(void)
+{
+       /* -c flag but no coremask value */
+       const char *argv1[] = { prgname, mp_flag, "-n", "3", "-c"};
+       /* No -c flag at all */
+       const char *argv2[] = { prgname, mp_flag, "-n", "3"};
+       /* bad coremask value */
+       const char *argv3[] = { prgname, mp_flag, "-n", "3", "-c", "error" };
+       /* sanity check of tests - valid coremask value */
+       const char *argv4[] = { prgname, mp_flag, "-n", "3", "-c", "1" };
+
+       if (launch_proc(argv1) == 0
+                       || launch_proc(argv2) == 0
+                       || launch_proc(argv3) == 0) {
+               printf("Error - process ran without error when missing -c flag\n");
+               return -1;
+       }
+       if (launch_proc(argv4) != 0) {
+               printf("Error - process did not run ok with valid coremask value\n");
+               return -1;
+       }
+       return 0;
+}
+
+/*
+ * Test that the app doesn't run without the -n (memory channels) flag.
+ * In all cases should give an error and fail to run.
+ * Since -n is not compulsory for MP, we instead use --no-huge and
+ * --no-shconf flags so the primary-process code path (where -n is
+ * required) is taken.
+ */
+static int
+test_missing_n_flag(void)
+{
+       /* -n flag but no value */
+       const char *argv1[] = { prgname, no_huge, no_shconf, "-c", "1", "-n"};
+       /* No -n flag at all */
+       const char *argv2[] = { prgname, no_huge, no_shconf, "-c", "1"};
+       /* bad numeric value */
+       const char *argv3[] = { prgname, no_huge, no_shconf, "-c", "1", "-n", "e" };
+       /* out-of-range value */
+       const char *argv4[] = { prgname, no_huge, no_shconf, "-c", "1", "-n", "9" };
+       /* sanity test - check with good value */
+       const char *argv5[] = { prgname, no_huge, no_shconf, "-c", "1", "-n", "2" };
+
+       if (launch_proc(argv1) == 0
+                       || launch_proc(argv2) == 0
+                       || launch_proc(argv3) == 0
+                       || launch_proc(argv4) == 0) {
+               printf("Error - process ran without error when missing -n flag\n");
+               return -1;
+       }
+       if (launch_proc(argv5) != 0) {
+               printf("Error - process did not run ok with valid num-channel value\n");
+               return -1;
+       }
+       return 0;
+}
+
+/*
+ * Test that the app runs both with and without the --no-hpet flag;
+ * both invocations are expected to succeed.
+ */
+static int
+test_no_hpet_flag(void)
+{
+       /* With --no-hpet */
+       const char *argv1[] = {prgname, mp_flag, no_hpet, "-c", "1", "-n", "2"};
+       /* Without --no-hpet */
+       const char *argv2[] = {prgname, mp_flag, "-c", "1", "-n", "2"};
+
+       if (launch_proc(argv1) != 0) {
+               printf("Error - process did not run ok with --no-hpet flag\n");
+               return -1;
+       }
+       if (launch_proc(argv2) != 0) {
+               printf("Error - process did not run ok without --no-hpet flag\n");
+               return -1;
+       }
+       return 0;
+}
+
+static int
+test_misc_flags(void)
+{
+       /* check that some general flags don't prevent things from working.
+        * All cases, apart from the first, app should run.
+        * No further testing of output done.
+        */
+       /* sanity check - failure with invalid option */
+       const char *argv0[] = {prgname, mp_flag, "-c", "1", "--invalid-opt"};
+
+       /* With --no-pci */
+       const char *argv1[] = {prgname, mp_flag, "-c", "1", "--no-pci"};
+       /* With -v */
+       const char *argv2[] = {prgname, mp_flag, "-c", "1", "-v"};
+       /* With -m - ignored for secondary processes */
+       const char *argv3[] = {prgname, mp_flag, "-c", "1", "-m", "32"};
+
+       if (launch_proc(argv0) == 0) {
+               printf("Error - process ran ok with invalid flag\n");
+               return -1;
+       }
+       if (launch_proc(argv1) != 0) {
+               printf("Error - process did not run ok with --no-pci flag\n");
+               return -1;
+       }
+       if (launch_proc(argv2) != 0) {
+               printf("Error - process did not run ok with -v flag\n");
+               return -1;
+       }
+       if (launch_proc(argv3) != 0) {
+               printf("Error - process did not run ok with -m flag\n");
+               return -1;
+       }
+       return 0;
+}
+
+/*
+ * Run all EAL command-line flag sub-tests in sequence.  Each sub-test
+ * prints its own diagnostics; on the first failure its status is
+ * returned immediately.  Returns 0 when every sub-test passes.
+ *
+ * Fix: the error printf() calls lacked a trailing newline, so a
+ * failure message was glued onto whatever output followed it.
+ */
+int
+test_eal_flags(void)
+{
+       int ret = 0;
+
+       ret = test_missing_c_flag();
+       if (ret < 0) {
+               printf("Error in test_missing_c_flag()\n");
+               return ret;
+       }
+
+       ret = test_missing_n_flag();
+       if (ret < 0) {
+               printf("Error in test_missing_n_flag()\n");
+               return ret;
+       }
+
+       ret = test_no_hpet_flag();
+       if (ret < 0) {
+               printf("Error in test_no_hpet_flag()\n");
+               return ret;
+       }
+
+       ret = test_invalid_b_flag();
+       if (ret < 0) {
+               printf("Error in test_invalid_b_flag()\n");
+               return ret;
+       }
+
+       ret = test_invalid_r_flag();
+       if (ret < 0) {
+               printf("Error in test_invalid_r_flag()\n");
+               return ret;
+       }
+
+       ret = test_misc_flags();
+       if (ret < 0) {
+               printf("Error in test_misc_flags()\n");
+               return ret;
+       }
+
+       return ret;
+}
+
+#else
+/* Baremetal version: secondary processes (and hence these EAL flag
+ * tests) are not applicable, so report success unconditionally.
+ */
+int
+test_eal_flags(void)
+{
+       printf("Multi-process not possible for baremetal, cannot test EAL flags\n");
+       return 0;
+}
+
+#endif
diff --git a/app/test/test_errno.c b/app/test/test_errno.c
new file mode 100644 (file)
index 0000000..4233dc1
--- /dev/null
@@ -0,0 +1,110 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <stdint.h>
+#include <stdio.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <string.h>
+#include <rte_per_lcore.h>
+#include <rte_errno.h>
+#include <rte_string_fns.h>
+
+#include <cmdline_parse.h>
+
+#include "test.h"
+
+/* Verify rte_strerror()/rte_errno behaviour:
+ *  - standard errno codes must produce the same string as libc strerror()
+ *  - RTE-specific codes must differ from libc's string, and libc must
+ *    report them as unknown (proving no collision with errno.h values)
+ *  - values beyond RTE_MAX_ERRNO must be reported as unknown by both.
+ * Returns 0 on success, -1 on failure.
+ */
+int
+test_errno(void)
+{
+       const char *rte_retval;
+       const char *libc_retval;
+       /* NOTE(review): assumes libc formats unknown codes as
+        * "Unknown error %d" (glibc behaviour) - other libcs may differ */
+       const char unknown_code_result[] = "Unknown error %d";
+       /* +3 leaves space for the digits that replace "%d" */
+       char expected_libc_retval[sizeof(unknown_code_result)+3];
+
+       /* use a small selection of standard errors for testing */
+       int std_errs[] = {EAGAIN, EBADF, EACCES, EINTR, EINVAL};
+       /* test ALL registered RTE error codes for overlap */
+       int rte_errs[] = {E_RTE_SECONDARY, E_RTE_NO_CONFIG, E_RTE_NO_TAILQ};
+       unsigned i;
+
+       rte_errno = 0;
+       if (rte_errno != 0)
+               return -1;
+       /* check for standard errors we return the same as libc */
+       for (i = 0; i < sizeof(std_errs)/sizeof(std_errs[0]); i++){
+               rte_retval = rte_strerror(std_errs[i]);
+               libc_retval = strerror(std_errs[i]);
+               printf("rte_strerror: '%s', strerror: '%s'\n",
+                               rte_retval, libc_retval);
+               if (strcmp(rte_retval, libc_retval) != 0)
+                       return -1;
+       }
+       /* for rte-specific errors ensure we return a different string
+        * and that the string for libc is for an unknown error
+        */
+       for (i = 0; i < sizeof(rte_errs)/sizeof(rte_errs[0]); i++){
+               rte_retval = rte_strerror(rte_errs[i]);
+               libc_retval = strerror(rte_errs[i]);
+               printf("rte_strerror: '%s', strerror: '%s'\n",
+                               rte_retval, libc_retval);
+               if (strcmp(rte_retval, libc_retval) == 0)
+                       return -1;
+               /* generate appropriate error string for unknown error number
+                * and then check that this is what we got back. If not, we have
+                * a duplicate error number that conflicts with errno.h */
+               rte_snprintf(expected_libc_retval, sizeof(expected_libc_retval),
+                               unknown_code_result, rte_errs[i]);
+               if (strcmp(expected_libc_retval, libc_retval) != 0){
+                       printf("Error, duplicate error code %d\n", rte_errs[i]);
+                       return -1;
+               }
+       }
+
+       /* ensure that beyond RTE_MAX_ERRNO, we always get an unknown code */
+       rte_retval = rte_strerror(RTE_MAX_ERRNO + 1);
+       libc_retval = strerror(RTE_MAX_ERRNO + 1);
+       rte_snprintf(expected_libc_retval, sizeof(expected_libc_retval),
+                       unknown_code_result, RTE_MAX_ERRNO + 1);
+       printf("rte_strerror: '%s', strerror: '%s'\n",
+                       rte_retval, libc_retval);
+       if ((strcmp(rte_retval, libc_retval) != 0) ||
+                       (strcmp(expected_libc_retval, libc_retval) != 0)){
+               printf("Failed test for RTE_MAX_ERRNO + 1 value\n");
+               return -1;
+       }
+
+       return 0;
+}
diff --git a/app/test/test_hash.c b/app/test/test_hash.c
new file mode 100644 (file)
index 0000000..5992fa3
--- /dev/null
@@ -0,0 +1,1785 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <sys/queue.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_cycles.h>
+#include <rte_random.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_hash.h>
+#include <rte_hash_crc.h>
+#include <rte_jhash.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_fbk_hash.h>
+#include <rte_ip.h>
+#include <rte_string_fns.h>
+
+#include <cmdline_parse.h>
+
+#include "test.h"
+
+#ifdef RTE_LIBRTE_HASH
+
+/* Types of hash table performance test that can be performed */
+enum hash_test_t {
+       ADD_ON_EMPTY,           /**< Add keys to empty table */
+       DELETE_ON_EMPTY,        /**< Attempt to delete keys from empty table */
+       LOOKUP_ON_EMPTY,        /**< Attempt to find keys in an empty table */
+       ADD_UPDATE,             /**< Add/update keys in a full table */
+       DELETE,                 /**< Delete keys from a full table */
+       LOOKUP                  /**< Find keys in a full table */
+};
+
+/* Function type for hash table operations. */
+typedef int32_t (*hash_operation)(const struct rte_hash *h, const void *key);
+
+/* Structure to hold parameters used to run a hash table performance test */
+struct tbl_perf_test_params {
+       enum hash_test_t test_type;     /**< operation being benchmarked */
+       uint32_t num_iterations;        /**< number of operations to perform */
+       uint32_t entries;               /**< total entries in the table */
+       uint32_t bucket_entries;        /**< entries per bucket */
+       uint32_t key_len;               /**< key size in bytes */
+       rte_hash_function hash_func;    /**< hash function under test */
+       uint32_t hash_func_init_val;    /**< initial value passed to hash_func */
+};
+
+#define ITERATIONS 10000
+#define LOCAL_FBK_HASH_ENTRIES_MAX (1 << 15)
+
+/*******************************************************************************
+ * Hash table performance test configuration section.
+ */
+struct tbl_perf_test_params tbl_perf_params[] =
+{
+/* Small table, add */
+/*  Test type | Iterations | Entries | BucketSize | KeyLen |     HashFunc | InitVal */
+{ ADD_ON_EMPTY,        1024,     1024,           1,      16,     rte_jhash,  0},
+{ ADD_ON_EMPTY,        1024,     1024,           2,      16,     rte_jhash,  0},
+{ ADD_ON_EMPTY,        1024,     1024,           4,      16,     rte_jhash,  0},
+{ ADD_ON_EMPTY,        1024,     1024,           8,      16,     rte_jhash,  0},
+{ ADD_ON_EMPTY,        1024,     1024,          16,      16,     rte_jhash,  0},
+{ ADD_ON_EMPTY,        1024,     1024,           1,      32,     rte_jhash,  0},
+{ ADD_ON_EMPTY,        1024,     1024,           2,      32,     rte_jhash,  0},
+{ ADD_ON_EMPTY,        1024,     1024,           4,      32,     rte_jhash,  0},
+{ ADD_ON_EMPTY,        1024,     1024,           8,      32,     rte_jhash,  0},
+{ ADD_ON_EMPTY,        1024,     1024,          16,      32,     rte_jhash,  0},
+{ ADD_ON_EMPTY,        1024,     1024,           1,      48,     rte_jhash,  0},
+{ ADD_ON_EMPTY,        1024,     1024,           2,      48,     rte_jhash,  0},
+{ ADD_ON_EMPTY,        1024,     1024,           4,      48,     rte_jhash,  0},
+{ ADD_ON_EMPTY,        1024,     1024,           8,      48,     rte_jhash,  0},
+{ ADD_ON_EMPTY,        1024,     1024,          16,      48,     rte_jhash,  0},
+{ ADD_ON_EMPTY,        1024,     1024,           1,      64,     rte_jhash,  0},
+{ ADD_ON_EMPTY,        1024,     1024,           2,      64,     rte_jhash,  0},
+{ ADD_ON_EMPTY,        1024,     1024,           4,      64,     rte_jhash,  0},
+{ ADD_ON_EMPTY,        1024,     1024,           8,      64,     rte_jhash,  0},
+{ ADD_ON_EMPTY,        1024,     1024,          16,      64,     rte_jhash,  0},
+/* Small table, update */
+/*  Test type | Iterations | Entries | BucketSize | KeyLen |     HashFunc | InitVal */
+{   ADD_UPDATE,  ITERATIONS,     1024,           1,      16,     rte_jhash,  0},
+{   ADD_UPDATE,  ITERATIONS,     1024,           2,      16,     rte_jhash,  0},
+{   ADD_UPDATE,  ITERATIONS,     1024,           4,      16,     rte_jhash,  0},
+{   ADD_UPDATE,  ITERATIONS,     1024,           8,      16,     rte_jhash,  0},
+{   ADD_UPDATE,  ITERATIONS,     1024,          16,      16,     rte_jhash,  0},
+{   ADD_UPDATE,  ITERATIONS,     1024,           1,      32,     rte_jhash,  0},
+{   ADD_UPDATE,  ITERATIONS,     1024,           2,      32,     rte_jhash,  0},
+{   ADD_UPDATE,  ITERATIONS,     1024,           4,      32,     rte_jhash,  0},
+{   ADD_UPDATE,  ITERATIONS,     1024,           8,      32,     rte_jhash,  0},
+{   ADD_UPDATE,  ITERATIONS,     1024,          16,      32,     rte_jhash,  0},
+{   ADD_UPDATE,  ITERATIONS,     1024,           1,      48,     rte_jhash,  0},
+{   ADD_UPDATE,  ITERATIONS,     1024,           2,      48,     rte_jhash,  0},
+{   ADD_UPDATE,  ITERATIONS,     1024,           4,      48,     rte_jhash,  0},
+{   ADD_UPDATE,  ITERATIONS,     1024,           8,      48,     rte_jhash,  0},
+{   ADD_UPDATE,  ITERATIONS,     1024,          16,      48,     rte_jhash,  0},
+{   ADD_UPDATE,  ITERATIONS,     1024,           1,      64,     rte_jhash,  0},
+{   ADD_UPDATE,  ITERATIONS,     1024,           2,      64,     rte_jhash,  0},
+{   ADD_UPDATE,  ITERATIONS,     1024,           4,      64,     rte_jhash,  0},
+{   ADD_UPDATE,  ITERATIONS,     1024,           8,      64,     rte_jhash,  0},
+{   ADD_UPDATE,  ITERATIONS,     1024,          16,      64,     rte_jhash,  0},
+/* Small table, lookup */
+/*  Test type | Iterations | Entries | BucketSize | KeyLen |     HashFunc | InitVal */
+{       LOOKUP,  ITERATIONS,     1024,           1,      16,     rte_jhash,  0},
+{       LOOKUP,  ITERATIONS,     1024,           2,      16,     rte_jhash,  0},
+{       LOOKUP,  ITERATIONS,     1024,           4,      16,     rte_jhash,  0},
+{       LOOKUP,  ITERATIONS,     1024,           8,      16,     rte_jhash,  0},
+{       LOOKUP,  ITERATIONS,     1024,          16,      16,     rte_jhash,  0},
+{       LOOKUP,  ITERATIONS,     1024,           1,      32,     rte_jhash,  0},
+{       LOOKUP,  ITERATIONS,     1024,           2,      32,     rte_jhash,  0},
+{       LOOKUP,  ITERATIONS,     1024,           4,      32,     rte_jhash,  0},
+{       LOOKUP,  ITERATIONS,     1024,           8,      32,     rte_jhash,  0},
+{       LOOKUP,  ITERATIONS,     1024,          16,      32,     rte_jhash,  0},
+{       LOOKUP,  ITERATIONS,     1024,           1,      48,     rte_jhash,  0},
+{       LOOKUP,  ITERATIONS,     1024,           2,      48,     rte_jhash,  0},
+{       LOOKUP,  ITERATIONS,     1024,           4,      48,     rte_jhash,  0},
+{       LOOKUP,  ITERATIONS,     1024,           8,      48,     rte_jhash,  0},
+{       LOOKUP,  ITERATIONS,     1024,          16,      48,     rte_jhash,  0},
+{       LOOKUP,  ITERATIONS,     1024,           1,      64,     rte_jhash,  0},
+{       LOOKUP,  ITERATIONS,     1024,           2,      64,     rte_jhash,  0},
+{       LOOKUP,  ITERATIONS,     1024,           4,      64,     rte_jhash,  0},
+{       LOOKUP,  ITERATIONS,     1024,           8,      64,     rte_jhash,  0},
+{       LOOKUP,  ITERATIONS,     1024,          16,      64,     rte_jhash,  0},
+/* Big table, add */
+/* Test type  | Iterations | Entries | BucketSize | KeyLen |    HashFunc | InitVal */
+{ ADD_ON_EMPTY,     1048576,  1048576,           1,      16,    rte_jhash,   0},
+{ ADD_ON_EMPTY,     1048576,  1048576,           2,      16,    rte_jhash,   0},
+{ ADD_ON_EMPTY,     1048576,  1048576,           4,      16,    rte_jhash,   0},
+{ ADD_ON_EMPTY,     1048576,  1048576,           8,      16,    rte_jhash,   0},
+{ ADD_ON_EMPTY,     1048576,  1048576,          16,      16,    rte_jhash,   0},
+{ ADD_ON_EMPTY,     1048576,  1048576,           1,      32,    rte_jhash,   0},
+{ ADD_ON_EMPTY,     1048576,  1048576,           2,      32,    rte_jhash,   0},
+{ ADD_ON_EMPTY,     1048576,  1048576,           4,      32,    rte_jhash,   0},
+{ ADD_ON_EMPTY,     1048576,  1048576,           8,      32,    rte_jhash,   0},
+{ ADD_ON_EMPTY,     1048576,  1048576,          16,      32,    rte_jhash,   0},
+{ ADD_ON_EMPTY,     1048576,  1048576,           1,      48,    rte_jhash,   0},
+{ ADD_ON_EMPTY,     1048576,  1048576,           2,      48,    rte_jhash,   0},
+{ ADD_ON_EMPTY,     1048576,  1048576,           4,      48,    rte_jhash,   0},
+{ ADD_ON_EMPTY,     1048576,  1048576,           8,      48,    rte_jhash,   0},
+{ ADD_ON_EMPTY,     1048576,  1048576,          16,      48,    rte_jhash,   0},
+{ ADD_ON_EMPTY,     1048576,  1048576,           1,      64,    rte_jhash,   0},
+{ ADD_ON_EMPTY,     1048576,  1048576,           2,      64,    rte_jhash,   0},
+{ ADD_ON_EMPTY,     1048576,  1048576,           4,      64,    rte_jhash,   0},
+{ ADD_ON_EMPTY,     1048576,  1048576,           8,      64,    rte_jhash,   0},
+{ ADD_ON_EMPTY,     1048576,  1048576,          16,      64,    rte_jhash,   0},
+/* Big table, update */
+/* Test type  | Iterations | Entries | BucketSize | KeyLen |    HashFunc | InitVal */
+{   ADD_UPDATE,  ITERATIONS,  1048576,           1,      16,    rte_jhash,   0},
+{   ADD_UPDATE,  ITERATIONS,  1048576,           2,      16,    rte_jhash,   0},
+{   ADD_UPDATE,  ITERATIONS,  1048576,           4,      16,    rte_jhash,   0},
+{   ADD_UPDATE,  ITERATIONS,  1048576,           8,      16,    rte_jhash,   0},
+{   ADD_UPDATE,  ITERATIONS,  1048576,          16,      16,    rte_jhash,   0},
+{   ADD_UPDATE,  ITERATIONS,  1048576,           1,      32,    rte_jhash,   0},
+{   ADD_UPDATE,  ITERATIONS,  1048576,           2,      32,    rte_jhash,   0},
+{   ADD_UPDATE,  ITERATIONS,  1048576,           4,      32,    rte_jhash,   0},
+{   ADD_UPDATE,  ITERATIONS,  1048576,           8,      32,    rte_jhash,   0},
+{   ADD_UPDATE,  ITERATIONS,  1048576,          16,      32,    rte_jhash,   0},
+{   ADD_UPDATE,  ITERATIONS,  1048576,           1,      48,    rte_jhash,   0},
+{   ADD_UPDATE,  ITERATIONS,  1048576,           2,      48,    rte_jhash,   0},
+{   ADD_UPDATE,  ITERATIONS,  1048576,           4,      48,    rte_jhash,   0},
+{   ADD_UPDATE,  ITERATIONS,  1048576,           8,      48,    rte_jhash,   0},
+{   ADD_UPDATE,  ITERATIONS,  1048576,          16,      48,    rte_jhash,   0},
+{   ADD_UPDATE,  ITERATIONS,  1048576,           1,      64,    rte_jhash,   0},
+{   ADD_UPDATE,  ITERATIONS,  1048576,           2,      64,    rte_jhash,   0},
+{   ADD_UPDATE,  ITERATIONS,  1048576,           4,      64,    rte_jhash,   0},
+{   ADD_UPDATE,  ITERATIONS,  1048576,           8,      64,    rte_jhash,   0},
+{   ADD_UPDATE,  ITERATIONS,  1048576,          16,      64,    rte_jhash,   0},
+/* Big table, lookup */
+/* Test type  | Iterations | Entries | BucketSize | KeyLen |    HashFunc | InitVal */
+{       LOOKUP,  ITERATIONS,  1048576,           1,      16,    rte_jhash,   0},
+{       LOOKUP,  ITERATIONS,  1048576,           2,      16,    rte_jhash,   0},
+{       LOOKUP,  ITERATIONS,  1048576,           4,      16,    rte_jhash,   0},
+{       LOOKUP,  ITERATIONS,  1048576,           8,      16,    rte_jhash,   0},
+{       LOOKUP,  ITERATIONS,  1048576,          16,      16,    rte_jhash,   0},
+{       LOOKUP,  ITERATIONS,  1048576,           1,      32,    rte_jhash,   0},
+{       LOOKUP,  ITERATIONS,  1048576,           2,      32,    rte_jhash,   0},
+{       LOOKUP,  ITERATIONS,  1048576,           4,      32,    rte_jhash,   0},
+{       LOOKUP,  ITERATIONS,  1048576,           8,      32,    rte_jhash,   0},
+{       LOOKUP,  ITERATIONS,  1048576,          16,      32,    rte_jhash,   0},
+{       LOOKUP,  ITERATIONS,  1048576,           1,      48,    rte_jhash,   0},
+{       LOOKUP,  ITERATIONS,  1048576,           2,      48,    rte_jhash,   0},
+{       LOOKUP,  ITERATIONS,  1048576,           4,      48,    rte_jhash,   0},
+{       LOOKUP,  ITERATIONS,  1048576,           8,      48,    rte_jhash,   0},
+{       LOOKUP,  ITERATIONS,  1048576,          16,      48,    rte_jhash,   0},
+{       LOOKUP,  ITERATIONS,  1048576,           1,      64,    rte_jhash,   0},
+{       LOOKUP,  ITERATIONS,  1048576,           2,      64,    rte_jhash,   0},
+{       LOOKUP,  ITERATIONS,  1048576,           4,      64,    rte_jhash,   0},
+{       LOOKUP,  ITERATIONS,  1048576,           8,      64,    rte_jhash,   0},
+{       LOOKUP,  ITERATIONS,  1048576,          16,      64,    rte_jhash,   0},
+
+/* Small table, add */
+/*  Test type | Iterations | Entries | BucketSize | KeyLen |    HashFunc | InitVal */
+{ ADD_ON_EMPTY,        1024,     1024,           1,      16, rte_hash_crc,   0},
+{ ADD_ON_EMPTY,        1024,     1024,           2,      16, rte_hash_crc,   0},
+{ ADD_ON_EMPTY,        1024,     1024,           4,      16, rte_hash_crc,   0},
+{ ADD_ON_EMPTY,        1024,     1024,           8,      16, rte_hash_crc,   0},
+{ ADD_ON_EMPTY,        1024,     1024,          16,      16, rte_hash_crc,   0},
+{ ADD_ON_EMPTY,        1024,     1024,           1,      32, rte_hash_crc,   0},
+{ ADD_ON_EMPTY,        1024,     1024,           2,      32, rte_hash_crc,   0},
+{ ADD_ON_EMPTY,        1024,     1024,           4,      32, rte_hash_crc,   0},
+{ ADD_ON_EMPTY,        1024,     1024,           8,      32, rte_hash_crc,   0},
+{ ADD_ON_EMPTY,        1024,     1024,          16,      32, rte_hash_crc,   0},
+{ ADD_ON_EMPTY,        1024,     1024,           1,      48, rte_hash_crc,   0},
+{ ADD_ON_EMPTY,        1024,     1024,           2,      48, rte_hash_crc,   0},
+{ ADD_ON_EMPTY,        1024,     1024,           4,      48, rte_hash_crc,   0},
+{ ADD_ON_EMPTY,        1024,     1024,           8,      48, rte_hash_crc,   0},
+{ ADD_ON_EMPTY,        1024,     1024,          16,      48, rte_hash_crc,   0},
+{ ADD_ON_EMPTY,        1024,     1024,           1,      64, rte_hash_crc,   0},
+{ ADD_ON_EMPTY,        1024,     1024,           2,      64, rte_hash_crc,   0},
+{ ADD_ON_EMPTY,        1024,     1024,           4,      64, rte_hash_crc,   0},
+{ ADD_ON_EMPTY,        1024,     1024,           8,      64, rte_hash_crc,   0},
+{ ADD_ON_EMPTY,        1024,     1024,          16,      64, rte_hash_crc,   0},
+/* Small table, update */
+/*  Test type | Iterations | Entries | BucketSize | KeyLen |    HashFunc | InitVal */
+{   ADD_UPDATE,  ITERATIONS,     1024,           1,      16, rte_hash_crc,   0},
+{   ADD_UPDATE,  ITERATIONS,     1024,           2,      16, rte_hash_crc,   0},
+{   ADD_UPDATE,  ITERATIONS,     1024,           4,      16, rte_hash_crc,   0},
+{   ADD_UPDATE,  ITERATIONS,     1024,           8,      16, rte_hash_crc,   0},
+{   ADD_UPDATE,  ITERATIONS,     1024,          16,      16, rte_hash_crc,   0},
+{   ADD_UPDATE,  ITERATIONS,     1024,           1,      32, rte_hash_crc,   0},
+{   ADD_UPDATE,  ITERATIONS,     1024,           2,      32, rte_hash_crc,   0},
+{   ADD_UPDATE,  ITERATIONS,     1024,           4,      32, rte_hash_crc,   0},
+{   ADD_UPDATE,  ITERATIONS,     1024,           8,      32, rte_hash_crc,   0},
+{   ADD_UPDATE,  ITERATIONS,     1024,          16,      32, rte_hash_crc,   0},
+{   ADD_UPDATE,  ITERATIONS,     1024,           1,      48, rte_hash_crc,   0},
+{   ADD_UPDATE,  ITERATIONS,     1024,           2,      48, rte_hash_crc,   0},
+{   ADD_UPDATE,  ITERATIONS,     1024,           4,      48, rte_hash_crc,   0},
+{   ADD_UPDATE,  ITERATIONS,     1024,           8,      48, rte_hash_crc,   0},
+{   ADD_UPDATE,  ITERATIONS,     1024,          16,      48, rte_hash_crc,   0},
+{   ADD_UPDATE,  ITERATIONS,     1024,           1,      64, rte_hash_crc,   0},
+{   ADD_UPDATE,  ITERATIONS,     1024,           2,      64, rte_hash_crc,   0},
+{   ADD_UPDATE,  ITERATIONS,     1024,           4,      64, rte_hash_crc,   0},
+{   ADD_UPDATE,  ITERATIONS,     1024,           8,      64, rte_hash_crc,   0},
+{   ADD_UPDATE,  ITERATIONS,     1024,          16,      64, rte_hash_crc,   0},
+/* Small table, lookup */
+/*  Test type | Iterations | Entries | BucketSize | KeyLen |    HashFunc | InitVal */
+{       LOOKUP,  ITERATIONS,     1024,           1,      16, rte_hash_crc,   0},
+{       LOOKUP,  ITERATIONS,     1024,           2,      16, rte_hash_crc,   0},
+{       LOOKUP,  ITERATIONS,     1024,           4,      16, rte_hash_crc,   0},
+{       LOOKUP,  ITERATIONS,     1024,           8,      16, rte_hash_crc,   0},
+{       LOOKUP,  ITERATIONS,     1024,          16,      16, rte_hash_crc,   0},
+{       LOOKUP,  ITERATIONS,     1024,           1,      32, rte_hash_crc,   0},
+{       LOOKUP,  ITERATIONS,     1024,           2,      32, rte_hash_crc,   0},
+{       LOOKUP,  ITERATIONS,     1024,           4,      32, rte_hash_crc,   0},
+{       LOOKUP,  ITERATIONS,     1024,           8,      32, rte_hash_crc,   0},
+{       LOOKUP,  ITERATIONS,     1024,          16,      32, rte_hash_crc,   0},
+{       LOOKUP,  ITERATIONS,     1024,           1,      48, rte_hash_crc,   0},
+{       LOOKUP,  ITERATIONS,     1024,           2,      48, rte_hash_crc,   0},
+{       LOOKUP,  ITERATIONS,     1024,           4,      48, rte_hash_crc,   0},
+{       LOOKUP,  ITERATIONS,     1024,           8,      48, rte_hash_crc,   0},
+{       LOOKUP,  ITERATIONS,     1024,          16,      48, rte_hash_crc,   0},
+{       LOOKUP,  ITERATIONS,     1024,           1,      64, rte_hash_crc,   0},
+{       LOOKUP,  ITERATIONS,     1024,           2,      64, rte_hash_crc,   0},
+{       LOOKUP,  ITERATIONS,     1024,           4,      64, rte_hash_crc,   0},
+{       LOOKUP,  ITERATIONS,     1024,           8,      64, rte_hash_crc,   0},
+{       LOOKUP,  ITERATIONS,     1024,          16,      64, rte_hash_crc,   0},
+/* Big table, add */
+/* Test type  | Iterations | Entries | BucketSize | KeyLen |    HashFunc | InitVal */
+{ ADD_ON_EMPTY,     1048576,  1048576,           1,      16, rte_hash_crc,   0},
+{ ADD_ON_EMPTY,     1048576,  1048576,           2,      16, rte_hash_crc,   0},
+{ ADD_ON_EMPTY,     1048576,  1048576,           4,      16, rte_hash_crc,   0},
+{ ADD_ON_EMPTY,     1048576,  1048576,           8,      16, rte_hash_crc,   0},
+{ ADD_ON_EMPTY,     1048576,  1048576,          16,      16, rte_hash_crc,   0},
+{ ADD_ON_EMPTY,     1048576,  1048576,           1,      32, rte_hash_crc,   0},
+{ ADD_ON_EMPTY,     1048576,  1048576,           2,      32, rte_hash_crc,   0},
+{ ADD_ON_EMPTY,     1048576,  1048576,           4,      32, rte_hash_crc,   0},
+{ ADD_ON_EMPTY,     1048576,  1048576,           8,      32, rte_hash_crc,   0},
+{ ADD_ON_EMPTY,     1048576,  1048576,          16,      32, rte_hash_crc,   0},
+{ ADD_ON_EMPTY,     1048576,  1048576,           1,      48, rte_hash_crc,   0},
+{ ADD_ON_EMPTY,     1048576,  1048576,           2,      48, rte_hash_crc,   0},
+{ ADD_ON_EMPTY,     1048576,  1048576,           4,      48, rte_hash_crc,   0},
+{ ADD_ON_EMPTY,     1048576,  1048576,           8,      48, rte_hash_crc,   0},
+{ ADD_ON_EMPTY,     1048576,  1048576,          16,      48, rte_hash_crc,   0},
+{ ADD_ON_EMPTY,     1048576,  1048576,           1,      64, rte_hash_crc,   0},
+{ ADD_ON_EMPTY,     1048576,  1048576,           2,      64, rte_hash_crc,   0},
+{ ADD_ON_EMPTY,     1048576,  1048576,           4,      64, rte_hash_crc,   0},
+{ ADD_ON_EMPTY,     1048576,  1048576,           8,      64, rte_hash_crc,   0},
+{ ADD_ON_EMPTY,     1048576,  1048576,          16,      64, rte_hash_crc,   0},
+/* Big table, update */
+/* Test type  | Iterations | Entries | BucketSize | KeyLen | HashFunc | InitVal */
+{   ADD_UPDATE,  ITERATIONS,  1048576,           1,      16, rte_hash_crc,   0},
+{   ADD_UPDATE,  ITERATIONS,  1048576,           2,      16, rte_hash_crc,   0},
+{   ADD_UPDATE,  ITERATIONS,  1048576,           4,      16, rte_hash_crc,   0},
+{   ADD_UPDATE,  ITERATIONS,  1048576,           8,      16, rte_hash_crc,   0},
+{   ADD_UPDATE,  ITERATIONS,  1048576,          16,      16, rte_hash_crc,   0},
+{   ADD_UPDATE,  ITERATIONS,  1048576,           1,      32, rte_hash_crc,   0},
+{   ADD_UPDATE,  ITERATIONS,  1048576,           2,      32, rte_hash_crc,   0},
+{   ADD_UPDATE,  ITERATIONS,  1048576,           4,      32, rte_hash_crc,   0},
+{   ADD_UPDATE,  ITERATIONS,  1048576,           8,      32, rte_hash_crc,   0},
+{   ADD_UPDATE,  ITERATIONS,  1048576,          16,      32, rte_hash_crc,   0},
+{   ADD_UPDATE,  ITERATIONS,  1048576,           1,      48, rte_hash_crc,   0},
+{   ADD_UPDATE,  ITERATIONS,  1048576,           2,      48, rte_hash_crc,   0},
+{   ADD_UPDATE,  ITERATIONS,  1048576,           4,      48, rte_hash_crc,   0},
+{   ADD_UPDATE,  ITERATIONS,  1048576,           8,      48, rte_hash_crc,   0},
+{   ADD_UPDATE,  ITERATIONS,  1048576,          16,      48, rte_hash_crc,   0},
+{   ADD_UPDATE,  ITERATIONS,  1048576,           1,      64, rte_hash_crc,   0},
+{   ADD_UPDATE,  ITERATIONS,  1048576,           2,      64, rte_hash_crc,   0},
+{   ADD_UPDATE,  ITERATIONS,  1048576,           4,      64, rte_hash_crc,   0},
+{   ADD_UPDATE,  ITERATIONS,  1048576,           8,      64, rte_hash_crc,   0},
+{   ADD_UPDATE,  ITERATIONS,  1048576,          16,      64, rte_hash_crc,   0},
+/* Big table, lookup */
+/* Test type  | Iterations | Entries | BucketSize | KeyLen | HashFunc | InitVal */
+{       LOOKUP,  ITERATIONS,  1048576,           1,      16, rte_hash_crc,   0},
+{       LOOKUP,  ITERATIONS,  1048576,           2,      16, rte_hash_crc,   0},
+{       LOOKUP,  ITERATIONS,  1048576,           4,      16, rte_hash_crc,   0},
+{       LOOKUP,  ITERATIONS,  1048576,           8,      16, rte_hash_crc,   0},
+{       LOOKUP,  ITERATIONS,  1048576,          16,      16, rte_hash_crc,   0},
+{       LOOKUP,  ITERATIONS,  1048576,           1,      32, rte_hash_crc,   0},
+{       LOOKUP,  ITERATIONS,  1048576,           2,      32, rte_hash_crc,   0},
+{       LOOKUP,  ITERATIONS,  1048576,           4,      32, rte_hash_crc,   0},
+{       LOOKUP,  ITERATIONS,  1048576,           8,      32, rte_hash_crc,   0},
+{       LOOKUP,  ITERATIONS,  1048576,          16,      32, rte_hash_crc,   0},
+{       LOOKUP,  ITERATIONS,  1048576,           1,      48, rte_hash_crc,   0},
+{       LOOKUP,  ITERATIONS,  1048576,           2,      48, rte_hash_crc,   0},
+{       LOOKUP,  ITERATIONS,  1048576,           4,      48, rte_hash_crc,   0},
+{       LOOKUP,  ITERATIONS,  1048576,           8,      48, rte_hash_crc,   0},
+{       LOOKUP,  ITERATIONS,  1048576,          16,      48, rte_hash_crc,   0},
+{       LOOKUP,  ITERATIONS,  1048576,           1,      64, rte_hash_crc,   0},
+{       LOOKUP,  ITERATIONS,  1048576,           2,      64, rte_hash_crc,   0},
+{       LOOKUP,  ITERATIONS,  1048576,           4,      64, rte_hash_crc,   0},
+{       LOOKUP,  ITERATIONS,  1048576,           8,      64, rte_hash_crc,   0},
+{       LOOKUP,  ITERATIONS,  1048576,          16,      64, rte_hash_crc,   0},
+};
+
+/******************************************************************************/
+
+/*******************************************************************************
+ * Hash function performance test configuration section. Each performance test
+ * will be performed HASHTEST_ITERATIONS times.
+ *
+ * The arrays below control what tests are performed. Every combination
+ * from the array entries is tested.
+ */
+#define HASHTEST_ITERATIONS 1000000
+
+/* Hash functions to benchmark; rte_hash_crc only when the target CPU
+ * advertises SSE4.2 (hardware CRC32). */
+#ifdef RTE_MACHINE_CPUFLAG_SSE4_2
+static rte_hash_function hashtest_funcs[] = {rte_jhash, rte_hash_crc};
+#else
+static rte_hash_function hashtest_funcs[] = {rte_jhash};
+#endif
+/* Initial seed values passed to each hash function */
+static uint32_t hashtest_initvals[] = {0};
+/* Key lengths in bytes, mixing aligned and unaligned sizes */
+static uint32_t hashtest_key_lens[] = {2, 4, 5, 6, 7, 8, 10, 11, 15, 16, 21, 31, 32, 33, 63, 64};
+/******************************************************************************/
+
+/*
+ * Check condition and return an error if true. Assumes that "handle" is the
+ * name of the hash structure pointer to be freed. On error it prints the
+ * message with the failing source line, frees "handle" if non-NULL, and
+ * returns -1 from the *enclosing* function.
+ */
+#define RETURN_IF_ERROR(cond, str, ...) do {                           \
+       if (cond) {                                                     \
+               printf("ERROR line %d: " str "\n", __LINE__, ##__VA_ARGS__); \
+               if (handle) rte_hash_free(handle);                      \
+               return -1;                                              \
+       }                                                               \
+} while(0)
+
+/* Same as RETURN_IF_ERROR, but "handle" is an rte_fbk_hash table and is
+ * released with rte_fbk_hash_free() instead. */
+#define RETURN_IF_ERROR_FBK(cond, str, ...) do {                               \
+       if (cond) {                                                     \
+               printf("ERROR line %d: " str "\n", __LINE__, ##__VA_ARGS__); \
+               if (handle) rte_fbk_hash_free(handle);                  \
+               return -1;                                              \
+       }                                                               \
+} while(0)
+
+/* 5-tuple key type: IPv4 addresses, L4 ports and protocol. Packed so the
+ * struct has no padding and key_len is exactly 13 bytes. */
+struct flow_key {
+       uint32_t ip_src;        /* source IPv4 address */
+       uint32_t ip_dst;        /* destination IPv4 address */
+       uint16_t port_src;      /* source L4 port */
+       uint16_t port_dst;      /* destination L4 port */
+       uint8_t proto;          /* IP protocol number */
+} __attribute__((packed));
+
+/*
+ * Constant hash function: every key maps to the same value, so all entries
+ * land in one bucket. Used to drive a single bucket to capacity in the
+ * full-bucket unit test.
+ */
+static uint32_t pseudo_hash(__attribute__((unused)) const void *data,
+                           __attribute__((unused)) uint32_t data_len,
+                           __attribute__((unused)) uint32_t seed)
+{
+       const uint32_t fixed_hash_value = 3;
+
+       return fixed_hash_value;
+}
+
+/*
+ * Print out result of unit test hash operation.
+ */
+#if defined(UNIT_TEST_HASH_VERBOSE)
+/* Verbose build: dump every byte of the key in hex plus the slot index. */
+static void print_key_info(const char *msg, const struct flow_key *key,
+                                                               int32_t pos)
+{
+       uint8_t *p = (uint8_t *)key;
+       unsigned i;
+
+       printf("%s key:0x", msg);
+       for (i = 0; i < sizeof(struct flow_key); i++) {
+               printf("%02X", p[i]);
+       }
+       printf(" @ pos %d\n", pos);
+}
+#else
+/* Non-verbose build: no-op with an identical signature so call sites need
+ * no conditional compilation. */
+static void print_key_info(__attribute__((unused)) const char *msg,
+               __attribute__((unused)) const struct flow_key *key,
+               __attribute__((unused)) int32_t pos)
+{
+}
+#endif
+
+/* Keys used by unit test functions. Five distinct 5-tuple keys; within key N
+ * the packed byte values run 0xN0 through 0xNc, which makes the hex dump from
+ * print_key_info() easy to read. */
+static struct flow_key keys[5] = { {
+       .ip_src = IPv4(0x03, 0x02, 0x01, 0x00),
+       .ip_dst = IPv4(0x07, 0x06, 0x05, 0x04),
+       .port_src = 0x0908,
+       .port_dst = 0x0b0a,
+       .proto = 0x0c,
+}, {
+       .ip_src = IPv4(0x13, 0x12, 0x11, 0x10),
+       .ip_dst = IPv4(0x17, 0x16, 0x15, 0x14),
+       .port_src = 0x1918,
+       .port_dst = 0x1b1a,
+       .proto = 0x1c,
+}, {
+       .ip_src = IPv4(0x23, 0x22, 0x21, 0x20),
+       .ip_dst = IPv4(0x27, 0x26, 0x25, 0x24),
+       .port_src = 0x2928,
+       .port_dst = 0x2b2a,
+       .proto = 0x2c,
+}, {
+       .ip_src = IPv4(0x33, 0x32, 0x31, 0x30),
+       .ip_dst = IPv4(0x37, 0x36, 0x35, 0x34),
+       .port_src = 0x3938,
+       .port_dst = 0x3b3a,
+       .proto = 0x3c,
+}, {
+       .ip_src = IPv4(0x43, 0x42, 0x41, 0x40),
+       .ip_dst = IPv4(0x47, 0x46, 0x45, 0x44),
+       .port_src = 0x4948,
+       .port_dst = 0x4b4a,
+       .proto = 0x4c,
+} };
+
+/* Parameters used for hash table in unit test functions. Name set later
+ * by each test before calling rte_hash_create(). */
+static struct rte_hash_parameters ut_params = {
+       .entries = 64,                          /* total table capacity */
+       .bucket_entries = 4,                    /* slots per bucket */
+       .key_len = sizeof(struct flow_key), /* 13 */
+       .hash_func = rte_jhash,
+       .hash_func_init_val = 0,
+       .socket_id = 0,                         /* NUMA socket for allocation */
+};
+
+/*
+ * Basic sequence of operations for a single key:
+ *     - add
+ *     - lookup (hit)
+ *     - delete
+ *     - lookup (miss)
+ */
+static int test_add_delete(void)
+{
+       struct rte_hash *handle;
+       int slot, saved_slot;
+
+       ut_params.name = "test1";
+       handle = rte_hash_create(&ut_params);
+       RETURN_IF_ERROR(handle == NULL, "hash creation failed");
+
+       /* Add the key and remember which slot it was stored in */
+       slot = rte_hash_add_key(handle, &keys[0]);
+       print_key_info("Add", &keys[0], slot);
+       RETURN_IF_ERROR(slot < 0, "failed to add key (pos0=%d)", slot);
+       saved_slot = slot;
+
+       /* Lookup must hit and report the same slot */
+       slot = rte_hash_lookup(handle, &keys[0]);
+       print_key_info("Lkp", &keys[0], slot);
+       RETURN_IF_ERROR(slot != saved_slot,
+                       "failed to find key (pos0=%d)", slot);
+
+       /* Delete reports the slot that was vacated */
+       slot = rte_hash_del_key(handle, &keys[0]);
+       print_key_info("Del", &keys[0], slot);
+       RETURN_IF_ERROR(slot != saved_slot,
+                       "failed to delete key (pos0=%d)", slot);
+
+       /* After the delete, lookup must miss */
+       slot = rte_hash_lookup(handle, &keys[0]);
+       print_key_info("Lkp", &keys[0], slot);
+       RETURN_IF_ERROR(slot != -ENOENT,
+                       "fail: found key after deleting! (pos0=%d)", slot);
+
+       rte_hash_free(handle);
+       return 0;
+}
+
+/*
+ * Sequence of operations for a single key:
+ *     - delete: miss
+ *     - add
+ *     - lookup: hit
+ *     - add: update
+ *     - lookup: hit (updated data)
+ *     - delete: hit
+ *     - delete: miss
+ *     - lookup: miss
+ */
+static int test_add_update_delete(void)
+{
+       struct rte_hash *handle;
+       int pos0, expectedPos0;
+
+       ut_params.name = "test2";
+       handle = rte_hash_create(&ut_params);
+       RETURN_IF_ERROR(handle == NULL, "hash creation failed");
+
+       /* Deleting a key that was never added must report ENOENT */
+       pos0 = rte_hash_del_key(handle, &keys[0]);
+       print_key_info("Del", &keys[0], pos0);
+       RETURN_IF_ERROR(pos0 != -ENOENT,
+                       "fail: found non-existent key (pos0=%d)", pos0);
+
+       pos0 = rte_hash_add_key(handle, &keys[0]);
+       print_key_info("Add", &keys[0], pos0);
+       RETURN_IF_ERROR(pos0 < 0, "failed to add key (pos0=%d)", pos0);
+       expectedPos0 = pos0;
+
+       pos0 = rte_hash_lookup(handle, &keys[0]);
+       print_key_info("Lkp", &keys[0], pos0);
+       RETURN_IF_ERROR(pos0 != expectedPos0,
+                       "failed to find key (pos0=%d)", pos0);
+
+       /* Re-adding the same key is an update: same slot must be returned */
+       pos0 = rte_hash_add_key(handle, &keys[0]);
+       print_key_info("Add", &keys[0], pos0);
+       RETURN_IF_ERROR(pos0 != expectedPos0,
+                       "failed to re-add key (pos0=%d)", pos0);
+
+       pos0 = rte_hash_lookup(handle, &keys[0]);
+       print_key_info("Lkp", &keys[0], pos0);
+       RETURN_IF_ERROR(pos0 != expectedPos0,
+                       "failed to find key (pos0=%d)", pos0);
+
+       pos0 = rte_hash_del_key(handle, &keys[0]);
+       print_key_info("Del", &keys[0], pos0);
+       RETURN_IF_ERROR(pos0 != expectedPos0,
+                       "failed to delete key (pos0=%d)", pos0);
+
+       /* Second delete of the same key must miss */
+       pos0 = rte_hash_del_key(handle, &keys[0]);
+       print_key_info("Del", &keys[0], pos0);
+       RETURN_IF_ERROR(pos0 != -ENOENT,
+                       "fail: deleted already deleted key (pos0=%d)", pos0);
+
+       pos0 = rte_hash_lookup(handle, &keys[0]);
+       print_key_info("Lkp", &keys[0], pos0);
+       RETURN_IF_ERROR(pos0 != -ENOENT,
+                       "fail: found key after deleting! (pos0=%d)", pos0);
+
+       rte_hash_free(handle);
+       return 0;
+}
+
+/*
+ * Sequence of operations for find existing hash table
+ *
+ *  - create table
+ *  - find existing table: hit
+ *  - find non-existing table: miss
+ *
+ */
+static int test_hash_find_existing(void)
+{
+       struct rte_hash *handle = NULL;
+       struct rte_hash *found = NULL;
+
+       /* Create hash table. */
+       ut_params.name = "hash_find_existing";
+       handle = rte_hash_create(&ut_params);
+       RETURN_IF_ERROR(handle == NULL, "hash creation failed");
+
+       /* Looking up the table by its name must return the same handle */
+       found = rte_hash_find_existing("hash_find_existing");
+       RETURN_IF_ERROR(found != handle, "could not find existing hash table");
+
+       /* A name that was never created must not resolve to anything */
+       found = rte_hash_find_existing("hash_find_non_existing");
+       RETURN_IF_ERROR(found != NULL, "found table that shouldn't exist");
+
+       /* Cleanup. */
+       rte_hash_free(handle);
+
+       return 0;
+}
+
+/*
+ * Sequence of operations for 5 keys
+ *     - add keys
+ *     - lookup keys (via burst lookup): hit
+ *     - add keys (update)
+ *     - lookup keys: hit (updated data)
+ *     - delete keys : hit
+ *     - lookup keys: miss
+ *
+ * Returns 0 on success, -1 on any failed step (table freed by the macro).
+ */
+static int test_five_keys(void)
+{
+       struct rte_hash *handle;
+       const void *key_array[5] = {0};
+       int pos[5];
+       int expected_pos[5];
+       unsigned i;
+       int ret;
+
+       ut_params.name = "test3";
+       handle = rte_hash_create(&ut_params);
+       RETURN_IF_ERROR(handle == NULL, "hash creation failed");
+
+       /* Add */
+       for (i = 0; i < 5; i++) {
+               pos[i] = rte_hash_add_key(handle, &keys[i]);
+               print_key_info("Add", &keys[i], pos[i]);
+               RETURN_IF_ERROR(pos[i] < 0,
+                               "failed to add key (pos[%u]=%d)", i, pos[i]);
+               expected_pos[i] = pos[i];
+       }
+
+       /* Lookup (burst). A failed burst call must fail the test rather than
+        * silently skipping the per-key verification. */
+       for (i = 0; i < 5; i++)
+               key_array[i] = &keys[i];
+
+       ret = rte_hash_lookup_multi(handle, &key_array[0], 5, (int32_t *)pos);
+       RETURN_IF_ERROR(ret != 0, "multi lookup failed (ret=%d)", ret);
+       for (i = 0; i < 5; i++) {
+               print_key_info("Lkp", key_array[i], pos[i]);
+               RETURN_IF_ERROR(pos[i] != expected_pos[i],
+                               "failed to find key (pos[%u]=%d)", i, pos[i]);
+       }
+
+       /* Add - update: re-adding must return the original slots */
+       for (i = 0; i < 5; i++) {
+               pos[i] = rte_hash_add_key(handle, &keys[i]);
+               print_key_info("Add", &keys[i], pos[i]);
+               RETURN_IF_ERROR(pos[i] != expected_pos[i],
+                               "failed to add key (pos[%u]=%d)", i, pos[i]);
+       }
+
+       /* Lookup */
+       for (i = 0; i < 5; i++) {
+               pos[i] = rte_hash_lookup(handle, &keys[i]);
+               print_key_info("Lkp", &keys[i], pos[i]);
+               RETURN_IF_ERROR(pos[i] != expected_pos[i],
+                               "failed to find key (pos[%u]=%d)", i, pos[i]);
+       }
+
+       /* Delete */
+       for (i = 0; i < 5; i++) {
+               pos[i] = rte_hash_del_key(handle, &keys[i]);
+               print_key_info("Del", &keys[i], pos[i]);
+               RETURN_IF_ERROR(pos[i] != expected_pos[i],
+                               "failed to delete key (pos[%u]=%d)", i, pos[i]);
+       }
+
+       /* Lookup after delete must miss for all keys */
+       for (i = 0; i < 5; i++) {
+               pos[i] = rte_hash_lookup(handle, &keys[i]);
+               print_key_info("Lkp", &keys[i], pos[i]);
+               RETURN_IF_ERROR(pos[i] != -ENOENT,
+                               "failed to find key (pos[%u]=%d)", i, pos[i]);
+       }
+
+       rte_hash_free(handle);
+
+       return 0;
+}
+
+/*
+ * Add keys to the same bucket until bucket full.
+ *     - add 5 keys to the same bucket (hash created with 4 keys per bucket):
+ *             first 4 successful, 5th unsuccessful
+ *     - lookup the 5 keys: 4 hits, 1 miss
+ *     - add the 5 keys again: 4 OK, one error as bucket is full
+ *     - lookup the 5 keys: 4 hits (updated data), 1 miss
+ *     - delete the 5 keys: 5 OK (even if the 5th is not in the table)
+ *     - lookup the 5 keys: 5 misses
+ *     - add the 5th key: OK
+ *     - lookup the 5th key: hit
+ */
+static int test_full_bucket(void)
+{
+       /* pseudo_hash forces every key into one bucket of 4 entries */
+       struct rte_hash_parameters params_pseudo_hash = {
+               .name = "test4",
+               .entries = 64,
+               .bucket_entries = 4,
+               .key_len = sizeof(struct flow_key), /* 13 */
+               .hash_func = pseudo_hash,
+               .hash_func_init_val = 0,
+               .socket_id = 0,
+       };
+       struct rte_hash *handle;
+       int pos[5];
+       int expected_pos[5];
+       unsigned i;
+
+       handle = rte_hash_create(&params_pseudo_hash);
+       RETURN_IF_ERROR(handle == NULL, "hash creation failed");
+
+       /* Fill bucket*/
+       for (i = 0; i < 4; i++) {
+               pos[i] = rte_hash_add_key(handle, &keys[i]);
+               print_key_info("Add", &keys[i], pos[i]);
+               RETURN_IF_ERROR(pos[i] < 0,
+                       "failed to add key (pos[%u]=%d)", i, pos[i]);
+               expected_pos[i] = pos[i];
+       }
+       /* This shouldn't work because the bucket is full */
+       pos[4] = rte_hash_add_key(handle, &keys[4]);
+       print_key_info("Add", &keys[4], pos[4]);
+       RETURN_IF_ERROR(pos[4] != -ENOSPC,
+                       "fail: added key to full bucket (pos[4]=%d)", pos[4]);
+
+       /* Lookup */
+       for (i = 0; i < 4; i++) {
+               pos[i] = rte_hash_lookup(handle, &keys[i]);
+               print_key_info("Lkp", &keys[i], pos[i]);
+               RETURN_IF_ERROR(pos[i] != expected_pos[i],
+                       "failed to find key (pos[%u]=%d)", i, pos[i]);
+       }
+       /* 5th key was rejected above, so it must not be found */
+       pos[4] = rte_hash_lookup(handle, &keys[4]);
+       print_key_info("Lkp", &keys[4], pos[4]);
+       RETURN_IF_ERROR(pos[4] != -ENOENT,
+                       "fail: found non-existent key (pos[4]=%d)", pos[4]);
+
+       /* Add - update */
+       for (i = 0; i < 4; i++) {
+               pos[i] = rte_hash_add_key(handle, &keys[i]);
+               print_key_info("Add", &keys[i], pos[i]);
+               RETURN_IF_ERROR(pos[i] != expected_pos[i],
+                       "failed to add key (pos[%u]=%d)", i, pos[i]);
+       }
+       pos[4] = rte_hash_add_key(handle, &keys[4]);
+       print_key_info("Add", &keys[4], pos[4]);
+       RETURN_IF_ERROR(pos[4] != -ENOSPC,
+                       "fail: added key to full bucket (pos[4]=%d)", pos[4]);
+
+       /* Lookup */
+       for (i = 0; i < 4; i++) {
+               pos[i] = rte_hash_lookup(handle, &keys[i]);
+               print_key_info("Lkp", &keys[i], pos[i]);
+               RETURN_IF_ERROR(pos[i] != expected_pos[i],
+                       "failed to find key (pos[%u]=%d)", i, pos[i]);
+       }
+       pos[4] = rte_hash_lookup(handle, &keys[4]);
+       print_key_info("Lkp", &keys[4], pos[4]);
+       RETURN_IF_ERROR(pos[4] != -ENOENT,
+                       "fail: found non-existent key (pos[4]=%d)", pos[4]);
+
+       /* Delete 1 key, check other keys are still found */
+       pos[1] = rte_hash_del_key(handle, &keys[1]);
+       print_key_info("Del", &keys[1], pos[1]);
+       RETURN_IF_ERROR(pos[1] != expected_pos[1],
+                       "failed to delete key (pos[1]=%d)", pos[1]);
+       pos[3] = rte_hash_lookup(handle, &keys[3]);
+       print_key_info("Lkp", &keys[3], pos[3]);
+       RETURN_IF_ERROR(pos[3] != expected_pos[3],
+                       "failed lookup after deleting key from same bucket "
+                       "(pos[3]=%d)", pos[3]);
+
+       /* Go back to previous state: re-add key 1. Its slot may differ from
+        * before, so refresh expected_pos[1]. */
+       pos[1] = rte_hash_add_key(handle, &keys[1]);
+       print_key_info("Add", &keys[1], pos[1]);
+       expected_pos[1] = pos[1];
+       RETURN_IF_ERROR(pos[1] < 0, "failed to add key (pos[1]=%d)", pos[1]);
+
+       /* Delete */
+       for (i = 0; i < 4; i++) {
+               pos[i] = rte_hash_del_key(handle, &keys[i]);
+               print_key_info("Del", &keys[i], pos[i]);
+               RETURN_IF_ERROR(pos[i] != expected_pos[i],
+                       "failed to delete key (pos[%u]=%d)", i, pos[i]);
+       }
+       /* The 5th key was never stored, so deleting it must miss */
+       pos[4] = rte_hash_del_key(handle, &keys[4]);
+       print_key_info("Del", &keys[4], pos[4]);
+       RETURN_IF_ERROR(pos[4] != -ENOENT,
+                       "fail: deleted non-existent key (pos[4]=%d)", pos[4]);
+
+       /* Lookup */
+       for (i = 0; i < 4; i++) {
+               pos[i] = rte_hash_lookup(handle, &keys[i]);
+               print_key_info("Lkp", &keys[i], pos[i]);
+               RETURN_IF_ERROR(pos[i] != -ENOENT,
+                       "fail: found non-existent key (pos[%u]=%d)", i, pos[i]);
+       }
+
+       /* Add and lookup the 5th key */
+       pos[4] = rte_hash_add_key(handle, &keys[4]);
+       print_key_info("Add", &keys[4], pos[4]);
+       RETURN_IF_ERROR(pos[4] < 0, "failed to add key (pos[4]=%d)", pos[4]);
+       expected_pos[4] = pos[4];
+       pos[4] = rte_hash_lookup(handle, &keys[4]);
+       print_key_info("Lkp", &keys[4], pos[4]);
+       RETURN_IF_ERROR(pos[4] != expected_pos[4],
+                       "failed to find key (pos[4]=%d)", pos[4]);
+
+       rte_hash_free(handle);
+
+       /* Cover the NULL case. */
+       rte_hash_free(0);
+       return 0;
+}
+
+/*
+ * Map a hash function pointer to a human-readable name for report output.
+ */
+static const char *get_hash_name(rte_hash_function f)
+{
+       const char *name = "UnknownHash";
+
+       if (f == rte_jhash)
+               name = "jhash";
+       else if (f == rte_hash_crc)
+               name = "rte_hash_crc";
+
+       return name;
+}
+
+/*
+ * Find average of array of numbers.
+ *
+ * @param array
+ *   Values to average (may be NULL only when size is 0).
+ * @param size
+ *   Number of elements in array.
+ * @return
+ *   The arithmetic mean, or 0.0 when size is 0 (avoids the 0/0 NaN that
+ *   would otherwise propagate into the performance report).
+ */
+static double
+get_avg(const uint32_t *array, uint32_t size)
+{
+       double sum = 0;
+       unsigned i;
+
+       if (size == 0)
+               return 0.0;
+       for (i = 0; i < size; i++)
+               sum += array[i];
+       return sum / (double)size;
+}
+
+/*
+ * Do a single performance test, of one type of operation.
+ *
+ * @param h
+ *   hash table to run test on
+ * @param func
+ *   function to call (add, delete or lookup function)
+ * @param avg_occupancy
+ *   The average number of entries in each bucket of the hash table
+ * @param invalid_pos_count
+ *   The amount of errors (e.g. due to a full bucket).
+ * @return
+ *   The average number of ticks per hash function call. A negative number
+ *   signifies failure.
+ */
+static double
+run_single_tbl_perf_test(const struct rte_hash *h, hash_operation func,
+               const struct tbl_perf_test_params *params, double *avg_occupancy,
+               uint32_t *invalid_pos_count)
+{
+       uint64_t begin, end, ticks = 0;
+       uint8_t *key = NULL;
+       uint32_t *bucket_occupancies = NULL;
+       uint32_t num_buckets, i, j;
+       int32_t pos;
+
+       /* Initialise: one occupancy counter per bucket, plus a scratch key
+        * buffer (16-byte aligned, zeroed). */
+       num_buckets = params->entries / params->bucket_entries;
+       key = (uint8_t *) rte_zmalloc("hash key",
+                       params->key_len * sizeof(uint8_t), 16);
+       if (key == NULL)
+               return -1;
+
+       bucket_occupancies = (uint32_t *) rte_zmalloc("bucket occupancies",
+                       num_buckets * sizeof(uint32_t), 16);
+       if (bucket_occupancies == NULL) {
+               rte_free(key);
+               return -1;
+       }
+
+       ticks = 0;
+       *invalid_pos_count = 0;
+
+       for (i = 0; i < params->num_iterations; i++) {
+               /* Prepare inputs for the current iteration: a fresh random
+                * key, generated outside the timed section. */
+               for (j = 0; j < params->key_len; j++)
+                       key[j] = (uint8_t) rte_rand();
+
+               /* Perform operation, and measure time it takes */
+               begin = rte_rdtsc();
+               pos = func(h, key);
+               end = rte_rdtsc();
+               ticks += end - begin;
+
+               /* Other work per iteration: a negative slot counts as an
+                * error (e.g. full bucket); otherwise record which bucket
+                * the slot maps to. */
+               if (pos < 0)
+                       *invalid_pos_count += 1;
+               else
+                       bucket_occupancies[pos / params->bucket_entries]++;
+       }
+       *avg_occupancy = get_avg(bucket_occupancies, num_buckets);
+
+       rte_free(bucket_occupancies);
+       rte_free(key);
+
+       return (double)ticks / params->num_iterations;
+}
+
+/*
+ * To help print out what tests are being done.
+ */
+static const char *
+get_tbl_perf_test_desc(enum hash_test_t type)
+{
+       const char *desc;
+
+       switch (type) {
+       case ADD_ON_EMPTY:
+               desc = "Add on Empty";
+               break;
+       case DELETE_ON_EMPTY:
+               desc = "Delete on Empty";
+               break;
+       case LOOKUP_ON_EMPTY:
+               desc = "Lookup on Empty";
+               break;
+       case ADD_UPDATE:
+               desc = "Add Update";
+               break;
+       case DELETE:
+               desc = "Delete";
+               break;
+       case LOOKUP:
+               desc = "Lookup";
+               break;
+       default:
+               desc = "UNKNOWN";
+               break;
+       }
+       return desc;
+}
+
+/*
+ * Run a hash table performance test based on params.
+ *
+ * Creates a fresh table, runs the operation described by params->test_type
+ * (pre-filling the table first for the UPDATE/DELETE/LOOKUP variants),
+ * prints one CSV-style result line and frees the table.
+ *
+ * @return 0 on success, -1 on failure or unknown test type.
+ */
+static int
+run_tbl_perf_test(struct tbl_perf_test_params *params)
+{
+       /* Starts at 5 because names "test1".."test4" are used by the unit
+        * tests above; each call gets a unique table name. */
+       static unsigned calledCount = 5;
+       struct rte_hash_parameters hash_params = {
+               .entries = params->entries,
+               .bucket_entries = params->bucket_entries,
+               .key_len = params->key_len,
+               .hash_func = params->hash_func,
+               .hash_func_init_val = params->hash_func_init_val,
+               .socket_id = 0,
+       };
+       struct rte_hash *handle;
+       double avg_occupancy = 0, ticks = 0;
+       uint32_t num_iterations, invalid_pos;
+       char name[RTE_HASH_NAMESIZE];
+       char hashname[RTE_HASH_NAMESIZE];
+
+       /* use sizeof(name), not a hard-coded 32, so the buffer can never be
+        * overrun if RTE_HASH_NAMESIZE changes */
+       rte_snprintf(name, sizeof(name), "test%u", calledCount++);
+       hash_params.name = name;
+
+       handle = rte_hash_create(&hash_params);
+       RETURN_IF_ERROR(handle == NULL, "hash creation failed");
+
+       switch (params->test_type){
+       case ADD_ON_EMPTY:
+               ticks = run_single_tbl_perf_test(handle, rte_hash_add_key,
+                               params, &avg_occupancy, &invalid_pos);
+               break;
+       case DELETE_ON_EMPTY:
+               ticks = run_single_tbl_perf_test(handle, rte_hash_del_key,
+                               params, &avg_occupancy, &invalid_pos);
+               break;
+       case LOOKUP_ON_EMPTY:
+               ticks = run_single_tbl_perf_test(handle, rte_hash_lookup,
+                               params, &avg_occupancy, &invalid_pos);
+               break;
+       case ADD_UPDATE:
+               /* Pre-fill the table (entries adds), then measure re-adds */
+               num_iterations = params->num_iterations;
+               params->num_iterations = params->entries;
+               run_single_tbl_perf_test(handle, rte_hash_add_key, params,
+                               &avg_occupancy, &invalid_pos);
+               params->num_iterations = num_iterations;
+               ticks = run_single_tbl_perf_test(handle, rte_hash_add_key,
+                               params, &avg_occupancy, &invalid_pos);
+               break;
+       case DELETE:
+               /* Pre-fill the table, then measure deletes */
+               num_iterations = params->num_iterations;
+               params->num_iterations = params->entries;
+               run_single_tbl_perf_test(handle, rte_hash_add_key, params,
+                               &avg_occupancy, &invalid_pos);
+
+               params->num_iterations = num_iterations;
+               ticks = run_single_tbl_perf_test(handle, rte_hash_del_key,
+                               params, &avg_occupancy, &invalid_pos);
+               break;
+       case LOOKUP:
+               /* Pre-fill the table, then measure lookups */
+               num_iterations = params->num_iterations;
+               params->num_iterations = params->entries;
+               run_single_tbl_perf_test(handle, rte_hash_add_key, params,
+                               &avg_occupancy, &invalid_pos);
+
+               params->num_iterations = num_iterations;
+               ticks = run_single_tbl_perf_test(handle, rte_hash_lookup,
+                               params, &avg_occupancy, &invalid_pos);
+               break;
+       default: return -1;
+       }
+
+       rte_snprintf(hashname, RTE_HASH_NAMESIZE, "%s", get_hash_name(params->hash_func));
+
+       printf("%-12s, %-15s, %-16u, %-7u, %-18u, %-8u, %-19.2f, %.2f\n",
+               hashname,
+               get_tbl_perf_test_desc(params->test_type),
+               (unsigned) params->key_len,
+               (unsigned) params->entries,
+               (unsigned) params->bucket_entries,
+               (unsigned) invalid_pos,
+               avg_occupancy,
+               ticks
+       );
+
+       /* Free */
+       rte_hash_free(handle);
+       return 0;
+}
+
+/*
+ * Run all hash table performance tests.
+ *
+ * Prints a CSV-style header line, then runs run_tbl_perf_test() once for
+ * every entry of the tbl_perf_params[] table (defined earlier in this file).
+ * Returns 0 on success, or -1 as soon as any single test fails.
+ */
+static int run_all_tbl_perf_tests(void)
+{
+       unsigned i;
+
+       printf(" *** Hash table performance test results ***\n");
+       printf("Hash Func.  , Operation      , Key size (bytes), Entries, "
+              "Entries per bucket, Errors  , Avg. bucket entries, Ticks/Op.\n");
+
+       /* Loop through every combination of test parameters */
+       for (i = 0;
+            i < sizeof(tbl_perf_params) / sizeof(struct tbl_perf_test_params);
+            i++) {
+
+               /* Perform test */
+               if (run_tbl_perf_test(&tbl_perf_params[i]) < 0)
+                       return -1;
+       }
+       return 0;
+}
+
+/*
+ * Time a single hash function.
+ *
+ * For HASHTEST_ITERATIONS rounds: fill a key of 'key_len' random bytes,
+ * then hash it with 'f', timing only the hash call itself with rte_rdtsc().
+ * Prints the function name, key length, initial value and mean ticks/call.
+ */
+static void run_hash_func_test(rte_hash_function f, uint32_t init_val,
+               uint32_t key_len)
+{
+       /* Static scratch buffer reused across calls; fine for this
+        * single-threaded test but not reentrant. */
+       static uint8_t key[RTE_HASH_KEY_LENGTH_MAX];
+       uint64_t ticks = 0, start, end;
+       unsigned i, j;
+
+       for (i = 0; i < HASHTEST_ITERATIONS; i++) {
+
+               /* Key generation stays outside the timed region. */
+               for (j = 0; j < key_len; j++)
+                       key[j] = (uint8_t) rte_rand();
+
+               start = rte_rdtsc();
+               f(key, key_len, init_val);
+               end = rte_rdtsc();
+               ticks += end - start;
+       }
+
+       printf("%-12s, %-18u, %-13u, %.02f\n", get_hash_name(f), (unsigned) key_len,
+                       (unsigned) init_val, (double)ticks / HASHTEST_ITERATIONS);
+}
+
+/*
+ * Test all hash functions.
+ *
+ * Runs run_hash_func_test() over the full cartesian product of
+ * hashtest_funcs[] x hashtest_initvals[] x hashtest_key_lens[]
+ * (arrays defined earlier in this file) and prints one result line each.
+ */
+static void run_hash_func_tests(void)
+{
+       unsigned i, j, k;
+
+       printf("\n\n *** Hash function performance test results ***\n");
+       printf(" Number of iterations for each test = %d\n",
+                       HASHTEST_ITERATIONS);
+       printf("Hash Func.  , Key Length (bytes), Initial value, Ticks/Op.\n");
+
+       for (i = 0;
+            i < sizeof(hashtest_funcs) / sizeof(rte_hash_function);
+            i++) {
+               for (j = 0;
+                    j < sizeof(hashtest_initvals) / sizeof(uint32_t);
+                    j++) {
+                       for (k = 0;
+                            k < sizeof(hashtest_key_lens) / sizeof(uint32_t);
+                            k++) {
+                               run_hash_func_test(hashtest_funcs[i],
+                                               hashtest_initvals[j],
+                                               hashtest_key_lens[k]);
+                       }
+               }
+       }
+}
+
+/******************************************************************************/
+/*
+ * Unit test for the four-byte-key (fbk) hash.
+ *
+ * First verifies that rte_fbk_hash_create() rejects each class of invalid
+ * parameter, then exercises add / lookup / update / delete, load-factor
+ * accounting, clear-all, and free (including the free(NULL) path).
+ * Returns 0 on success; RETURN_IF_ERROR_FBK bails out with an error on
+ * the first failed check.
+ */
+static int
+fbk_hash_unit_test(void)
+{
+       /* Valid baseline parameters used by the functional checks below. */
+       struct rte_fbk_hash_params params = {
+               .name = "fbk_hash_test",
+               .entries = LOCAL_FBK_HASH_ENTRIES_MAX,
+               .entries_per_bucket = 4,
+               .socket_id = 0,
+       };
+
+       struct rte_fbk_hash_params invalid_params_1 = {
+               .name = "invalid_1",
+               .entries = LOCAL_FBK_HASH_ENTRIES_MAX + 1, /* Not power of 2 */
+               .entries_per_bucket = 4,
+               .socket_id = 0,
+       };
+
+       struct rte_fbk_hash_params invalid_params_2 = {
+               .name = "invalid_4",
+               .entries = 4,
+               .entries_per_bucket = 3,         /* Not power of 2 */
+               .socket_id = 0,
+       };
+
+       struct rte_fbk_hash_params invalid_params_3 = {
+               .name = "invalid_2",
+               .entries = 0,                    /* Entries is 0 */
+               .entries_per_bucket = 4,
+               .socket_id = 0,
+       };
+
+       struct rte_fbk_hash_params invalid_params_4 = {
+               .name = "invalid_3",
+               .entries = LOCAL_FBK_HASH_ENTRIES_MAX,
+               .entries_per_bucket = 0,         /* Entries per bucket is 0 */
+               .socket_id = 0,
+       };
+
+       /* NOTE(review): .name "invalid_4" duplicates invalid_params_2's name;
+        * harmless here since neither creation is expected to succeed. */
+       struct rte_fbk_hash_params invalid_params_5 = {
+               .name = "invalid_4",
+               .entries = 4,
+               .entries_per_bucket = 8,         /* Entries per bucket > entries */
+               .socket_id = 0,
+       };
+
+       struct rte_fbk_hash_params invalid_params_6 = {
+               .name = "invalid_5",
+               .entries = RTE_FBK_HASH_ENTRIES_MAX * 2,   /* Entries > max allowed */
+               .entries_per_bucket = 4,
+               .socket_id = 0,
+       };
+
+       struct rte_fbk_hash_params params_jhash = {
+               .name = "valid",
+               .entries = LOCAL_FBK_HASH_ENTRIES_MAX,
+               .entries_per_bucket = 4,
+               .socket_id = 0,
+               .hash_func = rte_jhash_1word,              /* Tests for different hash_func */
+               .init_val = RTE_FBK_HASH_INIT_VAL_DEFAULT,
+       };
+
+       struct rte_fbk_hash_params params_nohash = {
+               .name = "valid nohash",
+               .entries = LOCAL_FBK_HASH_ENTRIES_MAX,
+               .entries_per_bucket = 4,
+               .socket_id = 0,
+               .hash_func = 0,                            /* Tests for null hash_func */
+               .init_val = RTE_FBK_HASH_INIT_VAL_DEFAULT,
+       };
+
+       struct rte_fbk_hash_table *handle;
+       /* Five fixed key/value pairs used for all functional checks. */
+       uint32_t keys[5] =
+               {0xc6e18639, 0xe67c201c, 0xd4c8cffd, 0x44728691, 0xd5430fa9};
+       uint16_t vals[5] = {28108, 5699, 38490, 2166, 61571};
+       int status;
+       unsigned i;
+       double used_entries;
+
+       /* Try creating hashes with invalid parameters */
+       handle = rte_fbk_hash_create(&invalid_params_1);
+       RETURN_IF_ERROR_FBK(handle != NULL, "fbk hash creation should have failed");
+
+       handle = rte_fbk_hash_create(&invalid_params_2);
+       RETURN_IF_ERROR_FBK(handle != NULL, "fbk hash creation should have failed");
+
+       handle = rte_fbk_hash_create(&invalid_params_3);
+       RETURN_IF_ERROR_FBK(handle != NULL, "fbk hash creation should have failed");
+
+       handle = rte_fbk_hash_create(&invalid_params_4);
+       RETURN_IF_ERROR_FBK(handle != NULL, "fbk hash creation should have failed");
+
+       handle = rte_fbk_hash_create(&invalid_params_5);
+       RETURN_IF_ERROR_FBK(handle != NULL, "fbk hash creation should have failed");
+
+       handle = rte_fbk_hash_create(&invalid_params_6);
+       RETURN_IF_ERROR_FBK(handle != NULL, "fbk hash creation should have failed");
+
+       /* Create empty jhash hash. */
+       handle = rte_fbk_hash_create(&params_jhash);
+       RETURN_IF_ERROR_FBK(handle == NULL, "fbk jhash hash creation failed");
+
+       /* Cleanup. */
+       rte_fbk_hash_free(handle);
+
+       /* Create empty jhash hash. */
+       handle = rte_fbk_hash_create(&params_nohash);
+       RETURN_IF_ERROR_FBK(handle == NULL, "fbk nohash hash creation failed");
+
+       /* Cleanup. */
+       rte_fbk_hash_free(handle);
+
+       /* Create empty hash. */
+       handle = rte_fbk_hash_create(&params);
+       RETURN_IF_ERROR_FBK(handle == NULL, "fbk hash creation failed");
+
+       /* Load factor must be 0 on a freshly created (empty) table. */
+       used_entries = rte_fbk_hash_get_load_factor(handle) * LOCAL_FBK_HASH_ENTRIES_MAX;
+       RETURN_IF_ERROR_FBK((unsigned)used_entries != 0, \
+                               "load factor right after creation is not zero but it should be");
+       /* Add keys. */
+       for (i = 0; i < 5; i++) {
+               status = rte_fbk_hash_add_key(handle, keys[i], vals[i]);
+               RETURN_IF_ERROR_FBK(status != 0, "fbk hash add failed");
+       }
+
+       /* Load factor scaled back to entry count must equal the 5 adds. */
+       used_entries = rte_fbk_hash_get_load_factor(handle) * LOCAL_FBK_HASH_ENTRIES_MAX;
+       RETURN_IF_ERROR_FBK((unsigned)used_entries != (unsigned)((((double)5)/LOCAL_FBK_HASH_ENTRIES_MAX)*LOCAL_FBK_HASH_ENTRIES_MAX), \
+                               "load factor now is not as expected");
+       /* Find value of added keys. */
+       for (i = 0; i < 5; i++) {
+               status = rte_fbk_hash_lookup(handle, keys[i]);
+               RETURN_IF_ERROR_FBK(status != vals[i],
+                               "fbk hash lookup failed");
+       }
+
+       /* Change value of added keys (re-add updates the stored value). */
+       for (i = 0; i < 5; i++) {
+               status = rte_fbk_hash_add_key(handle, keys[i], vals[4 - i]);
+               RETURN_IF_ERROR_FBK(status != 0, "fbk hash update failed");
+       }
+
+       /* Find new values. */
+       for (i = 0; i < 5; i++) {
+               status = rte_fbk_hash_lookup(handle, keys[i]);
+               RETURN_IF_ERROR_FBK(status != vals[4-i],
+                               "fbk hash lookup failed");
+       }
+
+       /* Delete keys individually. */
+       for (i = 0; i < 5; i++) {
+               status = rte_fbk_hash_delete_key(handle, keys[i]);
+               RETURN_IF_ERROR_FBK(status != 0, "fbk hash delete failed");
+       }
+
+       /* Load factor must return to 0 after all deletions. */
+       used_entries = rte_fbk_hash_get_load_factor(handle) * LOCAL_FBK_HASH_ENTRIES_MAX;
+       RETURN_IF_ERROR_FBK((unsigned)used_entries != 0, \
+                               "load factor right after deletion is not zero but it should be");
+       /* Lookup should now fail. */
+       for (i = 0; i < 5; i++) {
+               status = rte_fbk_hash_lookup(handle, keys[i]);
+               RETURN_IF_ERROR_FBK(status == 0,
+                               "fbk hash lookup should have failed");
+       }
+
+       /* Add keys again. */
+       for (i = 0; i < 5; i++) {
+               status = rte_fbk_hash_add_key(handle, keys[i], vals[i]);
+               RETURN_IF_ERROR_FBK(status != 0, "fbk hash add failed");
+       }
+
+       /* Make sure they were added. */
+       for (i = 0; i < 5; i++) {
+               status = rte_fbk_hash_lookup(handle, keys[i]);
+               RETURN_IF_ERROR_FBK(status != vals[i],
+                               "fbk hash lookup failed");
+       }
+
+       /* Clear all entries. */
+       rte_fbk_hash_clear_all(handle);
+
+       /* Lookup should fail. */
+       for (i = 0; i < 5; i++) {
+               status = rte_fbk_hash_lookup(handle, keys[i]);
+               RETURN_IF_ERROR_FBK(status == 0,
+                               "fbk hash lookup should have failed");
+       }
+
+       /* Cleanup. */
+       rte_fbk_hash_free(handle);
+
+       /* Cover the NULL case. */
+       rte_fbk_hash_free(0);
+
+       return 0;
+}
+
+/* Control operation of performance testing of fbk hash. */
+#define LOAD_FACTOR 0.667      /* How full to make the hash table. */
+#define TEST_SIZE 1000000      /* How many operations to time. */
+#define TEST_ITERATIONS 30     /* How many measurements to take. */
+#define ENTRIES (1 << 15)      /* How many entries. */
+
+/*
+ * Performance test for the fbk hash: fill a table to LOAD_FACTOR with
+ * random 32-bit keys, then time TEST_ITERATIONS batches of TEST_SIZE
+ * random lookups and print the mean TSC ticks per lookup.
+ * Returns 0 on success, an error via RETURN_IF_ERROR_FBK otherwise.
+ */
+static int
+fbk_hash_perf_test(void)
+{
+       struct rte_fbk_hash_params params = {
+               .name = "fbk_hash_test",
+               .entries = ENTRIES,
+               .entries_per_bucket = 4,
+               .socket_id = 0,
+       };
+       struct rte_fbk_hash_table *handle;
+       /*
+        * keys[] (128 KB) and indexes[] (~3.8 MB) are far too large for
+        * automatic storage; keep them static so the test cannot overflow
+        * the stack. Static storage is zero-initialized, matching the
+        * original "= {0}" initializer.
+        */
+       static uint32_t keys[ENTRIES];
+       static unsigned indexes[TEST_SIZE];
+       uint64_t lookup_time = 0;
+       unsigned added = 0;
+       unsigned value = 0;
+       unsigned i, j;
+
+       handle = rte_fbk_hash_create(&params);
+       RETURN_IF_ERROR_FBK(handle == NULL, "fbk hash creation failed");
+
+       /* Generate random keys and values until the table reaches LOAD_FACTOR. */
+       for (i = 0; i < ENTRIES; i++) {
+               /*
+                * fbk hash keys are 32 bits wide. The original code widened
+                * the key to 64 bits and OR-ed in a second random draw, but
+                * the result was truncated back to 32 bits on assignment,
+                * silently discarding the first draw; one draw is equivalent.
+                */
+               uint32_t key = (uint32_t)rte_rand();
+               uint16_t val = (uint16_t)rte_rand();
+
+               /* Only remember keys that were actually inserted. */
+               if (rte_fbk_hash_add_key(handle, key, val) == 0) {
+                       keys[added] = key;
+                       added++;
+               }
+               if (added > (LOAD_FACTOR * ENTRIES)) {
+                       break;
+               }
+       }
+
+       for (i = 0; i < TEST_ITERATIONS; i++) {
+               uint64_t begin;
+               uint64_t end;
+
+               /* Generate random indexes into keys[] array, outside the
+                * timed region. */
+               for (j = 0; j < TEST_SIZE; j++) {
+                       indexes[j] = rte_rand() % added;
+               }
+
+               begin = rte_rdtsc();
+               /* Do lookups */
+               for (j = 0; j < TEST_SIZE; j++) {
+                       value += rte_fbk_hash_lookup(handle, keys[indexes[j]]);
+               }
+               end = rte_rdtsc();
+               /* Tick counts are integral; no intermediate double cast. */
+               lookup_time += end - begin;
+       }
+
+       printf("\n\n *** FBK Hash function performance test results ***\n");
+       /*
+        * The use of the 'value' variable ensures that the hash lookup is not
+        * being optimised out by the compiler.
+        */
+       if (value != 0)
+               printf("Number of ticks per lookup = %g\n",
+                       (double)lookup_time /
+                       ((double)TEST_ITERATIONS * (double)TEST_SIZE));
+
+       rte_fbk_hash_free(handle);
+
+       return 0;
+}
+
+/*
+ * Sequence of operations for find existing fbk hash table
+ *
+ *  - create table
+ *  - find existing table: hit
+ *  - find non-existing table: miss
+ *
+ * Returns 0 on success; RETURN_IF_ERROR_FBK exits on the first failure.
+ */
+static int test_fbk_hash_find_existing(void)
+{
+       struct rte_fbk_hash_params params = {
+                       .name = "fbk_hash_find_existing",
+                       .entries = LOCAL_FBK_HASH_ENTRIES_MAX,
+                       .entries_per_bucket = 4,
+                       .socket_id = 0,
+       };
+       struct rte_fbk_hash_table *handle = NULL, *result = NULL;
+
+       /* Create hash table. */
+       handle = rte_fbk_hash_create(&params);
+       RETURN_IF_ERROR_FBK(handle == NULL, "fbk hash creation failed");
+
+       /* Try to find existing fbk hash table: must return the same handle. */
+       result = rte_fbk_hash_find_existing("fbk_hash_find_existing");
+       RETURN_IF_ERROR_FBK(result != handle, "could not find existing fbk hash table");
+
+       /* Try to find non-existing fbk hash table: must return NULL. */
+       result = rte_fbk_hash_find_existing("fbk_hash_find_non_existing");
+       RETURN_IF_ERROR_FBK(!(result == NULL), "found fbk table that shouldn't exist");
+
+       /* Cleanup. */
+       rte_fbk_hash_free(handle);
+
+       return 0;
+}
+
+/*
+ * Do tests for hash creation with bad parameters.
+ *
+ * Each case copies the valid ut_params template (defined earlier in this
+ * file), corrupts exactly one field, and verifies rte_hash_create() fails.
+ * Returns 0 if every invalid creation was rejected, -1 otherwise.
+ *
+ * NOTE(review): the "sucessfully"/"maximun" typos in the messages below are
+ * runtime strings and are preserved verbatim.
+ */
+static int test_hash_creation_with_bad_parameters(void)
+{
+       struct rte_hash *handle;
+       struct rte_hash_parameters params;
+
+       /* NULL parameters must be rejected. */
+       handle = rte_hash_create(NULL);
+       if (handle != NULL) {
+               rte_hash_free(handle);
+               printf("Impossible creating hash sucessfully without any parameter\n");
+               return -1;
+       }
+
+       /* entries above the supported maximum. */
+       memcpy(&params, &ut_params, sizeof(params));
+       params.name = "creation_with_bad_parameters_0";
+       params.entries = RTE_HASH_ENTRIES_MAX + 1;
+       handle = rte_hash_create(&params);
+       if (handle != NULL) {
+               rte_hash_free(handle);
+               printf("Impossible creating hash sucessfully with entries in parameter exceeded\n");
+               return -1;
+       }
+
+       /* bucket_entries above the supported maximum. */
+       memcpy(&params, &ut_params, sizeof(params));
+       params.name = "creation_with_bad_parameters_1";
+       params.bucket_entries = RTE_HASH_BUCKET_ENTRIES_MAX + 1;
+       handle = rte_hash_create(&params);
+       if (handle != NULL) {
+               rte_hash_free(handle);
+               printf("Impossible creating hash sucessfully with bucket_entries in parameter exceeded\n");
+               return -1;
+       }
+
+       /* entries smaller than bucket_entries. */
+       memcpy(&params, &ut_params, sizeof(params));
+       params.name = "creation_with_bad_parameters_2";
+       params.entries = params.bucket_entries - 1;
+       handle = rte_hash_create(&params);
+       if (handle != NULL) {
+               rte_hash_free(handle);
+               printf("Impossible creating hash sucessfully if entries less than bucket_entries in parameter\n");
+               return -1;
+       }
+
+       /* entries not a power of 2 (assumes ut_params.entries is a power of
+        * two > 1 -- TODO confirm against the template definition). */
+       memcpy(&params, &ut_params, sizeof(params));
+       params.name = "creation_with_bad_parameters_3";
+       params.entries = params.entries - 1;
+       handle = rte_hash_create(&params);
+       if (handle != NULL) {
+               rte_hash_free(handle);
+               printf("Impossible creating hash sucessfully if entries in parameter is not power of 2\n");
+               return -1;
+       }
+
+       /* bucket_entries not a power of 2. */
+       memcpy(&params, &ut_params, sizeof(params));
+       params.name = "creation_with_bad_parameters_4";
+       params.bucket_entries = params.bucket_entries - 1;
+       handle = rte_hash_create(&params);
+       if (handle != NULL) {
+               rte_hash_free(handle);
+               printf("Impossible creating hash sucessfully if bucket_entries in parameter is not power of 2\n");
+               return -1;
+       }
+
+       /* zero-length keys are invalid. */
+       memcpy(&params, &ut_params, sizeof(params));
+       params.name = "creation_with_bad_parameters_5";
+       params.key_len = 0;
+       handle = rte_hash_create(&params);
+       if (handle != NULL) {
+               rte_hash_free(handle);
+               printf("Impossible creating hash sucessfully if key_len in parameter is zero\n");
+               return -1;
+       }
+
+       /* key_len above the supported maximum. */
+       memcpy(&params, &ut_params, sizeof(params));
+       params.name = "creation_with_bad_parameters_6";
+       params.key_len = RTE_HASH_KEY_LENGTH_MAX + 1;
+       handle = rte_hash_create(&params);
+       if (handle != NULL) {
+               rte_hash_free(handle);
+               printf("Impossible creating hash sucessfully if key_len is greater than the maximun\n");
+               return -1;
+       }
+
+       return 0;
+}
+
+/* 16-byte pattern key shared by the add/delete tests below; each test
+ * reads only the first key_len bytes of it. */
+static uint8_t key[16] = {0x00, 0x01, 0x02, 0x03,
+                       0x04, 0x05, 0x06, 0x07,
+                       0x08, 0x09, 0x0a, 0x0b,
+                       0x0c, 0x0d, 0x0e, 0x0f};
+/* Shared parameter template: each add/delete test overwrites .name,
+ * .key_len and .hash_func before calling rte_hash_create(), so these
+ * tests mutate global state and are not reentrant. */
+static struct rte_hash_parameters hash_params_ex = {
+       .name = NULL,
+       .entries = 64,
+       .bucket_entries = 4,
+       .key_len = 0,
+       .hash_func = NULL,
+       .hash_func_init_val = 0,
+       .socket_id = 0,
+};
+
+/*
+ * add/delete key with jhash2
+ *
+ * Creates a hash using rte_jhash2 over a 4-byte key, adds the test key,
+ * and checks that deleting it returns the same slot index that the add
+ * returned. Returns 0 on success, -1 on any failure.
+ */
+static int
+test_hash_add_delete_jhash2(void)
+{
+       int ret = -1;
+       struct rte_hash *handle;
+       int32_t pos1, pos2;
+
+       hash_params_ex.name = "hash_test_jhash2";
+       hash_params_ex.key_len = 4;
+       /* rte_jhash2 takes the key as uint32_t words; cast to the generic
+        * (const void *) hash-function signature. */
+       hash_params_ex.hash_func = (rte_hash_function)rte_jhash2;
+
+       handle = rte_hash_create(&hash_params_ex);
+       if (handle == NULL) {
+               printf("test_hash_add_delete_jhash2 fail to create hash\n");
+               goto fail_jhash2;
+       }
+       pos1 = rte_hash_add_key(handle, (void *)&key[0]);
+       if (pos1 < 0) {
+               printf("test_hash_add_delete_jhash2 fail to add hash key\n");
+               goto fail_jhash2;
+       }
+
+       /* Delete must report the same slot the add returned. */
+       pos2 = rte_hash_del_key(handle, (void *)&key[0]);
+       if (pos2 < 0 || pos1 != pos2) {
+               printf("test_hash_add_delete_jhash2 delete different key from being added\n");
+               goto fail_jhash2;
+       }
+       ret = 0;
+
+fail_jhash2:
+       if (handle != NULL)
+               rte_hash_free(handle);
+
+       return ret;
+}
+
+/*
+ * add/delete (2) key with jhash2
+ *
+ * Same as test_hash_add_delete_jhash2() but with an 8-byte (two-word)
+ * key. Returns 0 on success, -1 on any failure.
+ */
+static int
+test_hash_add_delete_2_jhash2(void)
+{
+       int ret = -1;
+       struct rte_hash *handle;
+       int32_t pos1, pos2;
+
+       hash_params_ex.name = "hash_test_2_jhash2";
+       hash_params_ex.key_len = 8;
+       hash_params_ex.hash_func = (rte_hash_function)rte_jhash2;
+
+       handle = rte_hash_create(&hash_params_ex);
+       if (handle == NULL)
+               goto fail_2_jhash2;
+
+       pos1 = rte_hash_add_key(handle, (void *)&key[0]);
+       if (pos1 < 0)
+               goto fail_2_jhash2;
+
+       /* Delete must report the same slot the add returned. */
+       pos2 = rte_hash_del_key(handle, (void *)&key[0]);
+       if (pos2 < 0 || pos1 != pos2)
+               goto fail_2_jhash2;
+
+       ret = 0;
+
+fail_2_jhash2:
+       if (handle != NULL)
+               rte_hash_free(handle);
+
+       return ret;
+}
+
+/* Adapter matching the rte_hash_function signature: hashes the first
+ * 32-bit word of the key with rte_jhash_1word(); 'length' is ignored. */
+static uint32_t
+test_hash_jhash_1word(const void *key, uint32_t length, uint32_t initval)
+{
+       const uint32_t *k = key;
+
+       length =length;         /* self-assignment silences unused-parameter warnings */
+
+       return rte_jhash_1word(k[0], initval);
+}
+
+/* Adapter matching the rte_hash_function signature: hashes the first two
+ * 32-bit words of the key with rte_jhash_2words(); 'length' is ignored. */
+static uint32_t
+test_hash_jhash_2word(const void *key, uint32_t length, uint32_t initval)
+{
+       const uint32_t *k = key;
+
+       length =length;         /* self-assignment silences unused-parameter warnings */
+
+       return rte_jhash_2words(k[0], k[1], initval);
+}
+
+/* Adapter matching the rte_hash_function signature: hashes the first three
+ * 32-bit words of the key with rte_jhash_3words(); 'length' is ignored. */
+static uint32_t
+test_hash_jhash_3word(const void *key, uint32_t length, uint32_t initval)
+{
+       const uint32_t *k = key;
+
+       length =length;         /* self-assignment silences unused-parameter warnings */
+
+       return rte_jhash_3words(k[0], k[1], k[2], initval);
+}
+
+/*
+ * add/delete key with jhash 1word
+ *
+ * Creates a hash over a 4-byte key using the test_hash_jhash_1word
+ * adapter, then verifies delete returns the same slot index as add.
+ * Returns 0 on success, -1 on any failure.
+ */
+static int
+test_hash_add_delete_jhash_1word(void)
+{
+       int ret = -1;
+       struct rte_hash *handle;
+       int32_t pos1, pos2;
+
+       hash_params_ex.name = "hash_test_jhash_1word";
+       hash_params_ex.key_len = 4;
+       hash_params_ex.hash_func = test_hash_jhash_1word;
+
+       handle = rte_hash_create(&hash_params_ex);
+       if (handle == NULL)
+               goto fail_jhash_1word;
+
+       pos1 = rte_hash_add_key(handle, (void *)&key[0]);
+       if (pos1 < 0)
+               goto fail_jhash_1word;
+
+       pos2 = rte_hash_del_key(handle, (void *)&key[0]);
+       if (pos2 < 0 || pos1 != pos2)
+               goto fail_jhash_1word;
+
+       ret = 0;
+
+fail_jhash_1word:
+       if (handle != NULL)
+               rte_hash_free(handle);
+
+       return ret;
+}
+
+/*
+ * add/delete key with jhash 2word
+ *
+ * Creates a hash over an 8-byte key using the test_hash_jhash_2word
+ * adapter, then verifies delete returns the same slot index as add.
+ * Returns 0 on success, -1 on any failure.
+ */
+static int
+test_hash_add_delete_jhash_2word(void)
+{
+       int ret = -1;
+       struct rte_hash *handle;
+       int32_t pos1, pos2;
+
+       hash_params_ex.name = "hash_test_jhash_2word";
+       hash_params_ex.key_len = 8;
+       hash_params_ex.hash_func = test_hash_jhash_2word;
+
+       handle = rte_hash_create(&hash_params_ex);
+       if (handle == NULL)
+               goto fail_jhash_2word;
+
+       pos1 = rte_hash_add_key(handle, (void *)&key[0]);
+       if (pos1 < 0)
+               goto fail_jhash_2word;
+
+       pos2 = rte_hash_del_key(handle, (void *)&key[0]);
+       if (pos2 < 0 || pos1 != pos2)
+               goto fail_jhash_2word;
+
+       ret = 0;
+
+fail_jhash_2word:
+       if (handle != NULL)
+               rte_hash_free(handle);
+
+       return ret;
+}
+
+/*
+ * add/delete key with jhash 3word
+ *
+ * Creates a hash over a 12-byte key using the test_hash_jhash_3word
+ * adapter, then verifies delete returns the same slot index as add.
+ * Returns 0 on success, -1 on any failure.
+ */
+static int
+test_hash_add_delete_jhash_3word(void)
+{
+       int ret = -1;
+       struct rte_hash *handle;
+       int32_t pos1, pos2;
+
+       hash_params_ex.name = "hash_test_jhash_3word";
+       hash_params_ex.key_len = 12;
+       hash_params_ex.hash_func = test_hash_jhash_3word;
+
+       handle = rte_hash_create(&hash_params_ex);
+       if (handle == NULL)
+               goto fail_jhash_3word;
+
+       pos1 = rte_hash_add_key(handle, (void *)&key[0]);
+       if (pos1 < 0)
+               goto fail_jhash_3word;
+
+       pos2 = rte_hash_del_key(handle, (void *)&key[0]);
+       if (pos2 < 0 || pos1 != pos2)
+               goto fail_jhash_3word;
+
+       ret = 0;
+
+fail_jhash_3word:
+       if (handle != NULL)
+               rte_hash_free(handle);
+
+       return ret;
+}
+
+/*
+ * Do all unit and performance tests.
+ *
+ * Entry point for the hash test suite: runs every rte_hash and fbk-hash
+ * unit test plus the performance tests, stopping at the first failure.
+ * All callees are defined earlier in this file. Returns 0 on success,
+ * -1 on the first failing test.
+ */
+int test_hash(void)
+{
+       if (test_add_delete() < 0)
+               return -1;
+       if (test_hash_add_delete_jhash2() < 0)
+               return -1;
+       if (test_hash_add_delete_2_jhash2() < 0)
+               return -1;
+       if (test_hash_add_delete_jhash_1word() < 0)
+               return -1;
+       if (test_hash_add_delete_jhash_2word() < 0)
+               return -1;
+       if (test_hash_add_delete_jhash_3word() < 0)
+               return -1;
+       if (test_hash_find_existing() < 0)
+               return -1;
+       if (test_add_update_delete() < 0)
+               return -1;
+       if (test_five_keys() < 0)
+               return -1;
+       if (test_full_bucket() < 0)
+               return -1;
+       if (run_all_tbl_perf_tests() < 0)
+               return -1;
+       /* run_hash_func_tests() only prints timings; it cannot fail. */
+       run_hash_func_tests();
+
+       if (test_fbk_hash_find_existing() < 0)
+               return -1;
+       if (fbk_hash_unit_test() < 0)
+               return -1;
+       if (fbk_hash_perf_test() < 0)
+               return -1;
+       if (test_hash_creation_with_bad_parameters() < 0)
+               return -1;
+       return 0;
+}
+#else
+
+/* Fallback built when the hash library is compiled out (see the
+ * conditional guarding this file); reports and succeeds trivially. */
+int
+test_hash(void)
+{
+       printf("The Hash library is not included in this build\n");
+       return 0;
+}
+
+#endif
diff --git a/app/test/test_interrupts.c b/app/test/test_interrupts.c
new file mode 100644 (file)
index 0000000..c52ec71
--- /dev/null
@@ -0,0 +1,419 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <unistd.h>
+
+#include <cmdline_parse.h>
+
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_interrupts.h>
+
+#include "test.h"
+
+#define TEST_INTERRUPT_CHECK_INTERVAL 1000 /* ms */
+
+/* Indices into intr_handles[] for the handles exercised by this test. */
+enum test_interrupt_handl_type {
+       TEST_INTERRUPT_HANDLE_INVALID,  /* fd = -1, type UNKNOWN */
+       TEST_INTERRUPT_HANDLE_VALID,    /* pipe read fd, type UNKNOWN */
+       TEST_INTERRUPT_HANDLE_CASE1,    /* pipe read fd, type ALARM */
+       TEST_INTERRUPT_HANDLE_MAX
+};
+
+/* Set to 1 by test_interrupt_callback() when the VALID handle fires;
+ * volatile because it is written from the callback and polled elsewhere. */
+static volatile int flag;
+static struct rte_intr_handle intr_handles[TEST_INTERRUPT_HANDLE_MAX];
+
+#ifdef RTE_EXEC_ENV_LINUXAPP
+/* View of a pipe(2) fd pair: pipefd[] aliases readfd/writefd through
+ * anonymous structs, so pipefd[0] == readfd and pipefd[1] == writefd.
+ * NOTE(review): relies on anonymous struct members (C11 / common
+ * compiler extension) -- confirm against the project's compiler flags. */
+union intr_pipefds{
+       struct {
+               int pipefd[2];
+       };
+       struct {
+               int readfd;
+               int writefd;
+       };
+};
+
+/* The pipe used to simulate interrupt delivery in this test. */
+static union intr_pipefds pfds;
+
+/* Validate an interrupt handle: non-NULL with a usable (>= 0) fd.
+ * Returns 0 if the handle looks sane, -1 otherwise. */
+static inline int
+test_interrupt_handle_sanity_check(struct rte_intr_handle *intr_handle)
+{
+       if (!intr_handle || intr_handle->fd < 0)
+               return -1;
+
+       return 0;
+}
+
+/* Create the pipe used to simulate interrupts and populate the three test
+ * handles (see enum test_interrupt_handl_type). Returns 0 on success,
+ * -1 if pipe(2) fails. */
+static int
+test_interrupt_init(void)
+{
+       if (pipe(pfds.pipefd) < 0)
+               return -1;
+
+       /* Deliberately broken handle: negative fd, unknown type. */
+       intr_handles[TEST_INTERRUPT_HANDLE_INVALID].fd = -1;
+       intr_handles[TEST_INTERRUPT_HANDLE_INVALID].type = RTE_INTR_HANDLE_UNKNOWN;
+
+       /* Real fd (pipe read end) but unknown handle type. */
+       intr_handles[TEST_INTERRUPT_HANDLE_VALID].fd = pfds.readfd;
+       intr_handles[TEST_INTERRUPT_HANDLE_VALID].type = RTE_INTR_HANDLE_UNKNOWN;
+
+       /* Same fd presented as an alarm-type handle. */
+       intr_handles[TEST_INTERRUPT_HANDLE_CASE1].fd = pfds.readfd;
+       intr_handles[TEST_INTERRUPT_HANDLE_CASE1].type = RTE_INTR_HANDLE_ALARM;
+
+       return 0;
+}
+
+/* Tear down the test fixture: close both ends of the pipe. Always
+ * returns 0 (close errors are ignored in this test). */
+static int
+test_interrupt_deinit(void)
+{
+       close(pfds.pipefd[0]);
+       close(pfds.pipefd[1]);
+
+       return 0;
+}
+
+/* Simulate an interrupt by writing one byte to the pipe's write end,
+ * making the read fd (used by the test handles) become readable.
+ * Returns 0 on success, -1 if the write fails. */
+static int
+test_interrupt_trigger_interrupt(void)
+{
+       if (write(pfds.writefd, "1", 1) < 0)
+               return -1;
+
+       return 0;
+}
+
+/* Compare two interrupt handles by fd and type.
+ * Returns 0 when both are non-NULL and equal; -1 for any NULL argument
+ * or mismatch (callers cannot distinguish the two failure causes). */
+static int
+test_interrupt_handle_compare(struct rte_intr_handle *intr_handle_l,
+                               struct rte_intr_handle *intr_handle_r)
+{
+       if (!intr_handle_l || !intr_handle_r)
+               return -1;
+
+       if (intr_handle_l->fd != intr_handle_r->fd ||
+               intr_handle_l->type != intr_handle_r->type)
+               return -1;
+
+       return 0;
+}
+
+#else
+/* to be implemented for baremetal later */
+/* Non-Linux builds: every helper is a succeed-immediately stub, making
+ * the whole interrupt test a no-op on baremetal targets. */
+static inline int
+test_interrupt_handle_sanity_check(struct rte_intr_handle *intr_handle)
+{
+       RTE_SET_USED(intr_handle);
+
+       return 0;
+}
+
+static int
+test_interrupt_init(void)
+{
+       return 0;
+}
+
+static int
+test_interrupt_deinit(void)
+{
+       return 0;
+}
+
+static int
+test_interrupt_trigger_interrupt(void)
+{
+       return 0;
+}
+
+static int
+test_interrupt_handle_compare(struct rte_intr_handle *intr_handle_l,
+                               struct rte_intr_handle *intr_handle_r)
+{
+       /* Both parameters intentionally unused in the stub. */
+       (void)intr_handle_l;
+       (void)intr_handle_r;
+
+       return 0;
+}
+#endif /* RTE_EXEC_ENV_LINUXAPP */
+
+/* Interrupt callback under test: validates its handle, unregisters
+ * itself (one-shot behavior), and raises the global 'flag' when invoked
+ * for the VALID test handle so the main test can detect delivery. */
+static void
+test_interrupt_callback(struct rte_intr_handle *intr_handle, void *arg)
+{
+       if (test_interrupt_handle_sanity_check(intr_handle) < 0) {
+               printf("null or invalid intr_handle for %s\n", __FUNCTION__);
+               return;
+       }
+
+       /* Unregister from inside the callback; <= 0 means nothing was
+        * removed, which the test treats as a failure. */
+       if (rte_intr_callback_unregister(intr_handle,
+                       test_interrupt_callback, arg) <= 0) {
+               printf("fail to unregister callback\n");
+               return;
+       }
+
+       if (test_interrupt_handle_compare(intr_handle,
+               &(intr_handles[TEST_INTERRUPT_HANDLE_VALID])) == 0) {
+               flag = 1;
+       }
+}
+
+/* Secondary one-shot callback: validates its handle and unregisters
+ * itself, but does not touch 'flag'. Presumably registered by test cases
+ * later in this file to exercise the register/unregister paths. */
+static void
+test_interrupt_callback_1(struct rte_intr_handle *intr_handle, void *arg)
+{
+       if (test_interrupt_handle_sanity_check(intr_handle) < 0) {
+               printf("null or invalid intr_handle for %s\n", __FUNCTION__);
+               return;
+       }
+       if (rte_intr_callback_unregister(intr_handle,
+                       test_interrupt_callback_1, arg) <= 0) {
+               printf("fail to unregister callback\n");
+               return;
+       }
+}
+
+/* Negative tests for rte_intr_enable(): every call below is EXPECTED to
+ * fail (return non-zero) -- NULL handle, invalid fd, and pipe-fd handles
+ * of type UNKNOWN and ALARM, none of which are enableable. Returns -1 if
+ * any call unexpectedly succeeds, 0 otherwise. */
+static int
+test_interrupt_enable(void)
+{
+       struct rte_intr_handle test_intr_handle;
+
+       /* check with null intr_handle */
+       if (rte_intr_enable(NULL) == 0) {
+               printf("unexpectedly enable null intr_handle successfully\n");
+               return -1;
+       }
+
+       /* check with invalid intr_handle */
+       test_intr_handle = intr_handles[TEST_INTERRUPT_HANDLE_INVALID];
+       if (rte_intr_enable(&test_intr_handle) == 0) {
+               printf("unexpectedly enable invalid intr_handle "
+                       "successfully\n");
+               return -1;
+       }
+
+       /* check with valid intr_handle */
+       test_intr_handle = intr_handles[TEST_INTERRUPT_HANDLE_VALID];
+       if (rte_intr_enable(&test_intr_handle) == 0) {
+               printf("unexpectedly enable a specific intr_handle "
+                       "successfully\n");
+               return -1;
+       }
+
+       /* check with specific valid intr_handle */
+       test_intr_handle = intr_handles[TEST_INTERRUPT_HANDLE_CASE1];
+       if (rte_intr_enable(&test_intr_handle) == 0) {
+               printf("unexpectedly enable a specific intr_handle "
+                       "successfully\n");
+               return -1;
+       }
+
+       return 0;
+}
+
+/* Exercise rte_intr_disable() error paths; mirrors test_interrupt_enable.
+ * Every case expects the call to FAIL (non-zero return).
+ * Returns 0 when all cases behave as expected, -1 otherwise. */
+static int
+test_interrupt_disable(void)
+{
+       struct rte_intr_handle test_intr_handle;
+
+       /* check with null intr_handle */
+       if (rte_intr_disable(NULL) == 0) {
+               printf("unexpectedly disable null intr_handle "
+                       "successfully\n");
+               return -1;
+       }
+
+       /* check with invalid intr_handle */
+       test_intr_handle = intr_handles[TEST_INTERRUPT_HANDLE_INVALID];
+       if (rte_intr_disable(&test_intr_handle) == 0) {
+               printf("unexpectedly disable invalid intr_handle "
+                       "successfully\n");
+               return -1;
+       }
+
+       /* check with valid intr_handle */
+       /* NOTE(review): disabling the "valid" test handle is also expected
+        * to fail; message text matches the CASE1 branch -- confirm. */
+       test_intr_handle = intr_handles[TEST_INTERRUPT_HANDLE_VALID];
+       if (rte_intr_disable(&test_intr_handle) == 0) {
+               printf("unexpectedly disable a specific intr_handle "
+                       "successfully\n");
+               return -1;
+       }
+
+       /* check with specific valid intr_handle */
+       test_intr_handle = intr_handles[TEST_INTERRUPT_HANDLE_CASE1];
+       if (rte_intr_disable(&test_intr_handle) == 0) {
+               printf("unexpectedly disable a specific intr_handle "
+                       "successfully\n");
+               return -1;
+       }
+
+       return 0;
+}
+
+/* Top-level interrupt test entry point. Runs three phases:
+ *   1. register a callback, trigger an interrupt, verify it fired;
+ *   2. register/unregister error paths (NULL/invalid handles, NULL
+ *      callback, double registration, arg matching);
+ *   3. enable/disable error paths via the helpers above.
+ * Always unregisters leftover callbacks and deinitializes on exit.
+ * Returns 0 on success, -1 on any failure. */
+int
+test_interrupt(void)
+{
+       int count = 0, ret = -1;
+       struct rte_intr_handle test_intr_handle;
+
+       if (test_interrupt_init() < 0) {
+               printf("fail to do test init\n");
+               return -1;
+       }
+
+       printf("check if callback registered can be called\n");
+
+       /* check if callback registered can be called */
+       flag = 0;
+       test_intr_handle = intr_handles[TEST_INTERRUPT_HANDLE_VALID];
+       if (rte_intr_callback_register(&test_intr_handle,
+                       test_interrupt_callback, NULL) < 0) {
+               printf("fail to register callback\n");
+               goto out;
+       }
+       /* trigger an interrupt and then check if the callback can be called */
+       if (test_interrupt_trigger_interrupt() < 0) {
+               printf("fail to trigger an interrupt\n");
+               goto out;
+       }
+       /* poll `flag` (set by the callback) up to 3 intervals of
+        * TEST_INTERRUPT_CHECK_INTERVAL ms */
+       while (flag == 0 && count++ < 3)
+               rte_delay_ms(TEST_INTERRUPT_CHECK_INTERVAL);
+       if (flag == 0) {
+               printf("registered callback has not been called\n");
+               goto out;
+       }
+       rte_delay_ms(1000);
+
+       printf("start register/unregister test\n");
+
+       /* check if it will fail to register cb with intr_handle = NULL */
+       if (rte_intr_callback_register(NULL, test_interrupt_callback,
+                                                       NULL) == 0) {
+               printf("unexpectedly register successfully with null "
+                       "intr_handle\n");
+               goto out;
+       }
+
+       /* check if it will fail to register cb with invalid intr_handle */
+       test_intr_handle = intr_handles[TEST_INTERRUPT_HANDLE_INVALID];
+       if (rte_intr_callback_register(&test_intr_handle,
+                       test_interrupt_callback, NULL) == 0) {
+               printf("unexpectedly register successfully with invalid "
+                       "intr_handle\n");
+               goto out;
+       }
+
+       /* check if it will fail to register without callback */
+       test_intr_handle = intr_handles[TEST_INTERRUPT_HANDLE_VALID];
+       if (rte_intr_callback_register(&test_intr_handle, NULL, NULL) == 0) {
+               printf("unexpectedly register successfully with "
+                       "null callback\n");
+               goto out;
+       }
+
+       /* check if it will fail to unregister cb with intr_handle = NULL */
+       if (rte_intr_callback_unregister(NULL,
+                       test_interrupt_callback, NULL) > 0) {
+               printf("unexpectedly unregister successfully with "
+                       "null intr_handle\n");
+               goto out;
+       }
+
+       /* check if it will fail to unregister cb with invalid intr_handle */
+       test_intr_handle = intr_handles[TEST_INTERRUPT_HANDLE_INVALID];
+       if (rte_intr_callback_unregister(&test_intr_handle,
+                       test_interrupt_callback, NULL) > 0) {
+               printf("unexpectedly unregister successfully with "
+                       "invalid intr_handle\n");
+               goto out;
+       }
+
+       /* check if it is ok to register the same intr_handle twice */
+       test_intr_handle = intr_handles[TEST_INTERRUPT_HANDLE_VALID];
+       if (rte_intr_callback_register(&test_intr_handle,
+                       test_interrupt_callback, NULL) < 0) {
+               printf("it fails to register test_interrupt_callback\n");
+               goto out;
+       }
+       if (rte_intr_callback_register(&test_intr_handle,
+                       test_interrupt_callback_1, NULL) < 0) {
+               printf("it fails to register test_interrupt_callback_1\n");
+               goto out;
+       }
+       /* check if it will fail to unregister with invalid parameter:
+        * arg 0xff matches no registered (cb, arg) pair */
+       if (rte_intr_callback_unregister(&test_intr_handle,
+                       test_interrupt_callback, (void *)0xff) != 0) {
+               printf("unexpectedly unregisters successfully with invalid arg\n");
+               goto out;
+       }
+       if (rte_intr_callback_unregister(&test_intr_handle,
+                       test_interrupt_callback, NULL) <= 0) {
+               printf("it fails to unregister test_interrupt_callback\n");
+               goto out;
+       }
+       /* arg (void *)-1 unregisters the callback regardless of its arg */
+       if (rte_intr_callback_unregister(&test_intr_handle,
+                       test_interrupt_callback_1, (void *)-1) <= 0) {
+               printf("it fails to unregister test_interrupt_callback_1 "
+                       "for all\n");
+               goto out;
+       }
+       rte_delay_ms(1000);
+
+       printf("start interrupt enable/disable test\n");
+
+       /* check interrupt enable/disable functions */
+       if (test_interrupt_enable() < 0)
+               goto out;
+       rte_delay_ms(1000);
+
+       if (test_interrupt_disable() < 0)
+               goto out;
+       rte_delay_ms(1000);
+
+       ret = 0;
+
+out:
+       /* clear registered callbacks (arg -1 == match any arg) so a failed
+        * run does not leave callbacks behind for later tests */
+       test_intr_handle = intr_handles[TEST_INTERRUPT_HANDLE_VALID];
+       rte_intr_callback_unregister(&test_intr_handle,
+                       test_interrupt_callback, (void *)-1);
+       rte_intr_callback_unregister(&test_intr_handle,
+                       test_interrupt_callback_1, (void *)-1);
+
+       rte_delay_ms(2000);
+       /* deinit */
+       test_interrupt_deinit();
+
+       return ret;
+}
+
diff --git a/app/test/test_logs.c b/app/test/test_logs.c
new file mode 100644 (file)
index 0000000..c5aac9c
--- /dev/null
@@ -0,0 +1,96 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdarg.h>
+#include <sys/queue.h>
+
+#include <cmdline_parse.h>
+
+#include <rte_log.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_launch.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+
+#include "test.h"
+
+#define RTE_LOGTYPE_TESTAPP1 RTE_LOGTYPE_USER1
+#define RTE_LOGTYPE_TESTAPP2 RTE_LOGTYPE_USER2
+
+/*
+ * Logs
+ * ====
+ *
+ * - Enable log types.
+ * - Set log level.
+ * - Send logs with different types and levels, some should not be displayed.
+ */
+
+/* Log subsystem smoke test: enables the two user log types, emits
+ * messages at DEBUG/INFO/WARNING under different global log levels
+ * (some intentionally filtered out), disables one type, then dumps the
+ * in-memory log history. Output is checked by eye, so this always
+ * returns 0. */
+int
+test_logs(void)
+{
+       /* enable these logs type */
+       rte_set_log_type(RTE_LOGTYPE_TESTAPP1, 1);
+       rte_set_log_type(RTE_LOGTYPE_TESTAPP2, 1);
+
+       /* log in debug level: all three messages should appear */
+       rte_set_log_level(RTE_LOG_DEBUG);
+       RTE_LOG(DEBUG, TESTAPP1, "this is a debug level message\n");
+       RTE_LOG(INFO, TESTAPP1, "this is a info level message\n");
+       RTE_LOG(WARNING, TESTAPP1, "this is a warning level message\n");
+
+       /* log in info level: the DEBUG message must be filtered out */
+       rte_set_log_level(RTE_LOG_INFO);
+       RTE_LOG(DEBUG, TESTAPP2, "debug level message (not displayed)\n");
+       RTE_LOG(INFO, TESTAPP2, "this is a info level message\n");
+       RTE_LOG(WARNING, TESTAPP2, "this is a warning level message\n");
+
+       /* disable one log type */
+       rte_set_log_type(RTE_LOGTYPE_TESTAPP2, 0);
+
+       /* log in debug level: TESTAPP2 is disabled, so only TESTAPP1 shows */
+       rte_set_log_level(RTE_LOG_DEBUG);
+       RTE_LOG(DEBUG, TESTAPP1, "this is a debug level message\n");
+       RTE_LOG(DEBUG, TESTAPP2, "debug level message (not displayed)\n");
+
+       rte_log_dump_history();
+
+       return 0;
+}
diff --git a/app/test/test_lpm.c b/app/test/test_lpm.c
new file mode 100644 (file)
index 0000000..3a9400f
--- /dev/null
@@ -0,0 +1,1365 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <sys/queue.h>
+#include <cmdline_parse.h>
+
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_memory.h>
+#include <rte_random.h>
+#include <rte_branch_prediction.h>
+#include <rte_ip.h>
+#include <time.h>
+
+#ifdef RTE_LIBRTE_LPM
+
+#include "rte_lpm.h"
+#include "test_lpm_routes.h"
+
+#include "test.h"
+
+#define ITERATIONS (1 << 20)
+#define BATCH_SIZE (1 << 13)
+
+/* Abort the current test with the failing source line number when
+ * `cond` is false; makes the enclosing function return -1. */
+#define TEST_LPM_ASSERT(cond) do {                                            \
+       if (!(cond)) {                                                        \
+               printf("Error at line %d: \n", __LINE__);                     \
+               return -1;                                                    \
+       }                                                                     \
+} while(0)
+
+
+
+typedef int32_t (* rte_lpm_test)(void);
+
+static int32_t test0(void);
+static int32_t test1(void);
+static int32_t test2(void);
+static int32_t test3(void);
+static int32_t test4(void);
+static int32_t test5(void);
+static int32_t test6(void);
+static int32_t test7(void);
+static int32_t test8(void);
+static int32_t test9(void);
+static int32_t test10(void);
+static int32_t test11(void);
+static int32_t test12(void);
+static int32_t test13(void);
+static int32_t test14(void);
+static int32_t test15(void);
+static int32_t test16(void);
+static int32_t test17(void);
+static int32_t test18(void);
+
+/* Registry of LPM unit tests; the runner iterates this array in order. */
+rte_lpm_test tests[] = {
+/* Test Cases */
+       test0,
+       test1,
+       test2,
+       test3,
+       test4,
+       test5,
+       test6,
+       test7,
+       test8,
+       test9,
+       test10,
+       test11,
+       test12,
+       test13,
+       test14,
+       test15,
+       test16,
+       test17,
+       test18
+};
+
+#define NUM_LPM_TESTS (sizeof(tests)/sizeof(tests[0]))
+/* deepest IPv4 prefix length used by these tests */
+#define MAX_DEPTH 32
+/* rule capacity passed to rte_lpm_create in these tests */
+#define MAX_RULES 256
+#define PASS 0
+
+/*
+ * TEST 0
+ *
+ * Check that rte_lpm_create fails gracefully for incorrect user input
+ * arguments
+ */
+/* NOTE(review): prototyped above as static but defined without the
+ * keyword; the definition inherits static linkage, though some
+ * compilers warn -- applies to all testN definitions below. */
+int32_t
+test0(void)
+{
+       struct rte_lpm *lpm = NULL;
+
+       /* rte_lpm_create: lpm name == NULL */
+       lpm = rte_lpm_create(NULL, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP);
+       TEST_LPM_ASSERT(lpm == NULL);
+
+       /* rte_lpm_create: max_rules = 0 */
+       /* Note: __func__ inserts the function name, in this case "test0". */
+       lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, 0, RTE_LPM_HEAP);
+       TEST_LPM_ASSERT(lpm == NULL);
+
+       /* rte_lpm_create: mem_location is not RTE_LPM_HEAP or not MEMZONE */
+       /* Note: __func__ inserts the function name, in this case "test0". */
+       lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, 2);
+       TEST_LPM_ASSERT(lpm == NULL);
+
+       /* a negative mem_location is equally invalid */
+       lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, -1);
+       TEST_LPM_ASSERT(lpm == NULL);
+
+       /* socket_id < -1 is invalid */
+       lpm = rte_lpm_create(__func__, -2, MAX_RULES, RTE_LPM_HEAP);
+       TEST_LPM_ASSERT(lpm == NULL);
+
+       return PASS;
+}
+
+/* TEST 1
+ *
+ * Create lpm table then delete lpm table 100 times
+ * Use a slightly different rules size each time
+ * */
+int32_t
+test1(void)
+{
+       struct rte_lpm *lpm = NULL;
+       int32_t i;
+
+       /* create/free 100 times, shrinking max_rules by one each round */
+       for (i = 0; i < 100; i++) {
+               lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES - i,
+                               RTE_LPM_HEAP);
+               TEST_LPM_ASSERT(lpm != NULL);
+
+               rte_lpm_free(lpm);
+       }
+
+       /* Can not test free so return success */
+       return PASS;
+}
+
+/* TEST 2
+ *
+ * Call rte_lpm_free for NULL pointer user input. Note: free has no return and
+ * therefore it is impossible to check for failure but this test is added to
+ * increase function coverage metrics and to validate that freeing null does
+ * not crash.
+ */
+int32_t
+test2(void)
+{
+       struct rte_lpm *lpm = NULL;
+
+       lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP);
+       TEST_LPM_ASSERT(lpm != NULL);
+
+       rte_lpm_free(lpm);
+       /* rte_lpm_free(NULL) must be a harmless no-op */
+       rte_lpm_free(NULL);
+       return PASS;
+}
+
+/* TEST 3
+ *
+ * Check that rte_lpm_add fails gracefully for incorrect user input arguments
+ */
+int32_t
+test3(void)
+{
+       struct rte_lpm *lpm = NULL;
+       uint32_t ip = IPv4(0, 0, 0, 0);
+       uint8_t depth = 24, next_hop = 100;
+       int32_t status = 0;
+
+       /* rte_lpm_add: lpm == NULL */
+       status = rte_lpm_add(NULL, ip, depth, next_hop);
+       TEST_LPM_ASSERT(status < 0);
+
+       /* Create valid lpm to use in rest of test. */
+       lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP);
+       TEST_LPM_ASSERT(lpm != NULL);
+
+       /* rte_lpm_add: depth < 1 */
+       status = rte_lpm_add(lpm, ip, 0, next_hop);
+       TEST_LPM_ASSERT(status < 0);
+
+       /* rte_lpm_add: depth > MAX_DEPTH */
+       status = rte_lpm_add(lpm, ip, (MAX_DEPTH + 1), next_hop);
+       TEST_LPM_ASSERT(status < 0);
+
+       rte_lpm_free(lpm);
+
+       return PASS;
+}
+
+/* TEST 4
+ *
+ * Check that rte_lpm_delete fails gracefully for incorrect user input
+ * arguments
+ */
+int32_t
+test4(void)
+{
+       struct rte_lpm *lpm = NULL;
+       uint32_t ip = IPv4(0, 0, 0, 0);
+       uint8_t depth = 24;
+       int32_t status = 0;
+
+       /* rte_lpm_delete: lpm == NULL */
+       status = rte_lpm_delete(NULL, ip, depth);
+       TEST_LPM_ASSERT(status < 0);
+
+       /* Create valid lpm to use in rest of test. */
+       lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP);
+       TEST_LPM_ASSERT(lpm != NULL);
+
+       /* rte_lpm_delete: depth < 1 */
+       status = rte_lpm_delete(lpm, ip, 0);
+       TEST_LPM_ASSERT(status < 0);
+
+       /* rte_lpm_delete: depth > MAX_DEPTH */
+       status = rte_lpm_delete(lpm, ip, (MAX_DEPTH + 1));
+       TEST_LPM_ASSERT(status < 0);
+
+       rte_lpm_free(lpm);
+
+       return PASS;
+}
+
+/* TEST 5
+ *
+ * Check that rte_lpm_lookup fails gracefully for incorrect user input
+ * arguments
+ */
+int32_t
+test5(void)
+{
+/* the parameter checks in rte_lpm_lookup only exist in debug builds */
+#if defined(RTE_LIBRTE_LPM_DEBUG)
+       struct rte_lpm *lpm = NULL;
+       uint32_t ip = IPv4(0, 0, 0, 0);
+       uint8_t next_hop_return = 0;
+       int32_t status = 0;
+
+       /* rte_lpm_lookup: lpm == NULL */
+       status = rte_lpm_lookup(NULL, ip, &next_hop_return);
+       TEST_LPM_ASSERT(status < 0);
+
+       /* Create valid lpm to use in rest of test. */
+       lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP);
+       TEST_LPM_ASSERT(lpm != NULL);
+
+       /* rte_lpm_lookup: next_hop pointer == NULL */
+       status = rte_lpm_lookup(lpm, ip, NULL);
+       TEST_LPM_ASSERT(status < 0);
+
+       rte_lpm_free(lpm);
+#endif
+       return PASS;
+}
+
+
+
+/* TEST 6
+ *
+ * Call add, lookup and delete for a single rule with depth <= 24
+ */
+/* add/lookup/delete round-trip with depth 24 (pure tbl24 path) */
+int32_t
+test6(void)
+{
+       struct rte_lpm *lpm = NULL;
+       uint32_t ip = IPv4(0, 0, 0, 0);
+       uint8_t depth = 24, next_hop_add = 100, next_hop_return = 0;
+       int32_t status = 0;
+
+       lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP);
+       TEST_LPM_ASSERT(lpm != NULL);
+
+       status = rte_lpm_add(lpm, ip, depth, next_hop_add);
+       TEST_LPM_ASSERT(status == 0);
+
+       status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+       TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
+
+       status = rte_lpm_delete(lpm, ip, depth);
+       TEST_LPM_ASSERT(status == 0);
+
+       /* after delete the lookup must miss */
+       status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+       TEST_LPM_ASSERT(status == -ENOENT);
+
+       rte_lpm_free(lpm);
+
+       return PASS;
+}
+
+/* TEST 7
+ *
+ * Call add, lookup and delete for a single rule with depth > 24
+ */
+
+/* same round-trip as test6 but with depth 32 (exercises the tbl8 path) */
+int32_t
+test7(void)
+{
+       struct rte_lpm *lpm = NULL;
+       uint32_t ip = IPv4(0, 0, 0, 0);
+       uint8_t depth = 32, next_hop_add = 100, next_hop_return = 0;
+       int32_t status = 0;
+
+       lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP);
+       TEST_LPM_ASSERT(lpm != NULL);
+
+       status = rte_lpm_add(lpm, ip, depth, next_hop_add);
+       TEST_LPM_ASSERT(status == 0);
+
+       status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+       TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
+
+       status = rte_lpm_delete(lpm, ip, depth);
+       TEST_LPM_ASSERT(status == 0);
+
+       /* after delete the lookup must miss */
+       status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+       TEST_LPM_ASSERT(status == -ENOENT);
+
+       rte_lpm_free(lpm);
+
+       return PASS;
+}
+
+/* TEST 8
+ *
+ * Use rte_lpm_add to add rules which effect only the second half of the lpm
+ * table. Use all possible depths ranging from 1..32. Set the next hop = to the
+ * depth. Check lookup hit for on every add and check for lookup miss on the
+ * first half of the lpm table after each add. Finally delete all rules going
+ * backwards (i.e. from depth = 32 ..1) and carry out a lookup after each
+ * delete. The lookup should return the next_hop_add value related to the
+ * previous depth value (i.e. depth -1).
+ */
+int32_t
+test8(void)
+{
+       struct rte_lpm *lpm = NULL;
+       /* ip1 is just below, ip2 exactly at, the table's halfway point */
+       uint32_t ip1 = IPv4(127, 255, 255, 255), ip2 = IPv4(128, 0, 0, 0);
+       uint8_t depth, next_hop_add, next_hop_return;
+       int32_t status = 0;
+
+       lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP);
+       TEST_LPM_ASSERT(lpm != NULL);
+
+       /* Loop with rte_lpm_add. */
+       for (depth = 1; depth <= 32; depth++) {
+               /* Let the next_hop_add value = depth. Just for change. */
+               next_hop_add = depth;
+
+               status = rte_lpm_add(lpm, ip2, depth, next_hop_add);
+               TEST_LPM_ASSERT(status == 0);
+
+               /* Check IP in first half of tbl24 which should be empty. */
+               status = rte_lpm_lookup(lpm, ip1, &next_hop_return);
+               TEST_LPM_ASSERT(status == -ENOENT);
+
+               status = rte_lpm_lookup(lpm, ip2, &next_hop_return);
+               TEST_LPM_ASSERT((status == 0) &&
+                       (next_hop_return == next_hop_add));
+       }
+
+       /* Loop with rte_lpm_delete. */
+       for (depth = 32; depth >= 1; depth--) {
+               /* after deleting `depth`, the rule at depth-1 (whose next
+                * hop was set to its own depth above) should match */
+               next_hop_add = (uint8_t) (depth - 1);
+
+               status = rte_lpm_delete(lpm, ip2, depth);
+               TEST_LPM_ASSERT(status == 0);
+
+               status = rte_lpm_lookup(lpm, ip2, &next_hop_return);
+
+               if (depth != 1) {
+                       TEST_LPM_ASSERT((status == 0) &&
+                               (next_hop_return == next_hop_add));
+               }
+               else {
+                       /* last rule removed: lookup must miss */
+                       TEST_LPM_ASSERT(status == -ENOENT);
+               }
+
+               status = rte_lpm_lookup(lpm, ip1, &next_hop_return);
+               TEST_LPM_ASSERT(status == -ENOENT);
+       }
+
+       rte_lpm_free(lpm);
+
+       return PASS;
+}
+
+/* TEST 9
+ *
+ * - Add & lookup to hit invalid TBL24 entry
+ * - Add & lookup to hit valid TBL24 entry not extended
+ * - Add & lookup to hit valid extended TBL24 entry with invalid TBL8 entry
+ * - Add & lookup to hit valid extended TBL24 entry with valid TBL8 entry
+ *
+ */
+int32_t
+test9(void)
+{
+       struct rte_lpm *lpm = NULL;
+       uint32_t ip, ip_1, ip_2;
+       uint8_t depth, depth_1, depth_2, next_hop_add, next_hop_add_1,
+               next_hop_add_2, next_hop_return;
+       int32_t status = 0;
+
+       /* Case 1: Add & lookup to hit invalid TBL24 entry */
+       ip = IPv4(128, 0, 0, 0);
+       depth = 24;
+       next_hop_add = 100;
+
+       lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP);
+       TEST_LPM_ASSERT(lpm != NULL);
+
+       status = rte_lpm_add(lpm, ip, depth, next_hop_add);
+       TEST_LPM_ASSERT(status == 0);
+
+       status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+       TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
+
+       status = rte_lpm_delete(lpm, ip, depth);
+       TEST_LPM_ASSERT(status == 0);
+
+       status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+       TEST_LPM_ASSERT(status == -ENOENT);
+
+       rte_lpm_delete_all(lpm);
+
+       /* Case 2: Add & lookup to hit valid TBL24 entry not extended;
+        * the /24 rule overlays the /23 and must win for this address */
+       ip = IPv4(128, 0, 0, 0);
+       depth = 23;
+       next_hop_add = 100;
+
+       status = rte_lpm_add(lpm, ip, depth, next_hop_add);
+       TEST_LPM_ASSERT(status == 0);
+
+       status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+       TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
+
+       depth = 24;
+       next_hop_add = 101;
+
+       status = rte_lpm_add(lpm, ip, depth, next_hop_add);
+       TEST_LPM_ASSERT(status == 0);
+
+       status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+       TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
+
+       depth = 24;
+
+       status = rte_lpm_delete(lpm, ip, depth);
+       TEST_LPM_ASSERT(status == 0);
+
+       depth = 23;
+
+       status = rte_lpm_delete(lpm, ip, depth);
+       TEST_LPM_ASSERT(status == 0);
+
+       status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+       TEST_LPM_ASSERT(status == -ENOENT);
+
+       rte_lpm_delete_all(lpm);
+
+       /* Case 3: Add & lookup to hit valid extended TBL24 entry with
+        * invalid TBL8 entry */
+       ip = IPv4(128, 0, 0, 0);
+       depth = 32;
+       next_hop_add = 100;
+
+       status = rte_lpm_add(lpm, ip, depth, next_hop_add);
+       TEST_LPM_ASSERT(status == 0);
+
+       status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+       TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
+
+       /* second /32 in the same tbl8 group */
+       ip = IPv4(128, 0, 0, 5);
+       depth = 32;
+       next_hop_add = 101;
+
+       status = rte_lpm_add(lpm, ip, depth, next_hop_add);
+       TEST_LPM_ASSERT(status == 0);
+
+       status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+       TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
+
+       status = rte_lpm_delete(lpm, ip, depth);
+       TEST_LPM_ASSERT(status == 0);
+
+       status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+       TEST_LPM_ASSERT(status == -ENOENT);
+
+       /* first /32 must still resolve after deleting the second */
+       ip = IPv4(128, 0, 0, 0);
+       depth = 32;
+       next_hop_add = 100;
+
+       status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+       TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
+
+       status = rte_lpm_delete(lpm, ip, depth);
+       TEST_LPM_ASSERT(status == 0);
+
+       status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+       TEST_LPM_ASSERT(status == -ENOENT);
+
+       rte_lpm_delete_all(lpm);
+
+       /* Case 4: Add & lookup to hit valid extended TBL24 entry with
+        * valid TBL8 entry */
+       ip_1 = IPv4(128, 0, 0, 0);
+       depth_1 = 25;
+       next_hop_add_1 = 101;
+
+       ip_2 = IPv4(128, 0, 0, 5);
+       depth_2 = 32;
+       next_hop_add_2 = 102;
+
+       next_hop_return = 0;
+
+       status = rte_lpm_add(lpm, ip_1, depth_1, next_hop_add_1);
+       TEST_LPM_ASSERT(status == 0);
+
+       status = rte_lpm_lookup(lpm, ip_1, &next_hop_return);
+       TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_1));
+
+       status = rte_lpm_add(lpm, ip_2, depth_2, next_hop_add_2);
+       TEST_LPM_ASSERT(status == 0);
+
+       status = rte_lpm_lookup(lpm, ip_2, &next_hop_return);
+       TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_2));
+
+       status = rte_lpm_delete(lpm, ip_2, depth_2);
+       TEST_LPM_ASSERT(status == 0);
+
+       /* with the /32 gone, ip_2 falls back to the covering /25 */
+       status = rte_lpm_lookup(lpm, ip_2, &next_hop_return);
+       TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_1));
+
+       status = rte_lpm_delete(lpm, ip_1, depth_1);
+       TEST_LPM_ASSERT(status == 0);
+
+       status = rte_lpm_lookup(lpm, ip_1, &next_hop_return);
+       TEST_LPM_ASSERT(status == -ENOENT);
+
+       rte_lpm_free(lpm);
+
+       return PASS;
+}
+
+
+/* TEST 10
+ *
+ * - Add rule that covers a TBL24 range previously invalid & lookup (& delete &
+ *   lookup)
+ * - Add rule that extends a TBL24 invalid entry & lookup (& delete & lookup)
+ * - Add rule that extends a TBL24 valid entry & lookup for both rules (&
+ *   delete & lookup)
+ * - Add rule that updates the next hop in TBL24 & lookup (& delete & lookup)
+ * - Add rule that updates the next hop in TBL8 & lookup (& delete & lookup)
+ * - Delete a rule that is not present in the TBL24 & lookup
+ * - Delete a rule that is not present in the TBL8 & lookup
+ *
+ */
+int32_t
+test10(void)
+{
+
+       struct rte_lpm *lpm = NULL;
+       uint32_t ip;
+       uint8_t depth, next_hop_add, next_hop_return;
+       int32_t status = 0;
+
+       /* Add rule that covers a TBL24 range previously invalid & lookup
+        * (& delete & lookup) */
+       lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP);
+       TEST_LPM_ASSERT(lpm != NULL);
+
+       ip = IPv4(128, 0, 0, 0);
+       depth = 16;
+       next_hop_add = 100;
+
+       status = rte_lpm_add(lpm, ip, depth, next_hop_add);
+       TEST_LPM_ASSERT(status == 0);
+
+       status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+       TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
+
+       status = rte_lpm_delete(lpm, ip, depth);
+       TEST_LPM_ASSERT(status == 0);
+
+       status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+       TEST_LPM_ASSERT(status == -ENOENT);
+
+       rte_lpm_delete_all(lpm);
+
+       ip = IPv4(128, 0, 0, 0);
+       depth = 25;
+       next_hop_add = 100;
+
+       status = rte_lpm_add(lpm, ip, depth, next_hop_add);
+       TEST_LPM_ASSERT(status == 0);
+
+       status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+       TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
+
+       status = rte_lpm_delete(lpm, ip, depth);
+       TEST_LPM_ASSERT(status == 0);
+
+       rte_lpm_delete_all(lpm);
+
+       /* Add rule that extends a TBL24 valid entry & lookup for both rules
+        * (& delete & lookup) */
+
+       ip = IPv4(128, 0, 0, 0);
+       depth = 24;
+       next_hop_add = 100;
+
+       status = rte_lpm_add(lpm, ip, depth, next_hop_add);
+       TEST_LPM_ASSERT(status == 0);
+
+       ip = IPv4(128, 0, 0, 10);
+       depth = 32;
+       next_hop_add = 101;
+
+       status = rte_lpm_add(lpm, ip, depth, next_hop_add);
+       TEST_LPM_ASSERT(status == 0);
+
+       status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+       TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
+
+       ip = IPv4(128, 0, 0, 0);
+       next_hop_add = 100;
+
+       status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+       TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
+
+       ip = IPv4(128, 0, 0, 0);
+       depth = 24;
+
+       status = rte_lpm_delete(lpm, ip, depth);
+       TEST_LPM_ASSERT(status == 0);
+
+       status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+       TEST_LPM_ASSERT(status == -ENOENT);
+
+       ip = IPv4(128, 0, 0, 10);
+       depth = 32;
+
+       status = rte_lpm_delete(lpm, ip, depth);
+       TEST_LPM_ASSERT(status == 0);
+
+       status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+       TEST_LPM_ASSERT(status == -ENOENT);
+
+       rte_lpm_delete_all(lpm);
+
+       /* Add rule that updates the next hop in TBL24 & lookup
+        * (& delete & lookup) */
+
+       ip = IPv4(128, 0, 0, 0);
+       depth = 24;
+       next_hop_add = 100;
+
+       status = rte_lpm_add(lpm, ip, depth, next_hop_add);
+       TEST_LPM_ASSERT(status == 0);
+
+       status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+       TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
+
+       next_hop_add = 101;
+
+       status = rte_lpm_add(lpm, ip, depth, next_hop_add);
+       TEST_LPM_ASSERT(status == 0);
+
+       status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+       TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
+
+       status = rte_lpm_delete(lpm, ip, depth);
+       TEST_LPM_ASSERT(status == 0);
+
+       status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+       TEST_LPM_ASSERT(status == -ENOENT);
+
+       rte_lpm_delete_all(lpm);
+
+       /* Add rule that updates the next hop in TBL8 & lookup
+        * (& delete & lookup) */
+
+       ip = IPv4(128, 0, 0, 0);
+       depth = 32;
+       next_hop_add = 100;
+
+       status = rte_lpm_add(lpm, ip, depth, next_hop_add);
+       TEST_LPM_ASSERT(status == 0);
+
+       status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+       TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
+
+       next_hop_add = 101;
+
+       status = rte_lpm_add(lpm, ip, depth, next_hop_add);
+       TEST_LPM_ASSERT(status == 0);
+
+       status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+       TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
+
+       status = rte_lpm_delete(lpm, ip, depth);
+       TEST_LPM_ASSERT(status == 0);
+
+       status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+       TEST_LPM_ASSERT(status == -ENOENT);
+
+       rte_lpm_delete_all(lpm);
+
+       /* Delete a rule that is not present in the TBL24 & lookup */
+
+       ip = IPv4(128, 0, 0, 0);
+       depth = 24;
+       next_hop_add = 100;
+
+       status = rte_lpm_delete(lpm, ip, depth);
+       TEST_LPM_ASSERT(status < 0);
+
+       status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+       TEST_LPM_ASSERT(status == -ENOENT);
+
+       rte_lpm_delete_all(lpm);
+
+       /* Delete a rule that is not present in the TBL8 & lookup */
+
+       ip = IPv4(128, 0, 0, 0);
+       depth = 32;
+       next_hop_add = 100;
+
+       status = rte_lpm_delete(lpm, ip, depth);
+       TEST_LPM_ASSERT(status < 0);
+
+       status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+       TEST_LPM_ASSERT(status == -ENOENT);
+
+       rte_lpm_free(lpm);
+
+       return PASS;
+}
+
+/* TEST 11
+ *
+ * Add two rules, lookup to hit the more specific one, lookup to hit the less
+ * specific one; delete the less specific rule and lookup previous values
+ * again; add a more specific rule than the existing rule, lookup again
+ *
+ * */
+int32_t
+test11(void)
+{
+
+       struct rte_lpm *lpm = NULL;
+       uint32_t ip;
+       uint8_t depth, next_hop_add, next_hop_return;
+       int32_t status = 0;
+
+       lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP);
+       TEST_LPM_ASSERT(lpm != NULL);
+
+       ip = IPv4(128, 0, 0, 0);
+       depth = 24;
+       next_hop_add = 100;
+
+       status = rte_lpm_add(lpm, ip, depth, next_hop_add);
+       TEST_LPM_ASSERT(status == 0);
+
+       ip = IPv4(128, 0, 0, 10);
+       depth = 32;
+       next_hop_add = 101;
+
+       status = rte_lpm_add(lpm, ip, depth, next_hop_add);
+       TEST_LPM_ASSERT(status == 0);
+
+       status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+       TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
+
+       ip = IPv4(128, 0, 0, 0);
+       next_hop_add = 100;
+
+       status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+       TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
+
+       ip = IPv4(128, 0, 0, 0);
+       depth = 24;
+
+       status = rte_lpm_delete(lpm, ip, depth);
+       TEST_LPM_ASSERT(status == 0);
+
+       status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+       TEST_LPM_ASSERT(status == -ENOENT);
+
+       ip = IPv4(128, 0, 0, 10);
+       depth = 32;
+
+       status = rte_lpm_delete(lpm, ip, depth);
+       TEST_LPM_ASSERT(status == 0);
+
+       status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+       TEST_LPM_ASSERT(status == -ENOENT);
+
+       rte_lpm_free(lpm);
+
+       return PASS;
+}
+
+/* TEST 12
+ *
+ * Add an extended rule (i.e. depth greater than 24, lookup (hit), delete,
+ * lookup (miss) in a for loop of 1000 times. This will check tbl8 extension
+ * and contraction.
+ *
+ * */
+
+int32_t
+test12(void)
+{
+       struct rte_lpm *lpm = NULL;
+       uint32_t ip, i;
+       uint8_t depth, next_hop_add, next_hop_return;
+       int32_t status = 0;
+
+       lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP);
+       TEST_LPM_ASSERT(lpm != NULL);
+
+       ip = IPv4(128, 0, 0, 0);
+       depth = 32;
+       next_hop_add = 100;
+
+       for (i = 0; i < 1000; i++) {
+               status = rte_lpm_add(lpm, ip, depth, next_hop_add);
+               TEST_LPM_ASSERT(status == 0);
+
+               status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+               TEST_LPM_ASSERT((status == 0) &&
+                               (next_hop_return == next_hop_add));
+
+               status = rte_lpm_delete(lpm, ip, depth);
+               TEST_LPM_ASSERT(status == 0);
+
+               status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+               TEST_LPM_ASSERT(status == -ENOENT);
+       }
+
+       rte_lpm_free(lpm);
+
+       return PASS;
+}
+
+/* TEST 13
+ *
+ * Add a rule to tbl24, lookup (hit), then add a rule that will extend this
+ * tbl24 entry, lookup (hit). delete the rule that caused the tbl24 extension,
+ * lookup (miss) and repeat for loop of 1000 times. This will check tbl8
+ * extension and contraction.
+ *
+ * */
+
+int32_t
+test13(void)
+{
+       struct rte_lpm *lpm = NULL;
+       uint32_t ip, i;
+       uint8_t depth, next_hop_add_1, next_hop_add_2, next_hop_return;
+       int32_t status = 0;
+
+       lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP);
+       TEST_LPM_ASSERT(lpm != NULL);
+
+       ip = IPv4(128, 0, 0, 0);
+       depth = 24;
+       next_hop_add_1 = 100;
+
+       status = rte_lpm_add(lpm, ip, depth, next_hop_add_1);
+       TEST_LPM_ASSERT(status == 0);
+
+       status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+       TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_1));
+
+       depth = 32;
+       next_hop_add_2 = 101;
+
+       for (i = 0; i < 1000; i++) {
+               status = rte_lpm_add(lpm, ip, depth, next_hop_add_2);
+               TEST_LPM_ASSERT(status == 0);
+
+               status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+               TEST_LPM_ASSERT((status == 0) &&
+                               (next_hop_return == next_hop_add_2));
+
+               status = rte_lpm_delete(lpm, ip, depth);
+               TEST_LPM_ASSERT(status == 0);
+
+               status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+               TEST_LPM_ASSERT((status == 0) &&
+                               (next_hop_return == next_hop_add_1));
+       }
+
+       depth = 24;
+
+       status = rte_lpm_delete(lpm, ip, depth);
+       TEST_LPM_ASSERT(status == 0);
+
+       status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+       TEST_LPM_ASSERT(status == -ENOENT);
+
+       rte_lpm_free(lpm);
+
+       return PASS;
+}
+
+/* TEST 14
+ *
+ * Force TBL8 extension exhaustion. Add 256 rules that require a tbl8
+ * extension. No more tbl8 extensions will be allowed. Now add one more rule
+ * that requires a tbl8 extension and expect it to fail.
+ * */
+int32_t
+test14(void)
+{
+
+       /* We only use depth = 32 in the loop below so we must make sure
+        * that we have enough storage for all rules at that depth*/
+
+       struct rte_lpm *lpm = NULL;
+       uint32_t ip;
+       uint8_t depth, next_hop_add, next_hop_return;
+       int32_t status = 0;
+
+       /* Add enough space for 256 rules for every depth */
+       lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, 256 * 32, RTE_LPM_HEAP);
+       TEST_LPM_ASSERT(lpm != NULL);
+
+       ip = IPv4(0, 0, 0, 0);
+       depth = 32;
+       next_hop_add = 100;
+
+       /* Add 256 rules that require a tbl8 extension */
+       for (ip = 0; ip <= IPv4(0, 0, 255, 0); ip += 256) {
+               status = rte_lpm_add(lpm, ip, depth, next_hop_add);
+               TEST_LPM_ASSERT(status == 0);
+
+               status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+               TEST_LPM_ASSERT((status == 0) &&
+                               (next_hop_return == next_hop_add));
+       }
+
+       /* All tbl8 extensions have been used above. Try to add one more and
+        * we get a fail */
+       ip = IPv4(1, 0, 0, 0);
+       depth = 32;
+
+       status = rte_lpm_add(lpm, ip, depth, next_hop_add);
+       TEST_LPM_ASSERT(status < 0);
+
+       rte_lpm_free(lpm);
+
+       return PASS;
+}
+
+/* TEST 15
+ *
+ * Lookup performance test using the Mae West routing table.
+ */
/*
 * Convert a prefix depth (1..32) to a netmask with the top `depth` bits set.
 *
 * The original wrote this as `(int)0x80000000 >> (depth - 1)`: a right
 * shift of a negative signed value, whose result is implementation-defined
 * in C (it happens to work on arithmetic-shift platforms).  The unsigned
 * left shift below yields the same mask portably.  depth == 0 is not a
 * valid prefix length and must not be passed (shift count would be 32).
 */
static inline uint32_t
depth_to_mask(uint8_t depth) {
	return (uint32_t)(~(uint32_t)0 << (32 - depth));
}
+
+static uint32_t
+rule_table_check_for_duplicates(const struct route_rule *table, uint32_t n){
+       unsigned i, j, count;
+
+       count = 0;
+       for (i = 0; i < (n - 1); i++) {
+               uint8_t depth1 = table[i].depth;
+               uint32_t ip1_masked = table[i].ip & depth_to_mask(depth1);
+
+               for (j = (i + 1); j <n; j ++) {
+                       uint8_t depth2 = table[j].depth;
+                       uint32_t ip2_masked = table[j].ip &
+                                       depth_to_mask(depth2);
+
+                       if ((depth1 == depth2) && (ip1_masked == ip2_masked)){
+                               printf("Rule %u is a duplicate of rule %u\n",
+                                               j, i);
+                               count ++;
+                       }
+               }
+       }
+
+       return count;
+}
+
+static int32_t
+rule_table_characterisation(const struct route_rule *table, uint32_t n){
+       unsigned i, j;
+
+       printf("DEPTH           QUANTITY (PERCENT)\n");
+       printf("--------------------------------- \n");
+       /* Count depths. */
+       for(i = 1; i <= 32; i++) {
+               unsigned depth_counter = 0;
+               double percent_hits;
+
+               for (j = 0; j < n; j++) {
+                       if (table[j].depth == (uint8_t) i)
+                               depth_counter++;
+               }
+
+               percent_hits = ((double)depth_counter)/((double)n) * 100;
+
+               printf("%u      -       %5u (%.2f)\n",
+                               i, depth_counter, percent_hits);
+       }
+
+       return 0;
+}
+
/*
 * Integer division rounded to the nearest whole number (halves round up).
 *
 * Written as dividend + divisor/2 rather than the original
 * (2*dividend + divisor) / (2*divisor) so that large dividends
 * (>= 2^63) do not overflow the intermediate value; the two forms are
 * mathematically identical for every non-zero divisor.
 * divisor must be non-zero.
 */
static inline uint64_t
div64(uint64_t dividend, uint64_t divisor)
{
	return (dividend + (divisor / 2)) / divisor;
}
+
/* Performance harness: bulk add, random lookup, and bulk delete of the
 * Mae West route set, reporting average rdtsc cycles per operation plus
 * tbl24 occupancy statistics.  Reads LPM internals (lpm->tbl24). */
int32_t
test15(void)
{
	struct rte_lpm *lpm = NULL;
	uint64_t begin, end, total_time, lpm_used_entries = 0;
	unsigned avg_ticks, i, j;
	uint8_t next_hop_add = 0, next_hop_return = 0;
	int32_t status = 0;

	/* Describe the route set before measuring anything. */
	printf("Using Mae West routing table from www.oiforum.com\n");
	printf("No. routes = %u\n", (unsigned) NUM_ROUTE_ENTRIES);
	printf("No. duplicate routes = %u\n\n", (unsigned)
		rule_table_check_for_duplicates(mae_west_tbl, NUM_ROUTE_ENTRIES));
	printf("Route distribution per prefix width: \n");
	rule_table_characterisation(mae_west_tbl,
			(uint32_t) NUM_ROUTE_ENTRIES);
	printf("\n");

	/* Oversized table so the whole route set fits without eviction. */
	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, 1000000,
			RTE_LPM_MEMZONE);
	TEST_LPM_ASSERT(lpm != NULL);

	next_hop_add = 1;

	/* Add */
	/* Begin Timer. */
	begin = rte_rdtsc();

	for (i = 0; i < NUM_ROUTE_ENTRIES; i++) {
		/* rte_lpm_add(lpm, ip, depth, next_hop_add) */
		status += rte_lpm_add(lpm, mae_west_tbl[i].ip,
				mae_west_tbl[i].depth, next_hop_add);
	}
	/* End Timer. */
	end = rte_rdtsc();

	/* Any failed add would have left a negative sum. */
	TEST_LPM_ASSERT(status == 0);

	/* Calculate average cycles per add. */
	avg_ticks = (uint32_t) div64((end - begin),
			(uint64_t) NUM_ROUTE_ENTRIES);

	uint64_t cache_line_counter = 0;
	uint64_t count = 0;

	/* Obtain add statistics. */
	for (i = 0; i < RTE_LPM_TBL24_NUM_ENTRIES; i++) {
		if (lpm->tbl24[i].valid)
			lpm_used_entries++;

		/* NOTE(review): a cache line is counted as touched when any
		 * of its 32 entries is valid — presumably 2 bytes per tbl24
		 * entry, i.e. 64 bytes per line; confirm against rte_lpm.h. */
		if (i % 32 == 0){
			if (count < lpm_used_entries) {
				cache_line_counter++;
				count = lpm_used_entries;
			}
		}
	}

	printf("Number of table 24 entries =    %u\n",
			(unsigned) RTE_LPM_TBL24_NUM_ENTRIES);
	printf("Used table 24 entries =         %u\n",
			(unsigned) lpm_used_entries);
	printf("Percentage of table 24 entries used = %u\n",
			(unsigned) div64((lpm_used_entries * 100) ,
					RTE_LPM_TBL24_NUM_ENTRIES));
	printf("64 byte Cache entries used = %u \n",
			(unsigned) cache_line_counter);
	printf("Cache Required = %u bytes\n\n",
			(unsigned) cache_line_counter * 64);

	printf("Average LPM Add:        %u cycles\n", avg_ticks);

	/* Lookup */

	/* Choose random seed. */
	rte_srand(0);
	total_time = 0;
	status = 0;
	for (i = 0; i < (ITERATIONS / BATCH_SIZE); i ++) {
		static uint32_t ip_batch[BATCH_SIZE];
		uint64_t begin_batch, end_batch;

		/* Generate a batch of random numbers */
		for (j = 0; j < BATCH_SIZE; j ++) {
			ip_batch[j] = rte_rand();
		}

		/* Lookup per batch */
		begin_batch = rte_rdtsc();

		for (j = 0; j < BATCH_SIZE; j ++) {
			status += rte_lpm_lookup(lpm, ip_batch[j],
					&next_hop_return);
		}

		end_batch = rte_rdtsc();
		/* NOTE(review): prints next_hop_return under a "status"
		 * label — looks like a leftover debug line; verify intent. */
		printf("status = %d\r", next_hop_return);
		TEST_LPM_ASSERT(status < 1);

		/* Accumulate batch time */
		total_time += (end_batch - begin_batch);

		/* NOTE(review): status accumulates 0 or -ENOENT per random
		 * lookup, so these asserts are only loose sanity checks, not
		 * per-lookup correctness checks — confirm that is intended. */
		TEST_LPM_ASSERT((status < -ENOENT) ||
					(next_hop_return == next_hop_add));
	}

	avg_ticks = (uint32_t) div64(total_time, ITERATIONS);
	printf("Average LPM Lookup:     %u cycles\n", avg_ticks);

	/* Delete */
	status = 0;
	begin = rte_rdtsc();

	for (i = 0; i < NUM_ROUTE_ENTRIES; i++) {
		/* rte_lpm_delete(lpm, ip, depth) */
		status += rte_lpm_delete(lpm, mae_west_tbl[i].ip,
				mae_west_tbl[i].depth);
	}

	end = rte_rdtsc();

	TEST_LPM_ASSERT(status == 0);

	avg_ticks = (uint32_t) div64((end - begin), NUM_ROUTE_ENTRIES);

	printf("Average LPM Delete:     %u cycles\n", avg_ticks);

	rte_lpm_delete_all(lpm);
	rte_lpm_free(lpm);

	return PASS;
}
+
+
+
+/*
+ * Sequence of operations for find existing fbk hash table
+ *
+ *  - create table
+ *  - find existing table: hit
+ *  - find non-existing table: miss
+ *
+ */
+int32_t test16(void)
+{
+       struct rte_lpm *lpm = NULL, *result = NULL;
+
+       /* Create lpm  */
+       lpm = rte_lpm_create("lpm_find_existing", SOCKET_ID_ANY, 256 * 32, RTE_LPM_HEAP);
+       TEST_LPM_ASSERT(lpm != NULL);
+
+       /* Try to find existing lpm */
+       result = rte_lpm_find_existing("lpm_find_existing");
+       TEST_LPM_ASSERT(result == lpm);
+
+       /* Try to find non-existing lpm */
+       result = rte_lpm_find_existing("lpm_find_non_existing");
+       TEST_LPM_ASSERT(result == NULL);
+
+       /* Cleanup. */
+       rte_lpm_delete_all(lpm);
+       rte_lpm_free(lpm);
+
+       return PASS;
+}
+
+/*
+ * test failure condition of overloading the tbl8 so no more will fit
+ * Check we get an error return value in that case
+ */
+static int32_t
+test17(void)
+{
+       uint32_t ip;
+       struct rte_lpm *lpm = rte_lpm_create(__func__, SOCKET_ID_ANY,
+                       256 * 32, RTE_LPM_HEAP);
+
+       printf("Testing filling tbl8's\n");
+
+       /* ip loops through all positibilities for top 24 bits of address */
+       for (ip = 0; ip < 0xFFFFFF; ip++){
+               /* add an entrey within a different tbl8 each time, since
+                * depth >24 and the top 24 bits are different */
+               if (rte_lpm_add(lpm, (ip << 8) + 0xF0, 30, 0) < 0)
+                       break;
+       }
+
+       if (ip != RTE_LPM_TBL8_NUM_GROUPS) {
+               printf("Error, unexpected failure with filling tbl8 groups\n");
+               printf("Failed after %u additions, expected after %u\n",
+                               (unsigned)ip, (unsigned)RTE_LPM_TBL8_NUM_GROUPS);
+       }
+
+       rte_lpm_free(lpm);
+       return 0;
+}
+
+/*
+ * Test 18
+ * Test for overwriting of tbl8:
+ *  - add rule /32 and lookup
+ *  - add new rule /24 and lookup
+ *     - add third rule /25 and lookup
+ *     - lookup /32 and /24 rule to ensure the table has not been overwritten.
+ */
+int32_t
+test18(void)
+{
+       struct rte_lpm *lpm = NULL;
+       const uint32_t ip_10_32 = IPv4(10, 10, 10, 2);
+       const uint32_t ip_10_24 = IPv4(10, 10, 10, 0);
+       const uint32_t ip_20_25 = IPv4(10, 10, 20, 2);
+       const uint8_t d_ip_10_32 = 32,
+                       d_ip_10_24 = 24,
+                       d_ip_20_25 = 25;
+       const uint8_t next_hop_ip_10_32 = 100,
+                       next_hop_ip_10_24 = 105,
+                       next_hop_ip_20_25 = 111;
+       uint8_t next_hop_return = 0;
+       int32_t status = 0;
+
+       lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, MAX_RULES, RTE_LPM_HEAP);
+       TEST_LPM_ASSERT(lpm != NULL);
+
+       status = rte_lpm_add(lpm, ip_10_32, d_ip_10_32, next_hop_ip_10_32);
+       TEST_LPM_ASSERT(status == 0);
+
+       status = rte_lpm_lookup(lpm, ip_10_32, &next_hop_return);
+       TEST_LPM_ASSERT(status == 0);
+       TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_32);
+
+       status = rte_lpm_add(lpm, ip_10_24, d_ip_10_24, next_hop_ip_10_24);
+       TEST_LPM_ASSERT(status == 0);
+
+       status = rte_lpm_lookup(lpm, ip_10_24, &next_hop_return);
+       TEST_LPM_ASSERT(status == 0);
+       TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_24);
+
+       status = rte_lpm_add(lpm, ip_20_25, d_ip_20_25, next_hop_ip_20_25);
+       TEST_LPM_ASSERT(status == 0);
+
+       status = rte_lpm_lookup(lpm, ip_20_25, &next_hop_return);
+       TEST_LPM_ASSERT(status == 0);
+       TEST_LPM_ASSERT(next_hop_return == next_hop_ip_20_25);
+
+       status = rte_lpm_lookup(lpm, ip_10_32, &next_hop_return);
+       TEST_LPM_ASSERT(status == 0);
+       TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_32);
+
+       status = rte_lpm_lookup(lpm, ip_10_24, &next_hop_return);
+       TEST_LPM_ASSERT(status == 0);
+       TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_24);
+
+       rte_lpm_free(lpm);
+
+       printf("%s PASSED\n", __func__);
+       return PASS;
+}
+
+
+/*
+ * Do all unit and performance tests.
+ */
+
+int
+test_lpm(void)
+{
+       unsigned test_num;
+       int status, global_status;
+
+       printf("Running LPM tests...\n"
+              "Total number of test = %u\n", (unsigned) NUM_LPM_TESTS);
+
+       global_status = 0;
+
+       for (test_num = 0; test_num < NUM_LPM_TESTS; test_num++) {
+
+               status = tests[test_num]();
+
+               printf("LPM Test %u: %s\n", test_num,
+                               (status < 0) ? "FAIL" : "PASS");
+
+               if (status < 0) {
+                       global_status = status;
+               }
+       }
+
+       return global_status;
+}
+
+#else
+
int
test_lpm(void)
{
	/* Stub used when the LPM library is compiled out (see the #else
	 * above); reports success so the overall run is not marked failed. */
	printf("The LPM library is not included in this build\n");
	return 0;
}
+
+#endif
diff --git a/app/test/test_lpm_routes.h b/app/test/test_lpm_routes.h
new file mode 100644 (file)
index 0000000..85e885a
--- /dev/null
@@ -0,0 +1,28947 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#ifndef _TEST_LPM_ROUTES_H_
+#define _TEST_LPM_ROUTES_H_
+
+#include <rte_ip.h>
+
/* One routing-table entry: an IPv4 network address plus its prefix length. */
struct route_rule {
	uint32_t ip;    /* IPv4 address — presumably host byte order, as built by the IPv4() macro; confirm against rte_ip.h. */
	uint8_t  depth; /* Prefix length in bits (1..32). */
};
+
+static const struct route_rule mae_west_tbl[] =
+{
+       { IPv4(6,1,0,0),16 },
+       { IPv4(6,2,0,0),22 },
+       { IPv4(6,3,0,0),18 },
+       { IPv4(6,4,0,0),16 },
+       { IPv4(6,5,0,0),19 },
+       { IPv4(6,8,0,0),20 },
+       { IPv4(6,9,0,0),20 },
+       { IPv4(6,10,0,0),15 },
+       { IPv4(6,14,0,0),15 },
+       { IPv4(6,133,0,0),21 },
+       { IPv4(6,151,0,0),16 },
+       { IPv4(6,152,0,0),16 },
+       { IPv4(9,141,128,0),24 },
+       { IPv4(12,0,0,0),8 },
+       { IPv4(12,0,252,0),23 },
+       { IPv4(12,1,83,0),24 },
+       { IPv4(12,1,245,0),24 },
+       { IPv4(12,1,248,0),24 },
+       { IPv4(12,2,6,0),24 },
+       { IPv4(12,2,7,0),24 },
+       { IPv4(12,2,41,0),24 },
+       { IPv4(12,2,88,0),22 },
+       { IPv4(12,2,94,0),23 },
+       { IPv4(12,2,97,0),24 },
+       { IPv4(12,2,99,0),24 },
+       { IPv4(12,2,109,0),24 },
+       { IPv4(12,2,110,0),24 },
+       { IPv4(12,2,142,0),24 },
+       { IPv4(12,2,169,0),24 },
+       { IPv4(12,2,216,0),23 },
+       { IPv4(12,2,220,0),22 },
+       { IPv4(12,2,246,0),24 },
+       { IPv4(12,3,33,0),24 },
+       { IPv4(12,3,59,0),24 },
+       { IPv4(12,3,65,0),24 },
+       { IPv4(12,3,80,0),22 },
+       { IPv4(12,3,119,0),24 },
+       { IPv4(12,3,217,0),24 },
+       { IPv4(12,4,5,0),24 },
+       { IPv4(12,4,114,0),24 },
+       { IPv4(12,4,119,0),24 },
+       { IPv4(12,4,126,0),23 },
+       { IPv4(12,4,196,0),22 },
+       { IPv4(12,4,228,0),24 },
+       { IPv4(12,5,39,0),24 },
+       { IPv4(12,5,48,0),21 },
+       { IPv4(12,5,136,0),24 },
+       { IPv4(12,5,144,0),24 },
+       { IPv4(12,5,164,0),24 },
+       { IPv4(12,5,165,0),24 },
+       { IPv4(12,6,97,0),24 },
+       { IPv4(12,6,102,0),24 },
+       { IPv4(12,6,103,0),24 },
+       { IPv4(12,6,108,0),24 },
+       { IPv4(12,6,109,0),24 },
+       { IPv4(12,6,110,0),24 },
+       { IPv4(12,6,111,0),24 },
+       { IPv4(12,6,121,0),24 },
+       { IPv4(12,6,124,0),24 },
+       { IPv4(12,6,125,0),24 },
+       { IPv4(12,6,206,0),24 },
+       { IPv4(12,6,227,0),24 },
+       { IPv4(12,7,5,0),24 },
+       { IPv4(12,7,133,0),24 },
+       { IPv4(12,7,216,0),21 },
+       { IPv4(12,8,9,0),24 },
+       { IPv4(12,8,13,0),24 },
+       { IPv4(12,8,184,0),24 },
+       { IPv4(12,8,188,0),24 },
+       { IPv4(12,8,189,0),24 },
+       { IPv4(12,9,136,0),24 },
+       { IPv4(12,9,138,0),24 },
+       { IPv4(12,9,139,0),24 },
+       { IPv4(12,10,150,0),24 },
+       { IPv4(12,10,152,0),21 },
+       { IPv4(12,11,130,0),24 },
+       { IPv4(12,11,131,0),24 },
+       { IPv4(12,11,138,0),24 },
+       { IPv4(12,11,162,0),24 },
+       { IPv4(12,13,56,0),24 },
+       { IPv4(12,13,57,0),24 },
+       { IPv4(12,13,58,0),24 },
+       { IPv4(12,13,59,0),24 },
+       { IPv4(12,13,74,0),24 },
+       { IPv4(12,13,82,0),23 },
+       { IPv4(12,13,84,0),24 },
+       { IPv4(12,13,224,0),19 },
+       { IPv4(12,13,224,0),21 },
+       { IPv4(12,13,236,0),22 },
+       { IPv4(12,13,240,0),22 },
+       { IPv4(12,13,244,0),23 },
+       { IPv4(12,13,246,0),23 },
+       { IPv4(12,13,248,0),22 },
+       { IPv4(12,14,190,0),23 },
+       { IPv4(12,14,214,0),24 },
+       { IPv4(12,14,215,0),24 },
+       { IPv4(12,14,232,0),23 },
+       { IPv4(12,14,237,0),24 },
+       { IPv4(12,14,238,0),23 },
+       { IPv4(12,15,28,0),24 },
+       { IPv4(12,15,46,0),23 },
+       { IPv4(12,16,40,0),24 },
+       { IPv4(12,16,41,0),24 },
+       { IPv4(12,16,76,0),23 },
+       { IPv4(12,16,132,0),24 },
+       { IPv4(12,16,133,0),24 },
+       { IPv4(12,16,134,0),24 },
+       { IPv4(12,16,135,0),24 },
+       { IPv4(12,16,160,0),24 },
+       { IPv4(12,16,161,0),24 },
+       { IPv4(12,16,162,0),24 },
+       { IPv4(12,16,163,0),24 },
+       { IPv4(12,16,168,0),24 },
+       { IPv4(12,16,188,0),24 },
+       { IPv4(12,16,189,0),24 },
+       { IPv4(12,16,190,0),24 },
+       { IPv4(12,16,191,0),24 },
+       { IPv4(12,17,20,0),24 },
+       { IPv4(12,18,36,0),24 },
+       { IPv4(12,18,36,0),22 },
+       { IPv4(12,18,90,0),23 },
+       { IPv4(12,18,96,0),22 },
+       { IPv4(12,18,110,0),23 },
+       { IPv4(12,18,120,0),24 },
+       { IPv4(12,18,155,0),24 },
+       { IPv4(12,18,170,0),24 },
+       { IPv4(12,18,171,0),24 },
+       { IPv4(12,18,177,0),24 },
+       { IPv4(12,18,216,0),24 },
+       { IPv4(12,18,217,0),24 },
+       { IPv4(12,18,240,0),22 },
+       { IPv4(12,18,244,0),22 },
+       { IPv4(12,19,136,0),23 },
+       { IPv4(12,19,138,0),24 },
+       { IPv4(12,19,208,0),24 },
+       { IPv4(12,19,211,0),24 },
+       { IPv4(12,20,16,0),22 },
+       { IPv4(12,20,55,0),24 },
+       { IPv4(12,20,92,0),24 },
+       { IPv4(12,20,200,0),22 },
+       { IPv4(12,20,229,0),24 },
+       { IPv4(12,21,14,0),23 },
+       { IPv4(12,21,85,0),24 },
+       { IPv4(12,21,202,0),24 },
+       { IPv4(12,21,208,0),23 },
+       { IPv4(12,21,210,0),23 },
+       { IPv4(12,21,212,0),23 },
+       { IPv4(12,21,216,0),24 },
+       { IPv4(12,21,217,0),24 },
+       { IPv4(12,21,218,0),24 },
+       { IPv4(12,21,219,0),24 },
+       { IPv4(12,21,222,0),24 },
+       { IPv4(12,21,223,0),24 },
+       { IPv4(12,22,96,0),24 },
+       { IPv4(12,23,26,0),23 },
+       { IPv4(12,23,66,0),23 },
+       { IPv4(12,23,70,0),23 },
+       { IPv4(12,23,72,0),23 },
+       { IPv4(12,23,78,0),24 },
+       { IPv4(12,23,108,0),23 },
+       { IPv4(12,23,136,0),22 },
+       { IPv4(12,23,189,0),24 },
+       { IPv4(12,23,194,0),24 },
+       { IPv4(12,23,251,0),24 },
+       { IPv4(12,24,96,0),24 },
+       { IPv4(12,24,112,0),22 },
+       { IPv4(12,24,180,0),23 },
+       { IPv4(12,24,252,0),22 },
+       { IPv4(12,25,49,0),24 },
+       { IPv4(12,25,98,0),24 },
+       { IPv4(12,25,105,0),24 },
+       { IPv4(12,25,136,0),21 },
+       { IPv4(12,25,196,0),22 },
+       { IPv4(12,25,212,0),23 },
+       { IPv4(12,25,230,0),24 },
+       { IPv4(12,25,232,0),21 },
+       { IPv4(12,25,241,0),24 },
+       { IPv4(12,26,7,0),24 },
+       { IPv4(12,26,25,0),24 },
+       { IPv4(12,26,53,0),24 },
+       { IPv4(12,26,84,0),24 },
+       { IPv4(12,26,86,0),24 },
+       { IPv4(12,26,100,0),22 },
+       { IPv4(12,26,128,0),22 },
+       { IPv4(12,26,136,0),22 },
+       { IPv4(12,26,144,0),22 },
+       { IPv4(12,26,144,0),23 },
+       { IPv4(12,27,38,0),24 },
+       { IPv4(12,27,40,0),24 },
+       { IPv4(12,27,41,0),24 },
+       { IPv4(12,27,42,0),24 },
+       { IPv4(12,27,43,0),24 },
+       { IPv4(12,27,66,0),24 },
+       { IPv4(12,27,88,0),24 },
+       { IPv4(12,27,89,0),24 },
+       { IPv4(12,27,90,0),24 },
+       { IPv4(12,28,146,0),23 },
+       { IPv4(12,28,148,0),24 },
+       { IPv4(12,28,242,0),24 },
+       { IPv4(12,29,100,0),24 },
+       { IPv4(12,29,101,0),24 },
+       { IPv4(12,29,102,0),24 },
+       { IPv4(12,29,106,0),24 },
+       { IPv4(12,29,190,0),24 },
+       { IPv4(12,29,194,0),24 },
+       { IPv4(12,30,0,0),23 },
+       { IPv4(12,30,1,0),24 },
+       { IPv4(12,30,105,0),24 },
+       { IPv4(12,30,159,0),24 },
+       { IPv4(12,30,198,0),23 },
+       { IPv4(12,30,205,0),24 },
+       { IPv4(12,30,208,0),21 },
+       { IPv4(12,30,224,0),22 },
+       { IPv4(12,30,228,0),22 },
+       { IPv4(12,31,21,0),24 },
+       { IPv4(12,31,24,0),24 },
+       { IPv4(12,31,25,0),24 },
+       { IPv4(12,31,125,0),24 },
+       { IPv4(12,31,126,0),24 },
+       { IPv4(12,31,143,0),24 },
+       { IPv4(12,31,159,0),24 },
+       { IPv4(12,31,160,0),24 },
+       { IPv4(12,31,161,0),24 },
+       { IPv4(12,31,202,0),24 },
+       { IPv4(12,32,72,0),23 },
+       { IPv4(12,32,90,0),24 },
+       { IPv4(12,32,104,0),22 },
+       { IPv4(12,32,231,0),24 },
+       { IPv4(12,32,241,0),24 },
+       { IPv4(12,33,46,0),24 },
+       { IPv4(12,33,56,0),22 },
+       { IPv4(12,33,114,0),24 },
+       { IPv4(12,33,194,0),24 },
+       { IPv4(12,33,195,0),24 },
+       { IPv4(12,34,2,0),24 },
+       { IPv4(12,34,8,0),21 },
+       { IPv4(12,34,100,0),24 },
+       { IPv4(12,34,101,0),24 },
+       { IPv4(12,34,119,0),24 },
+       { IPv4(12,34,154,0),24 },
+       { IPv4(12,34,155,0),24 },
+       { IPv4(12,34,159,0),24 },
+       { IPv4(12,34,233,0),24 },
+       { IPv4(12,35,37,0),24 },
+       { IPv4(12,35,65,0),24 },
+       { IPv4(12,35,96,0),24 },
+       { IPv4(12,35,114,0),24 },
+       { IPv4(12,35,145,0),24 },
+       { IPv4(12,35,159,0),24 },
+       { IPv4(12,36,56,0),22 },
+       { IPv4(12,36,116,0),24 },
+       { IPv4(12,36,118,0),24 },
+       { IPv4(12,36,129,0),24 },
+       { IPv4(12,36,133,0),24 },
+       { IPv4(12,36,160,0),22 },
+       { IPv4(12,36,203,0),24 },
+       { IPv4(12,37,27,0),24 },
+       { IPv4(12,37,28,0),22 },
+       { IPv4(12,37,61,0),24 },
+       { IPv4(12,37,113,0),24 },
+       { IPv4(12,37,211,0),24 },
+       { IPv4(12,37,228,0),22 },
+       { IPv4(12,37,232,64),26 },
+       { IPv4(12,37,238,0),23 },
+       { IPv4(12,38,48,0),22 },
+       { IPv4(12,38,64,0),22 },
+       { IPv4(12,38,112,0),24 },
+       { IPv4(12,38,144,0),24 },
+       { IPv4(12,38,145,0),24 },
+       { IPv4(12,38,245,0),24 },
+       { IPv4(12,38,246,0),24 },
+       { IPv4(12,38,247,0),24 },
+       { IPv4(12,39,37,0),24 },
+       { IPv4(12,39,65,0),24 },
+       { IPv4(12,39,106,0),24 },
+       { IPv4(12,40,114,0),24 },
+       { IPv4(12,40,116,0),24 },
+       { IPv4(12,40,121,0),24 },
+       { IPv4(12,40,174,0),24 },
+       { IPv4(12,40,179,0),24 },
+       { IPv4(12,41,3,0),24 },
+       { IPv4(12,41,48,0),24 },
+       { IPv4(12,41,49,0),24 },
+       { IPv4(12,41,50,0),24 },
+       { IPv4(12,41,51,0),24 },
+       { IPv4(12,41,66,0),23 },
+       { IPv4(12,41,122,0),24 },
+       { IPv4(12,41,162,0),23 },
+       { IPv4(12,41,162,0),24 },
+       { IPv4(12,41,163,0),24 },
+       { IPv4(12,41,188,0),24 },
+       { IPv4(12,41,193,0),24 },
+       { IPv4(12,41,194,0),23 },
+       { IPv4(12,41,194,0),24 },
+       { IPv4(12,41,195,0),24 },
+       { IPv4(12,42,26,0),24 },
+       { IPv4(12,42,50,0),24 },
+       { IPv4(12,42,51,0),24 },
+       { IPv4(12,42,52,0),24 },
+       { IPv4(12,42,58,0),24 },
+       { IPv4(12,42,59,0),24 },
+       { IPv4(12,42,130,0),24 },
+       { IPv4(12,42,144,0),22 },
+       { IPv4(12,42,152,0),24 },
+       { IPv4(12,42,240,0),23 },
+       { IPv4(12,43,20,0),23 },
+       { IPv4(12,43,128,0),24 },
+       { IPv4(12,43,128,0),20 },
+       { IPv4(12,43,130,0),24 },
+       { IPv4(12,43,144,0),20 },
+       { IPv4(12,43,146,0),24 },
+       { IPv4(12,45,103,0),24 },
+       { IPv4(12,45,108,0),24 },
+       { IPv4(12,45,121,0),24 },
+       { IPv4(12,45,134,0),24 },
+       { IPv4(12,46,144,0),21 },
+       { IPv4(12,46,160,0),24 },
+       { IPv4(12,46,162,0),23 },
+       { IPv4(12,46,164,0),22 },
+       { IPv4(12,46,168,0),21 },
+       { IPv4(12,47,101,0),24 },
+       { IPv4(12,47,192,0),21 },
+       { IPv4(12,47,217,0),24 },
+       { IPv4(12,47,220,0),22 },
+       { IPv4(12,47,220,0),24 },
+       { IPv4(12,47,221,0),24 },
+       { IPv4(12,47,222,0),24 },
+       { IPv4(12,64,96,0),19 },
+       { IPv4(12,64,128,0),18 },
+       { IPv4(12,64,192,0),18 },
+       { IPv4(12,65,0,0),18 },
+       { IPv4(12,65,64,0),19 },
+       { IPv4(12,65,96,0),19 },
+       { IPv4(12,65,128,0),18 },
+       { IPv4(12,65,192,0),19 },
+       { IPv4(12,65,224,0),20 },
+       { IPv4(12,65,240,0),20 },
+       { IPv4(12,66,0,0),19 },
+       { IPv4(12,67,1,0),24 },
+       { IPv4(12,67,4,0),24 },
+       { IPv4(12,67,5,0),24 },
+       { IPv4(12,67,6,0),24 },
+       { IPv4(12,67,7,0),24 },
+       { IPv4(12,96,40,0),22 },
+       { IPv4(12,96,160,0),21 },
+       { IPv4(12,96,169,0),24 },
+       { IPv4(12,104,36,0),24 },
+       { IPv4(12,104,78,0),23 },
+       { IPv4(12,104,82,0),23 },
+       { IPv4(12,104,96,0),24 },
+       { IPv4(12,105,115,0),24 },
+       { IPv4(12,105,138,0),24 },
+       { IPv4(12,105,139,0),24 },
+       { IPv4(12,105,185,0),24 },
+       { IPv4(12,106,16,0),24 },
+       { IPv4(12,106,96,0),24 },
+       { IPv4(12,106,130,0),24 },
+       { IPv4(12,107,20,0),22 },
+       { IPv4(12,107,40,0),22 },
+       { IPv4(12,107,44,0),22 },
+       { IPv4(12,107,82,0),24 },
+       { IPv4(12,107,130,0),24 },
+       { IPv4(12,107,140,0),22 },
+       { IPv4(12,107,160,0),22 },
+       { IPv4(12,107,180,0),22 },
+       { IPv4(12,107,188,0),22 },
+       { IPv4(12,107,232,0),24 },
+       { IPv4(12,108,132,0),22 },
+       { IPv4(12,108,188,0),23 },
+       { IPv4(12,108,237,0),24 },
+       { IPv4(12,109,19,0),24 },
+       { IPv4(12,109,107,0),24 },
+       { IPv4(12,109,109,0),24 },
+       { IPv4(12,109,224,0),22 },
+       { IPv4(12,110,6,0),23 },
+       { IPv4(12,110,23,0),24 },
+       { IPv4(12,110,40,0),24 },
+       { IPv4(12,110,74,0),23 },
+       { IPv4(12,110,253,0),24 },
+       { IPv4(12,111,50,0),24 },
+       { IPv4(12,111,132,0),23 },
+       { IPv4(12,129,0,0),18 },
+       { IPv4(12,129,64,0),18 },
+       { IPv4(12,129,192,0),18 },
+       { IPv4(12,144,16,0),22 },
+       { IPv4(12,144,24,0),22 },
+       { IPv4(12,144,144,0),24 },
+       { IPv4(12,144,148,0),22 },
+       { IPv4(12,145,65,0),24 },
+       { IPv4(12,145,188,0),24 },
+       { IPv4(12,147,44,0),24 },
+       { IPv4(12,147,52,0),24 },
+       { IPv4(12,147,53,0),24 },
+       { IPv4(12,147,54,0),24 },
+       { IPv4(12,147,55,0),24 },
+       { IPv4(12,148,204,0),23 },
+       { IPv4(12,149,4,0),23 },
+       { IPv4(12,150,116,0),24 },
+       { IPv4(12,151,64,0),21 },
+       { IPv4(12,151,96,0),24 },
+       { IPv4(12,152,100,0),23 },
+       { IPv4(12,152,102,0),23 },
+       { IPv4(12,152,104,0),23 },
+       { IPv4(12,152,164,0),23 },
+       { IPv4(12,152,236,0),24 },
+       { IPv4(12,152,237,0),24 },
+       { IPv4(12,152,238,0),24 },
+       { IPv4(12,152,239,0),24 },
+       { IPv4(12,152,240,0),24 },
+       { IPv4(12,153,0,0),21 },
+       { IPv4(12,153,8,0),23 },
+       { IPv4(12,153,192,0),20 },
+       { IPv4(12,153,244,0),22 },
+       { IPv4(12,154,224,0),23 },
+       { IPv4(12,155,49,0),24 },
+       { IPv4(12,155,226,0),24 },
+       { IPv4(12,158,136,0),22 },
+       { IPv4(12,158,192,0),21 },
+       { IPv4(12,158,224,0),23 },
+       { IPv4(12,159,64,0),21 },
+       { IPv4(12,159,80,0),24 },
+       { IPv4(12,161,8,0),21 },
+       { IPv4(12,161,222,0),24 },
+       { IPv4(12,162,160,0),19 },
+       { IPv4(12,242,16,0),24 },
+       { IPv4(12,242,17,0),24 },
+       { IPv4(12,242,18,0),24 },
+       { IPv4(13,181,8,0),21 },
+       { IPv4(13,181,20,0),24 },
+       { IPv4(13,181,32,0),24 },
+       { IPv4(13,181,36,0),24 },
+       { IPv4(13,181,40,0),24 },
+       { IPv4(13,181,64,0),23 },
+       { IPv4(13,181,66,0),24 },
+       { IPv4(13,181,68,0),24 },
+       { IPv4(13,181,76,0),23 },
+       { IPv4(13,181,78,0),24 },
+       { IPv4(13,181,80,0),24 },
+       { IPv4(13,181,100,0),24 },
+       { IPv4(13,181,108,0),24 },
+       { IPv4(15,0,0,0),8 },
+       { IPv4(15,211,128,0),20 },
+       { IPv4(15,220,0,0),16 },
+       { IPv4(15,232,0,0),13 },
+       { IPv4(15,240,0,0),12 },
+       { IPv4(15,248,0,0),20 },
+       { IPv4(15,251,128,0),20 },
+       { IPv4(15,252,0,0),18 },
+       { IPv4(15,252,64,0),20 },
+       { IPv4(15,252,240,0),20 },
+       { IPv4(16,0,0,0),8 },
+       { IPv4(17,0,0,0),8 },
+       { IPv4(17,103,0,0),16 },
+       { IPv4(17,104,0,0),16 },
+       { IPv4(17,112,0,0),16 },
+       { IPv4(17,126,0,0),15 },
+       { IPv4(24,0,0,0),18 },
+       { IPv4(24,0,0,0),12 },
+       { IPv4(24,0,64,0),19 },
+       { IPv4(24,0,96,0),20 },
+       { IPv4(24,0,128,0),18 },
+       { IPv4(24,0,192,0),19 },
+       { IPv4(24,1,0,0),19 },
+       { IPv4(24,1,32,0),19 },
+       { IPv4(24,1,128,0),17 },
+       { IPv4(24,2,32,0),19 },
+       { IPv4(24,2,64,0),19 },
+       { IPv4(24,2,96,0),19 },
+       { IPv4(24,2,128,0),18 },
+       { IPv4(24,2,192,0),19 },
+       { IPv4(24,2,224,0),19 },
+       { IPv4(24,3,0,0),18 },
+       { IPv4(24,3,64,0),18 },
+       { IPv4(24,3,128,0),18 },
+       { IPv4(24,3,192,0),19 },
+       { IPv4(24,4,0,0),18 },
+       { IPv4(24,4,64,0),19 },
+       { IPv4(24,4,128,0),19 },
+       { IPv4(24,4,192,0),19 },
+       { IPv4(24,5,32,0),19 },
+       { IPv4(24,5,128,0),18 },
+       { IPv4(24,6,32,0),19 },
+       { IPv4(24,6,64,0),19 },
+       { IPv4(24,6,96,0),19 },
+       { IPv4(24,7,160,0),19 },
+       { IPv4(24,8,160,0),19 },
+       { IPv4(24,8,192,0),19 },
+       { IPv4(24,9,0,0),22 },
+       { IPv4(24,10,96,0),19 },
+       { IPv4(24,10,192,0),19 },
+       { IPv4(24,11,0,0),19 },
+       { IPv4(24,11,32,0),19 },
+       { IPv4(24,11,96,0),19 },
+       { IPv4(24,11,128,0),18 },
+       { IPv4(24,11,224,0),19 },
+       { IPv4(24,12,0,0),19 },
+       { IPv4(24,12,32,0),19 },
+       { IPv4(24,12,64,0),19 },
+       { IPv4(24,12,96,0),19 },
+       { IPv4(24,12,128,0),19 },
+       { IPv4(24,12,160,0),19 },
+       { IPv4(24,12,224,0),19 },
+       { IPv4(24,13,0,0),19 },
+       { IPv4(24,13,32,0),19 },
+       { IPv4(24,13,64,0),19 },
+       { IPv4(24,13,96,0),19 },
+       { IPv4(24,13,128,0),19 },
+       { IPv4(24,13,160,0),19 },
+       { IPv4(24,13,192,0),19 },
+       { IPv4(24,14,64,0),19 },
+       { IPv4(24,14,96,0),19 },
+       { IPv4(24,14,160,0),19 },
+       { IPv4(24,16,0,0),18 },
+       { IPv4(24,16,0,0),13 },
+       { IPv4(24,16,64,0),18 },
+       { IPv4(24,16,160,0),19 },
+       { IPv4(24,16,192,0),18 },
+       { IPv4(24,17,0,0),17 },
+       { IPv4(24,17,128,0),18 },
+       { IPv4(24,17,192,0),19 },
+       { IPv4(24,17,224,0),19 },
+       { IPv4(24,18,0,0),18 },
+       { IPv4(24,18,64,0),19 },
+       { IPv4(24,18,96,0),19 },
+       { IPv4(24,18,128,0),18 },
+       { IPv4(24,18,192,0),18 },
+       { IPv4(24,19,0,0),17 },
+       { IPv4(24,19,128,0),19 },
+       { IPv4(24,19,160,0),19 },
+       { IPv4(24,20,0,0),19 },
+       { IPv4(24,20,32,0),19 },
+       { IPv4(24,20,128,0),18 },
+       { IPv4(24,20,192,0),18 },
+       { IPv4(24,21,0,0),17 },
+       { IPv4(24,21,128,0),18 },
+       { IPv4(24,21,192,0),18 },
+       { IPv4(24,22,0,0),16 },
+       { IPv4(24,23,0,0),19 },
+       { IPv4(24,23,32,0),19 },
+       { IPv4(24,23,64,0),18 },
+       { IPv4(24,23,128,0),18 },
+       { IPv4(24,23,192,0),19 },
+       { IPv4(24,23,224,0),19 },
+       { IPv4(24,24,0,0),19 },
+       { IPv4(24,24,32,0),19 },
+       { IPv4(24,24,64,0),19 },
+       { IPv4(24,24,96,0),19 },
+       { IPv4(24,24,128,0),18 },
+       { IPv4(24,24,192,0),20 },
+       { IPv4(24,24,208,0),20 },
+       { IPv4(24,24,224,0),19 },
+       { IPv4(24,25,0,0),19 },
+       { IPv4(24,25,32,0),19 },
+       { IPv4(24,25,64,0),19 },
+       { IPv4(24,25,96,0),19 },
+       { IPv4(24,25,128,0),19 },
+       { IPv4(24,25,160,0),19 },
+       { IPv4(24,25,192,0),19 },
+       { IPv4(24,25,224,0),19 },
+       { IPv4(24,26,0,0),19 },
+       { IPv4(24,26,32,0),19 },
+       { IPv4(24,26,64,0),19 },
+       { IPv4(24,26,96,0),19 },
+       { IPv4(24,26,128,0),19 },
+       { IPv4(24,26,160,0),19 },
+       { IPv4(24,26,192,0),19 },
+       { IPv4(24,26,224,0),19 },
+       { IPv4(24,27,0,0),18 },
+       { IPv4(24,27,64,0),18 },
+       { IPv4(24,27,128,0),19 },
+       { IPv4(24,27,160,0),19 },
+       { IPv4(24,27,192,0),19 },
+       { IPv4(24,27,224,0),20 },
+       { IPv4(24,27,240,0),20 },
+       { IPv4(24,28,0,0),18 },
+       { IPv4(24,28,64,0),19 },
+       { IPv4(24,28,96,0),19 },
+       { IPv4(24,28,128,0),19 },
+       { IPv4(24,28,160,0),19 },
+       { IPv4(24,28,192,0),19 },
+       { IPv4(24,28,224,0),19 },
+       { IPv4(24,29,0,0),19 },
+       { IPv4(24,29,32,0),19 },
+       { IPv4(24,29,64,0),19 },
+       { IPv4(24,29,96,0),19 },
+       { IPv4(24,29,128,0),19 },
+       { IPv4(24,29,160,0),20 },
+       { IPv4(24,29,176,0),20 },
+       { IPv4(24,29,192,0),18 },
+       { IPv4(24,30,0,0),18 },
+       { IPv4(24,30,64,0),19 },
+       { IPv4(24,30,96,0),19 },
+       { IPv4(24,30,128,0),19 },
+       { IPv4(24,30,160,0),19 },
+       { IPv4(24,30,192,0),19 },
+       { IPv4(24,30,224,0),19 },
+       { IPv4(24,31,0,0),19 },
+       { IPv4(24,31,32,0),19 },
+       { IPv4(24,31,64,0),19 },
+       { IPv4(24,31,96,0),19 },
+       { IPv4(24,31,128,0),19 },
+       { IPv4(24,31,160,0),19 },
+       { IPv4(24,31,192,0),19 },
+       { IPv4(24,31,224,0),19 },
+       { IPv4(24,33,0,0),19 },
+       { IPv4(24,34,0,0),16 },
+       { IPv4(24,36,0,0),14 },
+       { IPv4(24,36,0,0),16 },
+       { IPv4(24,37,0,0),17 },
+       { IPv4(24,37,128,0),17 },
+       { IPv4(24,38,0,0),17 },
+       { IPv4(24,38,128,0),18 },
+       { IPv4(24,38,192,0),18 },
+       { IPv4(24,39,0,0),17 },
+       { IPv4(24,39,128,0),17 },
+       { IPv4(24,40,0,0),18 },
+       { IPv4(24,40,64,0),20 },
+       { IPv4(24,41,0,0),18 },
+       { IPv4(24,41,64,0),19 },
+       { IPv4(24,42,0,0),15 },
+       { IPv4(24,56,0,0),18 },
+       { IPv4(24,60,0,0),16 },
+       { IPv4(24,61,0,0),17 },
+       { IPv4(24,61,128,0),19 },
+       { IPv4(24,64,0,0),19 },
+       { IPv4(24,64,0,0),13 },
+       { IPv4(24,64,32,0),19 },
+       { IPv4(24,64,64,0),19 },
+       { IPv4(24,64,96,0),19 },
+       { IPv4(24,64,128,0),19 },
+       { IPv4(24,64,192,0),19 },
+       { IPv4(24,64,224,0),19 },
+       { IPv4(24,65,0,0),19 },
+       { IPv4(24,65,32,0),19 },
+       { IPv4(24,65,64,0),19 },
+       { IPv4(24,65,96,0),19 },
+       { IPv4(24,65,128,0),19 },
+       { IPv4(24,65,160,0),19 },
+       { IPv4(24,65,192,0),19 },
+       { IPv4(24,65,224,0),19 },
+       { IPv4(24,66,0,0),19 },
+       { IPv4(24,66,32,0),19 },
+       { IPv4(24,66,64,0),19 },
+       { IPv4(24,66,96,0),19 },
+       { IPv4(24,66,128,0),19 },
+       { IPv4(24,66,160,0),19 },
+       { IPv4(24,66,192,0),19 },
+       { IPv4(24,66,224,0),19 },
+       { IPv4(24,67,0,0),19 },
+       { IPv4(24,67,32,0),19 },
+       { IPv4(24,67,64,0),19 },
+       { IPv4(24,67,96,0),19 },
+       { IPv4(24,67,128,0),19 },
+       { IPv4(24,67,160,0),19 },
+       { IPv4(24,67,192,0),19 },
+       { IPv4(24,67,224,0),19 },
+       { IPv4(24,68,0,0),19 },
+       { IPv4(24,68,32,0),19 },
+       { IPv4(24,68,96,0),19 },
+       { IPv4(24,68,128,0),19 },
+       { IPv4(24,68,160,0),19 },
+       { IPv4(24,68,192,0),19 },
+       { IPv4(24,68,224,0),19 },
+       { IPv4(24,69,0,0),19 },
+       { IPv4(24,69,32,0),19 },
+       { IPv4(24,69,64,0),19 },
+       { IPv4(24,69,96,0),19 },
+       { IPv4(24,69,192,0),19 },
+       { IPv4(24,69,224,0),19 },
+       { IPv4(24,70,0,0),19 },
+       { IPv4(24,70,32,0),19 },
+       { IPv4(24,70,64,0),19 },
+       { IPv4(24,70,96,0),19 },
+       { IPv4(24,70,128,0),19 },
+       { IPv4(24,70,160,0),19 },
+       { IPv4(24,70,192,0),19 },
+       { IPv4(24,70,224,0),19 },
+       { IPv4(24,71,0,0),19 },
+       { IPv4(24,71,32,0),19 },
+       { IPv4(24,71,64,0),19 },
+       { IPv4(24,71,96,0),19 },
+       { IPv4(24,71,160,0),19 },
+       { IPv4(24,71,192,0),19 },
+       { IPv4(24,71,224,0),19 },
+       { IPv4(24,76,0,0),14 },
+       { IPv4(24,76,0,0),19 },
+       { IPv4(24,76,32,0),19 },
+       { IPv4(24,76,64,0),19 },
+       { IPv4(24,76,96,0),19 },
+       { IPv4(24,76,128,0),19 },
+       { IPv4(24,76,160,0),19 },
+       { IPv4(24,76,192,0),19 },
+       { IPv4(24,76,224,0),19 },
+       { IPv4(24,77,0,0),19 },
+       { IPv4(24,77,32,0),19 },
+       { IPv4(24,77,64,0),19 },
+       { IPv4(24,77,96,0),19 },
+       { IPv4(24,77,128,0),19 },
+       { IPv4(24,77,160,0),19 },
+       { IPv4(24,77,192,0),19 },
+       { IPv4(24,77,224,0),19 },
+       { IPv4(24,78,0,0),19 },
+       { IPv4(24,78,32,0),19 },
+       { IPv4(24,78,64,0),19 },
+       { IPv4(24,78,96,0),19 },
+       { IPv4(24,78,128,0),19 },
+       { IPv4(24,78,160,0),19 },
+       { IPv4(24,78,192,0),19 },
+       { IPv4(24,78,224,0),19 },
+       { IPv4(24,79,0,0),19 },
+       { IPv4(24,79,32,0),19 },
+       { IPv4(24,79,64,0),19 },
+       { IPv4(24,79,96,0),19 },
+       { IPv4(24,79,128,0),19 },
+       { IPv4(24,79,160,0),19 },
+       { IPv4(24,79,192,0),19 },
+       { IPv4(24,79,224,0),19 },
+       { IPv4(24,80,0,0),19 },
+       { IPv4(24,80,0,0),14 },
+       { IPv4(24,80,32,0),19 },
+       { IPv4(24,80,128,0),19 },
+       { IPv4(24,80,160,0),19 },
+       { IPv4(24,81,0,0),19 },
+       { IPv4(24,81,32,0),19 },
+       { IPv4(24,81,64,0),19 },
+       { IPv4(24,81,96,0),19 },
+       { IPv4(24,81,128,0),19 },
+       { IPv4(24,81,160,0),19 },
+       { IPv4(24,81,192,0),19 },
+       { IPv4(24,81,224,0),19 },
+       { IPv4(24,82,0,0),19 },
+       { IPv4(24,82,32,0),19 },
+       { IPv4(24,82,64,0),19 },
+       { IPv4(24,82,96,0),19 },
+       { IPv4(24,82,128,0),19 },
+       { IPv4(24,82,160,0),19 },
+       { IPv4(24,82,192,0),19 },
+       { IPv4(24,82,224,0),19 },
+       { IPv4(24,83,0,0),19 },
+       { IPv4(24,83,32,0),19 },
+       { IPv4(24,83,64,0),19 },
+       { IPv4(24,83,96,0),19 },
+       { IPv4(24,83,128,0),19 },
+       { IPv4(24,88,0,0),18 },
+       { IPv4(24,88,64,0),19 },
+       { IPv4(24,88,96,0),19 },
+       { IPv4(24,88,128,0),19 },
+       { IPv4(24,88,160,0),19 },
+       { IPv4(24,88,192,0),19 },
+       { IPv4(24,88,224,0),20 },
+       { IPv4(24,88,240,0),20 },
+       { IPv4(24,90,0,0),19 },
+       { IPv4(24,91,0,0),16 },
+       { IPv4(24,91,64,0),19 },
+       { IPv4(24,91,96,0),19 },
+       { IPv4(24,91,128,0),17 },
+       { IPv4(24,92,0,0),19 },
+       { IPv4(24,92,32,0),19 },
+       { IPv4(24,92,64,0),19 },
+       { IPv4(24,92,96,0),19 },
+       { IPv4(24,92,128,0),20 },
+       { IPv4(24,92,144,0),20 },
+       { IPv4(24,92,160,0),19 },
+       { IPv4(24,92,192,0),19 },
+       { IPv4(24,92,224,0),19 },
+       { IPv4(24,93,0,0),19 },
+       { IPv4(24,93,32,0),19 },
+       { IPv4(24,93,64,0),19 },
+       { IPv4(24,93,96,0),19 },
+       { IPv4(24,93,128,0),19 },
+       { IPv4(24,93,160,0),19 },
+       { IPv4(24,93,192,0),19 },
+       { IPv4(24,93,224,0),19 },
+       { IPv4(24,94,0,0),19 },
+       { IPv4(24,94,32,0),19 },
+       { IPv4(24,94,64,0),24 },
+       { IPv4(24,94,64,0),19 },
+       { IPv4(24,94,96,0),19 },
+       { IPv4(24,94,128,0),19 },
+       { IPv4(24,94,160,0),19 },
+       { IPv4(24,94,192,0),19 },
+       { IPv4(24,94,224,0),19 },
+       { IPv4(24,95,0,0),19 },
+       { IPv4(24,95,32,0),19 },
+       { IPv4(24,95,64,0),19 },
+       { IPv4(24,95,96,0),19 },
+       { IPv4(24,95,128,0),19 },
+       { IPv4(24,95,160,0),19 },
+       { IPv4(24,95,192,0),19 },
+       { IPv4(24,95,224,0),19 },
+       { IPv4(24,98,0,0),17 },
+       { IPv4(24,98,128,0),18 },
+       { IPv4(24,98,192,0),20 },
+       { IPv4(24,100,0,0),15 },
+       { IPv4(24,102,0,0),17 },
+       { IPv4(24,102,128,0),18 },
+       { IPv4(24,102,192,0),19 },
+       { IPv4(24,102,224,0),19 },
+       { IPv4(24,103,0,0),17 },
+       { IPv4(24,103,128,0),19 },
+       { IPv4(24,103,160,0),20 },
+       { IPv4(24,104,0,0),21 },
+       { IPv4(24,104,0,0),18 },
+       { IPv4(24,104,8,0),21 },
+       { IPv4(24,104,40,0),21 },
+       { IPv4(24,104,48,0),21 },
+       { IPv4(24,104,64,0),19 },
+       { IPv4(24,104,72,0),21 },
+       { IPv4(24,108,0,0),18 },
+       { IPv4(24,108,64,0),19 },
+       { IPv4(24,108,96,0),19 },
+       { IPv4(24,108,128,0),19 },
+       { IPv4(24,108,160,0),19 },
+       { IPv4(24,108,192,0),19 },
+       { IPv4(24,108,224,0),19 },
+       { IPv4(24,109,0,0),19 },
+       { IPv4(24,109,32,0),20 },
+       { IPv4(24,109,32,0),21 },
+       { IPv4(24,109,48,0),20 },
+       { IPv4(24,109,64,0),19 },
+       { IPv4(24,109,96,0),20 },
+       { IPv4(24,112,0,0),16 },
+       { IPv4(24,112,112,0),20 },
+       { IPv4(24,114,0,0),16 },
+       { IPv4(24,116,0,0),23 },
+       { IPv4(24,116,0,0),24 },
+       { IPv4(24,116,0,0),17 },
+       { IPv4(24,116,2,0),24 },
+       { IPv4(24,116,3,0),24 },
+       { IPv4(24,116,4,0),24 },
+       { IPv4(24,116,5,0),24 },
+       { IPv4(24,116,6,0),24 },
+       { IPv4(24,116,7,0),24 },
+       { IPv4(24,116,8,0),24 },
+       { IPv4(24,116,10,0),24 },
+       { IPv4(24,116,11,0),24 },
+       { IPv4(24,116,12,0),23 },
+       { IPv4(24,116,14,0),24 },
+       { IPv4(24,116,15,0),24 },
+       { IPv4(24,116,16,0),23 },
+       { IPv4(24,116,18,0),23 },
+       { IPv4(24,116,18,0),24 },
+       { IPv4(24,116,25,0),24 },
+       { IPv4(24,116,26,0),23 },
+       { IPv4(24,116,28,0),22 },
+       { IPv4(24,116,32,0),23 },
+       { IPv4(24,116,36,0),24 },
+       { IPv4(24,116,37,0),24 },
+       { IPv4(24,116,38,0),24 },
+       { IPv4(24,116,39,0),24 },
+       { IPv4(24,116,56,0),21 },
+       { IPv4(24,116,64,0),21 },
+       { IPv4(24,116,72,0),23 },
+       { IPv4(24,116,74,0),23 },
+       { IPv4(24,116,76,0),22 },
+       { IPv4(24,116,80,0),23 },
+       { IPv4(24,116,82,0),23 },
+       { IPv4(24,116,84,0),22 },
+       { IPv4(24,116,88,0),22 },
+       { IPv4(24,116,92,0),22 },
+       { IPv4(24,116,96,0),22 },
+       { IPv4(24,116,100,0),23 },
+       { IPv4(24,116,102,0),23 },
+       { IPv4(24,116,104,0),23 },
+       { IPv4(24,116,106,0),24 },
+       { IPv4(24,116,107,0),24 },
+       { IPv4(24,116,108,0),23 },
+       { IPv4(24,116,110,0),23 },
+       { IPv4(24,116,112,0),23 },
+       { IPv4(24,116,114,0),23 },
+       { IPv4(24,116,116,0),22 },
+       { IPv4(24,116,116,0),23 },
+       { IPv4(24,116,118,0),24 },
+       { IPv4(24,116,120,0),22 },
+       { IPv4(24,116,124,0),23 },
+       { IPv4(24,116,126,0),23 },
+       { IPv4(24,116,128,0),19 },
+       { IPv4(24,116,128,0),20 },
+       { IPv4(24,116,144,0),21 },
+       { IPv4(24,116,152,0),21 },
+       { IPv4(24,116,160,0),19 },
+       { IPv4(24,116,162,0),23 },
+       { IPv4(24,116,164,0),22 },
+       { IPv4(24,116,170,0),23 },
+       { IPv4(24,116,172,0),22 },
+       { IPv4(24,116,176,0),24 },
+       { IPv4(24,116,178,0),24 },
+       { IPv4(24,116,180,0),24 },
+       { IPv4(24,118,0,0),17 },
+       { IPv4(24,118,128,0),20 },
+       { IPv4(24,120,0,0),19 },
+       { IPv4(24,120,32,0),19 },
+       { IPv4(24,120,64,0),19 },
+       { IPv4(24,120,96,0),19 },
+       { IPv4(24,120,128,0),19 },
+       { IPv4(24,120,160,0),19 },
+       { IPv4(24,121,31,0),24 },
+       { IPv4(24,125,0,0),18 },
+       { IPv4(24,125,64,0),20 },
+       { IPv4(24,126,0,0),16 },
+       { IPv4(24,127,0,0),17 },
+       { IPv4(24,127,128,0),18 },
+       { IPv4(24,128,0,0),16 },
+       { IPv4(24,128,6,0),24 },
+       { IPv4(24,128,191,0),24 },
+       { IPv4(24,129,0,0),17 },
+       { IPv4(24,129,2,0),24 },
+       { IPv4(24,129,4,0),24 },
+       { IPv4(24,129,24,0),23 },
+       { IPv4(24,129,26,0),24 },
+       { IPv4(24,129,28,0),24 },
+       { IPv4(24,129,32,0),24 },
+       { IPv4(24,129,42,0),24 },
+       { IPv4(24,129,128,0),18 },
+       { IPv4(24,129,192,0),19 },
+       { IPv4(24,130,0,0),18 },
+       { IPv4(24,130,64,0),19 },
+       { IPv4(24,130,96,0),19 },
+       { IPv4(24,130,192,0),19 },
+       { IPv4(24,131,0,0),18 },
+       { IPv4(24,131,64,0),18 },
+       { IPv4(24,131,128,0),18 },
+       { IPv4(24,131,192,0),18 },
+       { IPv4(24,136,0,0),19 },
+       { IPv4(24,136,32,0),19 },
+       { IPv4(24,136,64,0),20 },
+       { IPv4(24,136,64,0),22 },
+       { IPv4(24,136,68,0),23 },
+       { IPv4(24,136,70,0),24 },
+       { IPv4(24,136,128,0),21 },
+       { IPv4(24,136,136,0),22 },
+       { IPv4(24,136,140,0),22 },
+       { IPv4(24,136,144,0),22 },
+       { IPv4(24,136,150,0),23 },
+       { IPv4(24,136,152,0),21 },
+       { IPv4(24,136,160,0),21 },
+       { IPv4(24,137,0,0),19 },
+       { IPv4(24,139,0,0),19 },
+       { IPv4(24,140,0,0),19 },
+       { IPv4(24,140,32,0),19 },
+       { IPv4(24,141,0,0),20 },
+       { IPv4(24,141,0,0),16 },
+       { IPv4(24,141,16,0),20 },
+       { IPv4(24,141,32,0),20 },
+       { IPv4(24,141,48,0),20 },
+       { IPv4(24,141,80,0),20 },
+       { IPv4(24,141,96,0),20 },
+       { IPv4(24,141,112,0),20 },
+       { IPv4(24,141,128,0),20 },
+       { IPv4(24,141,144,0),20 },
+       { IPv4(24,141,160,0),20 },
+       { IPv4(24,141,192,0),20 },
+       { IPv4(24,141,224,0),20 },
+       { IPv4(24,141,240,0),20 },
+       { IPv4(24,142,32,0),19 },
+       { IPv4(24,142,40,0),22 },
+       { IPv4(24,142,44,0),22 },
+       { IPv4(24,142,76,0),23 },
+       { IPv4(24,142,88,0),23 },
+       { IPv4(24,142,92,0),22 },
+       { IPv4(24,142,96,0),22 },
+       { IPv4(24,142,100,0),23 },
+       { IPv4(24,142,160,0),19 },
+       { IPv4(24,142,178,0),24 },
+       { IPv4(24,142,192,0),18 },
+       { IPv4(24,142,205,0),24 },
+       { IPv4(24,145,128,0),19 },
+       { IPv4(24,145,128,0),21 },
+       { IPv4(24,145,136,0),22 },
+       { IPv4(24,145,140,0),22 },
+       { IPv4(24,145,144,0),21 },
+       { IPv4(24,145,152,0),22 },
+       { IPv4(24,145,156,0),23 },
+       { IPv4(24,145,158,0),23 },
+       { IPv4(24,145,160,0),20 },
+       { IPv4(24,145,168,0),21 },
+       { IPv4(24,147,0,0),16 },
+       { IPv4(24,148,0,0),18 },
+       { IPv4(24,148,64,0),19 },
+       { IPv4(24,150,0,0),20 },
+       { IPv4(24,150,0,0),16 },
+       { IPv4(24,150,16,0),20 },
+       { IPv4(24,150,48,0),20 },
+       { IPv4(24,150,64,0),20 },
+       { IPv4(24,150,80,0),20 },
+       { IPv4(24,150,96,0),20 },
+       { IPv4(24,150,112,0),20 },
+       { IPv4(24,150,128,0),20 },
+       { IPv4(24,150,144,0),20 },
+       { IPv4(24,150,160,0),20 },
+       { IPv4(24,150,176,0),20 },
+       { IPv4(24,150,224,0),20 },
+       { IPv4(24,150,240,0),20 },
+       { IPv4(24,151,0,0),19 },
+       { IPv4(24,151,32,0),20 },
+       { IPv4(24,151,32,0),19 },
+       { IPv4(24,151,40,0),22 },
+       { IPv4(24,151,44,0),23 },
+       { IPv4(24,151,46,0),23 },
+       { IPv4(24,151,48,0),24 },
+       { IPv4(24,151,48,0),20 },
+       { IPv4(24,151,49,0),24 },
+       { IPv4(24,151,50,0),23 },
+       { IPv4(24,151,52,0),22 },
+       { IPv4(24,151,60,0),23 },
+       { IPv4(24,151,62,0),24 },
+       { IPv4(24,151,63,0),24 },
+       { IPv4(24,151,64,0),20 },
+       { IPv4(24,151,80,0),21 },
+       { IPv4(24,151,88,0),22 },
+       { IPv4(24,151,92,0),22 },
+       { IPv4(24,153,0,0),18 },
+       { IPv4(24,155,0,0),19 },
+       { IPv4(24,155,9,0),24 },
+       { IPv4(24,155,10,0),24 },
+       { IPv4(24,156,0,0),19 },
+       { IPv4(24,156,0,0),16 },
+       { IPv4(24,157,0,0),19 },
+       { IPv4(24,157,0,0),18 },
+       { IPv4(24,157,0,0),17 },
+       { IPv4(24,157,64,0),19 },
+       { IPv4(24,157,128,0),18 },
+       { IPv4(24,157,192,0),19 },
+       { IPv4(24,157,224,0),19 },
+       { IPv4(24,158,32,0),19 },
+       { IPv4(24,158,64,0),20 },
+       { IPv4(24,158,80,0),20 },
+       { IPv4(24,158,96,0),19 },
+       { IPv4(24,158,128,0),20 },
+       { IPv4(24,158,192,0),20 },
+       { IPv4(24,158,240,0),20 },
+       { IPv4(24,159,0,0),20 },
+       { IPv4(24,159,32,0),20 },
+       { IPv4(24,159,48,0),20 },
+       { IPv4(24,159,64,0),21 },
+       { IPv4(24,159,64,0),20 },
+       { IPv4(24,159,72,0),21 },
+       { IPv4(24,159,80,0),20 },
+       { IPv4(24,159,96,0),20 },
+       { IPv4(24,159,112,0),20 },
+       { IPv4(24,159,128,0),20 },
+       { IPv4(24,159,164,0),22 },
+       { IPv4(24,159,168,0),23 },
+       { IPv4(24,159,170,0),23 },
+       { IPv4(24,159,172,0),23 },
+       { IPv4(24,159,175,0),24 },
+       { IPv4(24,159,176,0),20 },
+       { IPv4(24,159,208,0),20 },
+       { IPv4(24,160,0,0),19 },
+       { IPv4(24,160,32,0),20 },
+       { IPv4(24,160,48,0),20 },
+       { IPv4(24,160,64,0),18 },
+       { IPv4(24,160,128,0),19 },
+       { IPv4(24,160,160,0),19 },
+       { IPv4(24,160,192,0),19 },
+       { IPv4(24,160,224,0),19 },
+       { IPv4(24,161,0,0),18 },
+       { IPv4(24,161,64,0),19 },
+       { IPv4(24,161,96,0),19 },
+       { IPv4(24,161,128,0),19 },
+       { IPv4(24,161,160,0),19 },
+       { IPv4(24,161,192,0),19 },
+       { IPv4(24,161,224,0),19 },
+       { IPv4(24,162,0,0),18 },
+       { IPv4(24,162,64,0),19 },
+       { IPv4(24,162,96,0),19 },
+       { IPv4(24,162,128,0),19 },
+       { IPv4(24,162,160,0),19 },
+       { IPv4(24,162,192,0),19 },
+       { IPv4(24,162,224,0),19 },
+       { IPv4(24,163,0,0),19 },
+       { IPv4(24,163,32,0),20 },
+       { IPv4(24,163,48,0),20 },
+       { IPv4(24,163,64,0),19 },
+       { IPv4(24,163,96,0),19 },
+       { IPv4(24,163,128,0),20 },
+       { IPv4(24,163,144,0),20 },
+       { IPv4(24,163,160,0),19 },
+       { IPv4(24,163,192,0),19 },
+       { IPv4(24,163,224,0),20 },
+       { IPv4(24,163,240,0),20 },
+       { IPv4(24,164,0,0),19 },
+       { IPv4(24,164,0,0),18 },
+       { IPv4(24,164,32,0),19 },
+       { IPv4(24,164,64,0),19 },
+       { IPv4(24,164,96,0),19 },
+       { IPv4(24,164,128,0),19 },
+       { IPv4(24,164,160,0),20 },
+       { IPv4(24,164,176,0),20 },
+       { IPv4(24,164,192,0),19 },
+       { IPv4(24,164,224,0),19 },
+       { IPv4(24,165,0,0),19 },
+       { IPv4(24,165,32,0),19 },
+       { IPv4(24,165,64,0),20 },
+       { IPv4(24,165,80,0),20 },
+       { IPv4(24,165,96,0),20 },
+       { IPv4(24,165,112,0),20 },
+       { IPv4(24,165,128,0),18 },
+       { IPv4(24,165,192,0),19 },
+       { IPv4(24,165,224,0),19 },
+       { IPv4(24,166,0,0),19 },
+       { IPv4(24,166,0,0),18 },
+       { IPv4(24,166,32,0),19 },
+       { IPv4(24,166,64,0),18 },
+       { IPv4(24,166,128,0),19 },
+       { IPv4(24,166,160,0),19 },
+       { IPv4(24,166,192,0),19 },
+       { IPv4(24,166,224,0),20 },
+       { IPv4(24,166,240,0),20 },
+       { IPv4(24,167,0,0),18 },
+       { IPv4(24,167,64,0),19 },
+       { IPv4(24,167,96,0),19 },
+       { IPv4(24,167,128,0),19 },
+       { IPv4(24,167,160,0),19 },
+       { IPv4(24,167,192,0),19 },
+       { IPv4(24,167,224,0),19 },
+       { IPv4(24,168,0,0),18 },
+       { IPv4(24,168,64,0),19 },
+       { IPv4(24,168,96,0),19 },
+       { IPv4(24,168,128,0),19 },
+       { IPv4(24,168,192,0),19 },
+       { IPv4(24,168,224,0),19 },
+       { IPv4(24,169,0,0),19 },
+       { IPv4(24,169,32,0),19 },
+       { IPv4(24,169,64,0),19 },
+       { IPv4(24,169,96,0),19 },
+       { IPv4(24,169,128,0),19 },
+       { IPv4(24,169,160,0),20 },
+       { IPv4(24,169,176,0),20 },
+       { IPv4(24,169,192,0),20 },
+       { IPv4(24,169,208,0),20 },
+       { IPv4(24,169,224,0),20 },
+       { IPv4(24,169,240,0),20 },
+       { IPv4(24,170,0,0),19 },
+       { IPv4(24,170,32,0),19 },
+       { IPv4(24,170,64,0),19 },
+       { IPv4(24,170,96,0),19 },
+       { IPv4(24,170,128,0),20 },
+       { IPv4(24,170,144,0),21 },
+       { IPv4(24,170,152,0),22 },
+       { IPv4(24,170,156,0),23 },
+       { IPv4(24,170,158,0),23 },
+       { IPv4(24,176,0,0),17 },
+       { IPv4(24,176,0,0),13 },
+       { IPv4(24,176,0,0),14 },
+       { IPv4(24,176,128,0),17 },
+       { IPv4(24,177,0,0),17 },
+       { IPv4(24,177,128,0),17 },
+       { IPv4(24,178,0,0),16 },
+       { IPv4(24,179,0,0),17 },
+       { IPv4(24,179,128,0),17 },
+       { IPv4(24,180,0,0),15 },
+       { IPv4(24,180,0,0),17 },
+       { IPv4(24,180,128,0),17 },
+       { IPv4(24,181,0,0),17 },
+       { IPv4(24,181,128,0),17 },
+       { IPv4(24,182,0,0),16 },
+       { IPv4(24,183,0,0),16 },
+       { IPv4(24,196,16,0),20 },
+       { IPv4(24,196,32,0),20 },
+       { IPv4(24,196,48,0),20 },
+       { IPv4(24,196,160,0),20 },
+       { IPv4(24,196,176,0),20 },
+       { IPv4(24,196,200,0),21 },
+       { IPv4(24,196,224,0),20 },
+       { IPv4(24,196,241,0),24 },
+       { IPv4(24,196,244,0),22 },
+       { IPv4(24,196,252,0),22 },
+       { IPv4(24,197,0,0),24 },
+       { IPv4(24,197,2,0),24 },
+       { IPv4(24,197,4,0),22 },
+       { IPv4(24,197,8,0),22 },
+       { IPv4(24,197,12,0),22 },
+       { IPv4(24,197,16,0),22 },
+       { IPv4(24,197,32,0),19 },
+       { IPv4(24,197,64,0),19 },
+       { IPv4(24,197,96,0),21 },
+       { IPv4(24,197,104,0),23 },
+       { IPv4(24,197,112,0),20 },
+       { IPv4(24,197,128,0),20 },
+       { IPv4(24,198,0,0),18 },
+       { IPv4(24,198,64,0),19 },
+       { IPv4(24,198,96,0),20 },
+       { IPv4(24,204,0,0),17 },
+       { IPv4(24,206,0,0),20 },
+       { IPv4(24,206,64,0),19 },
+       { IPv4(24,206,160,0),20 },
+       { IPv4(24,207,0,0),18 },
+       { IPv4(24,207,128,0),18 },
+       { IPv4(24,208,0,0),18 },
+       { IPv4(24,213,0,0),21 },
+       { IPv4(24,213,8,0),22 },
+       { IPv4(24,213,12,0),22 },
+       { IPv4(24,213,20,0),22 },
+       { IPv4(24,213,24,0),22 },
+       { IPv4(24,213,28,0),22 },
+       { IPv4(24,213,32,0),19 },
+       { IPv4(24,213,60,0),24 },
+       { IPv4(24,214,0,0),18 },
+       { IPv4(24,214,1,0),24 },
+       { IPv4(24,214,3,0),24 },
+       { IPv4(24,214,4,0),24 },
+       { IPv4(24,214,5,0),24 },
+       { IPv4(24,214,6,0),24 },
+       { IPv4(24,214,7,0),24 },
+       { IPv4(24,214,8,0),24 },
+       { IPv4(24,214,9,0),24 },
+       { IPv4(24,214,10,0),24 },
+       { IPv4(24,214,11,0),24 },
+       { IPv4(24,214,12,0),24 },
+       { IPv4(24,214,13,0),24 },
+       { IPv4(24,214,14,0),24 },
+       { IPv4(24,214,15,0),24 },
+       { IPv4(24,214,16,0),24 },
+       { IPv4(24,214,17,0),24 },
+       { IPv4(24,214,18,0),24 },
+       { IPv4(24,214,19,0),24 },
+       { IPv4(24,214,20,0),24 },
+       { IPv4(24,214,21,0),24 },
+       { IPv4(24,214,22,0),24 },
+       { IPv4(24,214,23,0),24 },
+       { IPv4(24,214,24,0),24 },
+       { IPv4(24,214,25,0),24 },
+       { IPv4(24,214,26,0),24 },
+       { IPv4(24,214,27,0),24 },
+       { IPv4(24,214,28,0),24 },
+       { IPv4(24,214,29,0),24 },
+       { IPv4(24,214,30,0),24 },
+       { IPv4(24,214,31,0),24 },
+       { IPv4(24,214,32,0),24 },
+       { IPv4(24,214,33,0),24 },
+       { IPv4(24,214,34,0),24 },
+       { IPv4(24,214,35,0),24 },
+       { IPv4(24,214,36,0),24 },
+       { IPv4(24,214,37,0),24 },
+       { IPv4(24,214,38,0),24 },
+       { IPv4(24,214,39,0),24 },
+       { IPv4(24,214,40,0),24 },
+       { IPv4(24,214,41,0),24 },
+       { IPv4(24,214,42,0),24 },
+       { IPv4(24,214,43,0),24 },
+       { IPv4(24,214,44,0),24 },
+       { IPv4(24,214,45,0),24 },
+       { IPv4(24,214,46,0),24 },
+       { IPv4(24,214,47,0),24 },
+       { IPv4(24,214,48,0),24 },
+       { IPv4(24,214,49,0),24 },
+       { IPv4(24,214,50,0),24 },
+       { IPv4(24,214,51,0),24 },
+       { IPv4(24,214,52,0),24 },
+       { IPv4(24,214,53,0),24 },
+       { IPv4(24,214,54,0),24 },
+       { IPv4(24,214,55,0),24 },
+       { IPv4(24,214,56,0),24 },
+       { IPv4(24,214,57,0),24 },
+       { IPv4(24,214,58,0),24 },
+       { IPv4(24,214,59,0),24 },
+       { IPv4(24,214,60,0),24 },
+       { IPv4(24,214,61,0),24 },
+       { IPv4(24,214,62,0),24 },
+       { IPv4(24,214,63,0),24 },
+       { IPv4(24,214,64,0),24 },
+       { IPv4(24,214,65,0),24 },
+       { IPv4(24,214,66,0),24 },
+       { IPv4(24,214,67,0),24 },
+       { IPv4(24,214,68,0),24 },
+       { IPv4(24,214,69,0),24 },
+       { IPv4(24,214,70,0),24 },
+       { IPv4(24,214,71,0),24 },
+       { IPv4(24,214,74,0),24 },
+       { IPv4(24,214,75,0),24 },
+       { IPv4(24,214,78,0),24 },
+       { IPv4(24,214,80,0),24 },
+       { IPv4(24,214,81,0),24 },
+       { IPv4(24,214,82,0),24 },
+       { IPv4(24,214,86,0),24 },
+       { IPv4(24,214,87,0),24 },
+       { IPv4(24,214,90,0),24 },
+       { IPv4(24,214,91,0),24 },
+       { IPv4(24,214,93,0),24 },
+       { IPv4(24,214,94,0),24 },
+       { IPv4(24,214,96,0),24 },
+       { IPv4(24,214,97,0),24 },
+       { IPv4(24,214,98,0),24 },
+       { IPv4(24,214,99,0),24 },
+       { IPv4(24,214,102,0),24 },
+       { IPv4(24,214,103,0),24 },
+       { IPv4(24,214,105,0),24 },
+       { IPv4(24,214,108,0),24 },
+       { IPv4(24,214,109,0),24 },
+       { IPv4(24,214,113,0),24 },
+       { IPv4(24,214,114,0),24 },
+       { IPv4(24,214,115,0),24 },
+       { IPv4(24,214,120,0),24 },
+       { IPv4(24,214,121,0),24 },
+       { IPv4(24,214,122,0),24 },
+       { IPv4(24,214,126,0),24 },
+       { IPv4(24,214,128,0),19 },
+       { IPv4(24,214,133,0),24 },
+       { IPv4(24,214,134,0),24 },
+       { IPv4(24,215,0,0),19 },
+       { IPv4(24,215,16,0),20 },
+       { IPv4(24,215,24,0),21 },
+       { IPv4(24,215,32,0),20 },
+       { IPv4(24,216,10,0),24 },
+       { IPv4(24,216,141,0),24 },
+       { IPv4(24,216,184,0),24 },
+       { IPv4(24,216,241,0),24 },
+       { IPv4(24,216,252,0),24 },
+       { IPv4(24,216,253,0),24 },
+       { IPv4(24,216,254,0),24 },
+       { IPv4(24,216,255,0),24 },
+       { IPv4(24,217,0,0),16 },
+       { IPv4(24,218,0,0),16 },
+       { IPv4(24,218,188,0),22 },
+       { IPv4(24,221,208,0),20 },
+       { IPv4(24,222,112,0),20 },
+       { IPv4(24,223,0,0),18 },
+       { IPv4(24,223,64,0),20 },
+       { IPv4(24,226,0,0),17 },
+       { IPv4(24,226,32,0),20 },
+       { IPv4(24,226,48,0),20 },
+       { IPv4(24,226,64,0),20 },
+       { IPv4(24,226,80,0),20 },
+       { IPv4(24,226,96,0),20 },
+       { IPv4(24,226,112,0),20 },
+       { IPv4(24,227,0,0),19 },
+       { IPv4(24,228,0,0),18 },
+       { IPv4(24,228,64,0),19 },
+       { IPv4(24,229,0,0),17 },
+       { IPv4(24,229,128,0),19 },
+       { IPv4(24,229,160,0),20 },
+       { IPv4(24,234,0,0),19 },
+       { IPv4(24,234,32,0),19 },
+       { IPv4(24,234,64,0),19 },
+       { IPv4(24,234,96,0),19 },
+       { IPv4(24,234,128,0),19 },
+       { IPv4(24,234,160,0),19 },
+       { IPv4(24,234,192,0),19 },
+       { IPv4(24,234,224,0),19 },
+       { IPv4(24,236,0,0),19 },
+       { IPv4(24,240,12,0),24 },
+       { IPv4(24,240,26,0),24 },
+       { IPv4(24,240,97,0),24 },
+       { IPv4(24,240,100,0),24 },
+       { IPv4(24,240,119,0),24 },
+       { IPv4(24,240,122,0),24 },
+       { IPv4(24,240,144,0),24 },
+       { IPv4(24,240,145,0),24 },
+       { IPv4(24,240,146,0),24 },
+       { IPv4(24,240,147,0),24 },
+       { IPv4(24,240,148,0),24 },
+       { IPv4(24,240,149,0),24 },
+       { IPv4(24,240,180,0),24 },
+       { IPv4(24,240,186,0),24 },
+       { IPv4(24,240,194,0),24 },
+       { IPv4(24,240,199,0),24 },
+       { IPv4(24,240,207,0),24 },
+       { IPv4(24,240,213,0),24 },
+       { IPv4(24,240,229,0),24 },
+       { IPv4(24,240,230,0),24 },
+       { IPv4(24,240,232,0),24 },
+       { IPv4(24,240,233,0),24 },
+       { IPv4(24,240,234,0),24 },
+       { IPv4(24,240,235,0),24 },
+       { IPv4(24,240,236,0),24 },
+       { IPv4(24,240,242,0),24 },
+       { IPv4(24,240,243,0),24 },
+       { IPv4(24,240,244,0),24 },
+       { IPv4(24,241,54,0),24 },
+       { IPv4(24,241,71,0),24 },
+       { IPv4(24,241,88,0),24 },
+       { IPv4(24,241,105,0),24 },
+       { IPv4(24,241,111,0),24 },
+       { IPv4(24,241,120,0),24 },
+       { IPv4(24,241,128,0),24 },
+       { IPv4(24,241,135,0),24 },
+       { IPv4(24,241,154,0),24 },
+       { IPv4(24,241,167,0),24 },
+       { IPv4(24,241,185,0),24 },
+       { IPv4(24,242,0,0),19 },
+       { IPv4(24,242,32,0),19 },
+       { IPv4(24,242,64,0),19 },
+       { IPv4(24,242,96,0),19 },
+       { IPv4(24,242,128,0),20 },
+       { IPv4(24,242,144,0),20 },
+       { IPv4(24,242,160,0),20 },
+       { IPv4(24,242,176,0),20 },
+       { IPv4(24,244,0,0),20 },
+       { IPv4(24,244,16,0),20 },
+       { IPv4(24,245,0,0),18 },
+       { IPv4(24,245,64,0),20 },
+       { IPv4(24,246,0,0),17 },
+       { IPv4(24,246,9,0),24 },
+       { IPv4(24,246,10,0),23 },
+       { IPv4(24,246,12,0),22 },
+       { IPv4(24,246,16,0),23 },
+       { IPv4(24,246,38,0),24 },
+       { IPv4(24,246,60,0),24 },
+       { IPv4(24,246,122,0),24 },
+       { IPv4(24,246,128,0),18 },
+       { IPv4(24,247,0,0),20 },
+       { IPv4(24,247,16,0),21 },
+       { IPv4(24,247,32,0),20 },
+       { IPv4(24,247,48,0),20 },
+       { IPv4(24,247,48,0),21 },
+       { IPv4(24,247,64,0),20 },
+       { IPv4(24,247,96,0),20 },
+       { IPv4(24,247,112,0),20 },
+       { IPv4(24,247,128,0),20 },
+       { IPv4(24,247,144,0),20 },
+       { IPv4(24,247,152,0),22 },
+       { IPv4(24,247,176,0),20 },
+       { IPv4(24,248,0,0),17 },
+       { IPv4(24,248,0,0),13 },
+       { IPv4(24,248,128,0),17 },
+       { IPv4(24,249,0,0),17 },
+       { IPv4(24,249,128,0),17 },
+       { IPv4(24,250,0,0),18 },
+       { IPv4(24,250,64,0),18 },
+       { IPv4(24,250,128,0),18 },
+       { IPv4(24,250,192,0),19 },
+       { IPv4(24,250,224,0),19 },
+       { IPv4(24,251,0,0),17 },
+       { IPv4(24,251,128,0),18 },
+       { IPv4(24,251,192,0),18 },
+       { IPv4(24,252,0,0),17 },
+       { IPv4(24,252,128,0),17 },
+       { IPv4(24,253,0,0),17 },
+       { IPv4(24,253,128,0),17 },
+       { IPv4(24,254,0,0),17 },
+       { IPv4(24,254,128,0),17 },
+       { IPv4(24,255,0,0),17 },
+       { IPv4(24,255,128,0),17 },
+       { IPv4(32,0,0,0),8 },
+       { IPv4(32,96,0,0),13 },
+       { IPv4(32,96,43,0),24 },
+       { IPv4(32,96,48,0),24 },
+       { IPv4(32,96,62,0),24 },
+       { IPv4(32,96,83,0),24 },
+       { IPv4(32,96,86,0),24 },
+       { IPv4(32,96,111,0),24 },
+       { IPv4(32,96,224,0),19 },
+       { IPv4(32,97,17,0),24 },
+       { IPv4(32,97,80,0),21 },
+       { IPv4(32,97,87,0),24 },
+       { IPv4(32,97,91,0),24 },
+       { IPv4(32,97,100,0),24 },
+       { IPv4(32,97,104,0),24 },
+       { IPv4(32,97,110,0),24 },
+       { IPv4(32,97,132,0),24 },
+       { IPv4(32,97,135,0),24 },
+       { IPv4(32,97,136,0),24 },
+       { IPv4(32,97,152,0),24 },
+       { IPv4(32,97,155,0),24 },
+       { IPv4(32,97,159,0),24 },
+       { IPv4(32,97,167,0),24 },
+       { IPv4(32,97,168,0),23 },
+       { IPv4(32,97,170,0),24 },
+       { IPv4(32,97,182,0),24 },
+       { IPv4(32,97,183,0),24 },
+       { IPv4(32,97,185,0),24 },
+       { IPv4(32,97,198,0),24 },
+       { IPv4(32,97,212,0),24 },
+       { IPv4(32,97,217,0),24 },
+       { IPv4(32,97,219,0),24 },
+       { IPv4(32,97,225,0),24 },
+       { IPv4(32,97,240,0),23 },
+       { IPv4(32,97,242,0),24 },
+       { IPv4(32,97,252,0),22 },
+       { IPv4(32,102,134,0),23 },
+       { IPv4(32,102,136,0),22 },
+       { IPv4(32,102,140,0),23 },
+       { IPv4(32,102,197,0),24 },
+       { IPv4(32,102,198,0),24 },
+       { IPv4(32,102,199,0),24 },
+       { IPv4(32,102,200,0),24 },
+       { IPv4(32,102,201,0),24 },
+       { IPv4(32,102,202,0),24 },
+       { IPv4(32,102,203,0),24 },
+       { IPv4(32,102,204,0),24 },
+       { IPv4(32,102,205,0),24 },
+       { IPv4(32,102,206,0),24 },
+       { IPv4(32,102,207,0),24 },
+       { IPv4(32,102,208,0),24 },
+       { IPv4(32,102,233,0),24 },
+       { IPv4(32,102,234,0),24 },
+       { IPv4(32,102,235,0),24 },
+       { IPv4(32,102,236,0),24 },
+       { IPv4(32,102,237,0),24 },
+       { IPv4(32,102,238,0),24 },
+       { IPv4(32,102,239,0),24 },
+       { IPv4(32,102,240,0),24 },
+       { IPv4(32,102,241,0),24 },
+       { IPv4(32,102,242,0),24 },
+       { IPv4(32,102,243,0),24 },
+       { IPv4(32,102,244,0),24 },
+       { IPv4(32,104,0,0),15 },
+       { IPv4(32,107,14,0),24 },
+       { IPv4(32,107,31,0),24 },
+       { IPv4(32,224,112,0),24 },
+       { IPv4(32,224,249,0),24 },
+       { IPv4(32,227,135,0),24 },
+       { IPv4(32,227,215,0),24 },
+       { IPv4(32,227,216,0),24 },
+       { IPv4(32,227,217,0),24 },
+       { IPv4(32,227,218,0),24 },
+       { IPv4(32,227,219,0),24 },
+       { IPv4(32,227,220,0),24 },
+       { IPv4(32,227,233,0),24 },
+       { IPv4(32,227,234,0),24 },
+       { IPv4(32,227,235,0),24 },
+       { IPv4(32,227,236,0),24 },
+       { IPv4(32,227,237,0),24 },
+       { IPv4(32,227,238,0),24 },
+       { IPv4(32,228,128,0),19 },
+       { IPv4(32,229,0,0),18 },
+       { IPv4(32,229,64,0),18 },
+       { IPv4(32,229,128,0),18 },
+       { IPv4(32,229,192,0),18 },
+       { IPv4(33,0,0,0),8 },
+       { IPv4(35,35,96,0),20 },
+       { IPv4(35,35,144,0),20 },
+       { IPv4(35,35,176,0),20 },
+       { IPv4(38,156,161,0),24 },
+       { IPv4(38,195,234,0),24 },
+       { IPv4(38,233,177,0),24 },
+       { IPv4(38,241,180,0),24 },
+       { IPv4(38,241,183,0),24 },
+       { IPv4(40,0,96,0),22 },
+       { IPv4(44,0,0,0),11 },
+       { IPv4(44,4,129,0),24 },
+       { IPv4(44,32,0,0),13 },
+       { IPv4(44,40,0,0),14 },
+       { IPv4(44,46,0,0),15 },
+       { IPv4(44,48,0,0),12 },
+       { IPv4(44,64,0,0),10 },
+       { IPv4(44,128,0,0),9 },
+       { IPv4(44,166,0,0),16 },
+       { IPv4(47,8,0,0),14 },
+       { IPv4(47,46,0,0),15 },
+       { IPv4(47,46,48,0),20 },
+       { IPv4(47,46,160,0),19 },
+       { IPv4(47,46,192,0),20 },
+       { IPv4(47,46,208,0),20 },
+       { IPv4(47,46,234,0),23 },
+       { IPv4(47,47,224,0),21 },
+       { IPv4(47,47,240,0),21 },
+       { IPv4(47,153,64,0),18 },
+       { IPv4(47,153,128,0),18 },
+       { IPv4(47,249,0,0),16 },
+       { IPv4(47,249,128,0),17 },
+       { IPv4(53,244,0,0),19 },
+       { IPv4(55,0,0,0),8 },
+       { IPv4(56,0,64,0),19 },
+       { IPv4(56,0,128,0),18 },
+       { IPv4(56,0,128,0),19 },
+       { IPv4(56,0,160,0),19 },
+       { IPv4(61,6,0,0),17 },
+       { IPv4(61,6,128,0),18 },
+       { IPv4(61,8,0,0),19 },
+       { IPv4(61,8,30,0),24 },
+       { IPv4(61,8,96,0),19 },
+       { IPv4(61,8,242,0),24 },
+       { IPv4(61,8,243,0),24 },
+       { IPv4(61,8,244,0),24 },
+       { IPv4(61,8,245,0),24 },
+       { IPv4(61,8,246,0),24 },
+       { IPv4(61,8,247,0),24 },
+       { IPv4(61,8,248,0),24 },
+       { IPv4(61,8,249,0),24 },
+       { IPv4(61,8,250,0),24 },
+       { IPv4(61,8,251,0),24 },
+       { IPv4(61,9,0,0),17 },
+       { IPv4(61,9,73,0),24 },
+       { IPv4(61,9,74,0),24 },
+       { IPv4(61,9,75,0),24 },
+       { IPv4(61,9,76,0),24 },
+       { IPv4(61,9,77,0),24 },
+       { IPv4(61,9,78,0),24 },
+       { IPv4(61,9,112,0),24 },
+       { IPv4(61,9,126,0),24 },
+       { IPv4(61,10,0,0),17 },
+       { IPv4(61,10,128,0),17 },
+       { IPv4(61,11,0,0),19 },
+       { IPv4(61,11,12,0),22 },
+       { IPv4(61,11,24,0),21 },
+       { IPv4(61,11,32,0),20 },
+       { IPv4(61,11,36,0),22 },
+       { IPv4(61,11,48,0),21 },
+       { IPv4(61,13,0,0),16 },
+       { IPv4(61,14,32,0),22 },
+       { IPv4(61,15,0,0),17 },
+       { IPv4(61,16,0,0),17 },
+       { IPv4(61,18,0,0),18 },
+       { IPv4(61,18,0,0),17 },
+       { IPv4(61,18,64,0),18 },
+       { IPv4(61,18,128,0),17 },
+       { IPv4(61,18,128,0),18 },
+       { IPv4(61,18,192,0),18 },
+       { IPv4(61,20,0,0),16 },
+       { IPv4(61,30,0,0),19 },
+       { IPv4(61,30,0,0),16 },
+       { IPv4(61,30,64,0),19 },
+       { IPv4(61,30,128,0),20 },
+       { IPv4(61,30,144,0),21 },
+       { IPv4(61,30,176,0),20 },
+       { IPv4(61,30,192,0),21 },
+       { IPv4(61,32,0,0),13 },
+       { IPv4(61,33,241,0),24 },
+       { IPv4(61,33,244,0),24 },
+       { IPv4(61,37,254,0),24 },
+       { IPv4(61,40,0,0),14 },
+       { IPv4(61,48,0,0),16 },
+       { IPv4(61,56,192,0),19 },
+       { IPv4(61,56,224,0),19 },
+       { IPv4(61,57,128,0),20 },
+       { IPv4(61,58,128,0),19 },
+       { IPv4(61,59,0,0),16 },
+       { IPv4(61,59,0,0),19 },
+       { IPv4(61,59,0,0),18 },
+       { IPv4(61,59,64,0),18 },
+       { IPv4(61,59,128,0),18 },
+       { IPv4(61,59,192,0),18 },
+       { IPv4(61,60,0,0),19 },
+       { IPv4(61,61,0,0),21 },
+       { IPv4(61,61,8,0),21 },
+       { IPv4(61,61,16,0),21 },
+       { IPv4(61,61,24,0),21 },
+       { IPv4(61,61,32,0),20 },
+       { IPv4(61,61,48,0),21 },
+       { IPv4(61,61,48,0),20 },
+       { IPv4(61,61,56,0),21 },
+       { IPv4(61,68,0,0),15 },
+       { IPv4(61,70,0,0),16 },
+       { IPv4(61,71,0,0),17 },
+       { IPv4(61,72,0,0),13 },
+       { IPv4(61,72,102,0),23 },
+       { IPv4(61,72,104,0),21 },
+       { IPv4(61,73,64,0),24 },
+       { IPv4(61,73,152,0),24 },
+       { IPv4(61,78,50,0),24 },
+       { IPv4(61,78,74,0),24 },
+       { IPv4(61,78,126,0),24 },
+       { IPv4(61,78,127,0),24 },
+       { IPv4(61,78,128,0),24 },
+       { IPv4(61,80,0,0),14 },
+       { IPv4(61,84,0,0),15 },
+       { IPv4(61,96,0,0),17 },
+       { IPv4(61,96,20,0),22 },
+       { IPv4(61,96,66,0),23 },
+       { IPv4(61,96,68,0),22 },
+       { IPv4(61,96,72,0),22 },
+       { IPv4(61,96,96,0),21 },
+       { IPv4(61,96,108,0),22 },
+       { IPv4(61,96,116,0),22 },
+       { IPv4(61,96,124,0),22 },
+       { IPv4(61,114,64,0),20 },
+       { IPv4(61,114,80,0),20 },
+       { IPv4(61,114,128,0),19 },
+       { IPv4(61,115,208,0),20 },
+       { IPv4(61,115,240,0),20 },
+       { IPv4(61,117,0,0),17 },
+       { IPv4(61,120,0,0),17 },
+       { IPv4(61,120,144,0),20 },
+       { IPv4(61,120,192,0),20 },
+       { IPv4(61,121,224,0),20 },
+       { IPv4(61,122,48,0),20 },
+       { IPv4(61,122,128,0),18 },
+       { IPv4(61,122,208,0),20 },
+       { IPv4(61,122,240,0),20 },
+       { IPv4(61,125,160,0),20 },
+       { IPv4(61,128,96,0),19 },
+       { IPv4(61,128,128,0),17 },
+       { IPv4(61,129,0,0),16 },
+       { IPv4(61,130,0,0),17 },
+       { IPv4(61,130,128,0),17 },
+       { IPv4(61,131,0,0),17 },
+       { IPv4(61,131,128,0),17 },
+       { IPv4(61,132,0,0),17 },
+       { IPv4(61,132,128,0),17 },
+       { IPv4(61,133,0,0),17 },
+       { IPv4(61,133,128,0),18 },
+       { IPv4(61,133,192,0),19 },
+       { IPv4(61,133,224,0),19 },
+       { IPv4(61,134,0,0),18 },
+       { IPv4(61,134,128,0),18 },
+       { IPv4(61,134,192,0),18 },
+       { IPv4(61,135,0,0),17 },
+       { IPv4(61,135,128,0),19 },
+       { IPv4(61,136,0,0),18 },
+       { IPv4(61,136,64,0),18 },
+       { IPv4(61,136,128,0),18 },
+       { IPv4(61,137,0,0),17 },
+       { IPv4(61,137,128,0),17 },
+       { IPv4(61,138,0,0),18 },
+       { IPv4(61,138,64,0),18 },
+       { IPv4(61,138,128,0),18 },
+       { IPv4(61,138,192,0),19 },
+       { IPv4(61,138,224,0),19 },
+       { IPv4(61,139,0,0),17 },
+       { IPv4(61,139,128,0),18 },
+       { IPv4(61,139,128,0),17 },
+       { IPv4(61,139,192,0),18 },
+       { IPv4(61,140,0,0),14 },
+       { IPv4(61,144,0,0),15 },
+       { IPv4(61,146,0,0),16 },
+       { IPv4(61,147,0,0),16 },
+       { IPv4(61,148,0,0),15 },
+       { IPv4(61,150,0,0),17 },
+       { IPv4(61,150,128,0),17 },
+       { IPv4(61,151,0,0),16 },
+       { IPv4(61,152,0,0),16 },
+       { IPv4(61,153,0,0),16 },
+       { IPv4(61,154,0,0),16 },
+       { IPv4(61,155,0,0),16 },
+       { IPv4(61,156,0,0),16 },
+       { IPv4(61,157,0,0),16 },
+       { IPv4(61,158,0,0),17 },
+       { IPv4(61,158,128,0),17 },
+       { IPv4(61,159,0,0),18 },
+       { IPv4(61,159,64,0),18 },
+       { IPv4(61,159,128,0),18 },
+       { IPv4(61,159,192,0),18 },
+       { IPv4(61,160,0,0),16 },
+       { IPv4(61,161,0,0),18 },
+       { IPv4(61,161,128,0),17 },
+       { IPv4(61,163,0,0),16 },
+       { IPv4(61,164,0,0),16 },
+       { IPv4(61,165,0,0),16 },
+       { IPv4(61,166,0,0),16 },
+       { IPv4(61,167,0,0),17 },
+       { IPv4(61,167,0,0),16 },
+       { IPv4(61,167,128,0),17 },
+       { IPv4(61,168,0,0),16 },
+       { IPv4(61,169,0,0),16 },
+       { IPv4(61,170,0,0),16 },
+       { IPv4(61,171,0,0),16 },
+       { IPv4(61,172,0,0),16 },
+       { IPv4(61,172,0,0),15 },
+       { IPv4(61,173,0,0),16 },
+       { IPv4(61,174,0,0),16 },
+       { IPv4(61,175,0,0),16 },
+       { IPv4(61,176,0,0),16 },
+       { IPv4(61,177,0,0),16 },
+       { IPv4(61,178,0,0),16 },
+       { IPv4(61,179,0,0),16 },
+       { IPv4(61,180,0,0),17 },
+       { IPv4(61,180,128,0),17 },
+       { IPv4(61,181,0,0),16 },
+       { IPv4(61,182,0,0),16 },
+       { IPv4(61,183,0,0),16 },
+       { IPv4(61,184,0,0),16 },
+       { IPv4(61,185,0,0),16 },
+       { IPv4(61,186,0,0),17 },
+       { IPv4(61,186,64,0),18 },
+       { IPv4(61,186,128,0),17 },
+       { IPv4(61,187,0,0),16 },
+       { IPv4(61,188,0,0),16 },
+       { IPv4(61,189,0,0),17 },
+       { IPv4(61,189,128,0),17 },
+       { IPv4(61,190,0,0),16 },
+       { IPv4(61,193,0,0),17 },
+       { IPv4(61,193,144,0),20 },
+       { IPv4(61,195,48,0),21 },
+       { IPv4(61,195,64,0),20 },
+       { IPv4(61,195,96,0),19 },
+       { IPv4(61,195,128,0),20 },
+       { IPv4(61,195,224,0),20 },
+       { IPv4(61,198,16,0),20 },
+       { IPv4(61,198,64,0),19 },
+       { IPv4(61,198,128,0),17 },
+       { IPv4(61,200,80,0),20 },
+       { IPv4(61,200,128,0),17 },
+       { IPv4(61,202,0,0),17 },
+       { IPv4(61,202,128,0),18 },
+       { IPv4(61,203,0,0),17 },
+       { IPv4(61,203,176,0),20 },
+       { IPv4(61,203,192,0),19 },
+       { IPv4(61,204,0,0),17 },
+       { IPv4(61,205,0,0),20 },
+       { IPv4(61,205,64,0),20 },
+       { IPv4(61,205,80,0),20 },
+       { IPv4(61,205,96,0),20 },
+       { IPv4(61,205,112,0),20 },
+       { IPv4(61,206,0,0),20 },
+       { IPv4(61,206,96,0),20 },
+       { IPv4(61,206,112,0),20 },
+       { IPv4(61,206,224,0),20 },
+       { IPv4(61,211,128,0),20 },
+       { IPv4(61,211,128,0),23 },
+       { IPv4(61,211,130,0),24 },
+       { IPv4(61,211,176,0),20 },
+       { IPv4(61,213,128,0),20 },
+       { IPv4(61,213,144,0),20 },
+       { IPv4(61,213,160,0),19 },
+       { IPv4(61,213,192,0),21 },
+       { IPv4(61,213,208,0),20 },
+       { IPv4(61,213,240,0),20 },
+       { IPv4(61,215,176,0),20 },
+       { IPv4(61,215,208,0),22 },
+       { IPv4(61,215,240,0),20 },
+       { IPv4(61,216,0,0),18 },
+       { IPv4(61,216,64,0),18 },
+       { IPv4(61,217,0,0),16 },
+       { IPv4(61,220,0,0),16 },
+       { IPv4(61,226,0,0),16 },
+       { IPv4(61,227,0,0),16 },
+       { IPv4(61,248,0,0),17 },
+       { IPv4(61,248,0,0),16 },
+       { IPv4(61,248,128,0),17 },
+       { IPv4(61,250,0,0),18 },
+       { IPv4(61,250,128,0),18 },
+       { IPv4(61,251,0,0),20 },
+       { IPv4(61,251,48,0),20 },
+       { IPv4(61,251,128,0),20 },
+       { IPv4(61,251,144,0),20 },
+       { IPv4(61,251,160,0),20 },
+       { IPv4(61,251,224,0),19 },
+       { IPv4(61,252,0,0),21 },
+       { IPv4(61,252,8,0),22 },
+       { IPv4(61,252,12,0),23 },
+       { IPv4(61,252,14,0),23 },
+       { IPv4(61,252,32,0),19 },
+       { IPv4(61,252,128,0),19 },
+       { IPv4(61,252,192,0),19 },
+       { IPv4(61,253,0,0),17 },
+       { IPv4(61,254,0,0),15 },
+       { IPv4(62,1,0,0),16 },
+       { IPv4(62,2,0,0),16 },
+       { IPv4(62,3,0,0),19 },
+       { IPv4(62,4,64,0),19 },
+       { IPv4(62,5,0,0),17 },
+       { IPv4(62,6,0,0),16 },
+       { IPv4(62,7,0,0),16 },
+       { IPv4(62,8,0,0),19 },
+       { IPv4(62,8,10,0),24 },
+       { IPv4(62,8,10,0),23 },
+       { IPv4(62,8,11,0),24 },
+       { IPv4(62,12,0,0),19 },
+       { IPv4(62,13,192,0),19 },
+       { IPv4(62,14,0,0),15 },
+       { IPv4(62,28,0,0),19 },
+       { IPv4(62,29,0,0),17 },
+       { IPv4(62,30,0,0),15 },
+       { IPv4(62,38,0,0),16 },
+       { IPv4(62,40,128,0),17 },
+       { IPv4(62,42,0,0),16 },
+       { IPv4(62,48,0,0),19 },
+       { IPv4(62,48,64,0),19 },
+       { IPv4(62,48,96,0),19 },
+       { IPv4(62,49,0,0),16 },
+       { IPv4(62,56,0,0),17 },
+       { IPv4(62,58,0,0),15 },
+       { IPv4(62,72,64,0),19 },
+       { IPv4(62,74,0,0),21 },
+       { IPv4(62,74,12,0),22 },
+       { IPv4(62,74,16,0),20 },
+       { IPv4(62,74,32,0),19 },
+       { IPv4(62,74,64,0),18 },
+       { IPv4(62,74,128,0),18 },
+       { IPv4(62,74,192,0),19 },
+       { IPv4(62,74,240,0),20 },
+       { IPv4(62,77,0,0),19 },
+       { IPv4(62,80,64,0),20 },
+       { IPv4(62,80,80,0),20 },
+       { IPv4(62,97,145,0),24 },
+       { IPv4(62,100,0,0),18 },
+       { IPv4(62,102,0,0),17 },
+       { IPv4(62,103,0,0),16 },
+       { IPv4(62,104,56,0),24 },
+       { IPv4(62,104,174,0),24 },
+       { IPv4(62,108,64,0),19 },
+       { IPv4(62,108,192,0),19 },
+       { IPv4(62,111,0,0),17 },
+       { IPv4(62,113,0,0),19 },
+       { IPv4(62,116,128,0),19 },
+       { IPv4(62,128,192,0),20 },
+       { IPv4(62,128,208,0),20 },
+       { IPv4(62,129,128,0),19 },
+       { IPv4(62,131,0,0),16 },
+       { IPv4(62,134,0,0),16 },
+       { IPv4(62,151,0,0),19 },
+       { IPv4(62,151,32,0),19 },
+       { IPv4(62,151,64,0),18 },
+       { IPv4(62,152,128,0),19 },
+       { IPv4(62,166,0,0),16 },
+       { IPv4(62,168,128,0),19 },
+       { IPv4(62,170,0,0),15 },
+       { IPv4(62,172,0,0),21 },
+       { IPv4(62,172,0,0),16 },
+       { IPv4(62,172,4,0),22 },
+       { IPv4(62,180,0,0),16 },
+       { IPv4(62,185,204,0),24 },
+       { IPv4(62,186,35,0),24 },
+       { IPv4(62,186,236,0),24 },
+       { IPv4(62,192,0,0),19 },
+       { IPv4(62,200,0,0),16 },
+       { IPv4(62,204,96,0),19 },
+       { IPv4(62,205,0,0),19 },
+       { IPv4(62,212,128,0),19 },
+       { IPv4(62,215,0,0),16 },
+       { IPv4(62,216,192,0),22 },
+       { IPv4(62,229,128,0),20 },
+       { IPv4(62,229,130,0),24 },
+       { IPv4(62,229,132,0),24 },
+       { IPv4(62,232,0,0),16 },
+       { IPv4(62,232,20,0),24 },
+       { IPv4(62,232,21,0),24 },
+       { IPv4(62,232,22,0),24 },
+       { IPv4(62,232,46,0),24 },
+       { IPv4(62,232,72,0),24 },
+       { IPv4(62,233,0,0),19 },
+       { IPv4(62,238,0,0),16 },
+       { IPv4(62,250,0,0),16 },
+       { IPv4(62,251,0,0),17 },
+       { IPv4(62,252,0,0),17 },
+       { IPv4(62,252,0,0),14 },
+       { IPv4(62,252,0,0),16 },
+       { IPv4(62,252,128,0),17 },
+       { IPv4(62,253,128,0),17 },
+       { IPv4(62,254,0,0),16 },
+       { IPv4(62,254,128,0),17 },
+       { IPv4(62,255,128,0),17 },
+       { IPv4(63,64,59,0),24 },
+       { IPv4(63,64,126,0),24 },
+       { IPv4(63,64,130,0),23 },
+       { IPv4(63,64,228,0),23 },
+       { IPv4(63,64,247,0),24 },
+       { IPv4(63,64,254,0),23 },
+       { IPv4(63,65,84,0),23 },
+       { IPv4(63,65,127,0),24 },
+       { IPv4(63,65,176,0),22 },
+       { IPv4(63,65,221,0),24 },
+       { IPv4(63,65,236,0),22 },
+       { IPv4(63,65,248,0),22 },
+       { IPv4(63,66,112,0),24 },
+       { IPv4(63,66,113,0),24 },
+       { IPv4(63,66,240,0),24 },
+       { IPv4(63,66,246,0),24 },
+       { IPv4(63,67,32,0),24 },
+       { IPv4(63,67,73,0),24 },
+       { IPv4(63,67,116,0),23 },
+       { IPv4(63,67,188,0),24 },
+       { IPv4(63,67,196,0),24 },
+       { IPv4(63,67,205,0),24 },
+       { IPv4(63,68,54,0),23 },
+       { IPv4(63,68,112,0),24 },
+       { IPv4(63,68,218,0),23 },
+       { IPv4(63,69,98,0),23 },
+       { IPv4(63,69,114,0),23 },
+       { IPv4(63,69,228,0),22 },
+       { IPv4(63,69,230,0),24 },
+       { IPv4(63,69,231,0),24 },
+       { IPv4(63,69,248,0),21 },
+       { IPv4(63,70,161,0),24 },
+       { IPv4(63,70,164,0),23 },
+       { IPv4(63,70,212,0),24 },
+       { IPv4(63,71,3,0),24 },
+       { IPv4(63,71,94,0),23 },
+       { IPv4(63,71,166,0),23 },
+       { IPv4(63,72,61,0),24 },
+       { IPv4(63,72,216,0),24 },
+       { IPv4(63,73,1,0),24 },
+       { IPv4(63,73,4,0),22 },
+       { IPv4(63,73,10,0),24 },
+       { IPv4(63,73,11,0),24 },
+       { IPv4(63,73,12,0),24 },
+       { IPv4(63,73,58,0),24 },
+       { IPv4(63,73,70,0),24 },
+       { IPv4(63,73,78,0),24 },
+       { IPv4(63,73,123,0),24 },
+       { IPv4(63,73,130,0),23 },
+       { IPv4(63,73,136,0),22 },
+       { IPv4(63,73,169,0),24 },
+       { IPv4(63,73,182,0),24 },
+       { IPv4(63,73,204,0),24 },
+       { IPv4(63,73,224,0),22 },
+       { IPv4(63,73,225,0),24 },
+       { IPv4(63,73,227,0),24 },
+       { IPv4(63,73,238,0),24 },
+       { IPv4(63,73,240,0),21 },
+       { IPv4(63,74,32,0),23 },
+       { IPv4(63,74,89,0),24 },
+       { IPv4(63,74,160,0),24 },
+       { IPv4(63,74,163,0),24 },
+       { IPv4(63,74,226,0),24 },
+       { IPv4(63,75,68,0),23 },
+       { IPv4(63,75,74,0),24 },
+       { IPv4(63,75,78,0),24 },
+       { IPv4(63,75,79,0),24 },
+       { IPv4(63,75,91,0),24 },
+       { IPv4(63,75,167,0),24 },
+       { IPv4(63,75,194,0),24 },
+       { IPv4(63,76,98,0),24 },
+       { IPv4(63,76,137,0),24 },
+       { IPv4(63,76,243,0),24 },
+       { IPv4(63,76,244,0),24 },
+       { IPv4(63,76,245,0),24 },
+       { IPv4(63,77,90,0),24 },
+       { IPv4(63,78,12,0),22 },
+       { IPv4(63,78,137,0),24 },
+       { IPv4(63,79,29,0),24 },
+       { IPv4(63,79,104,0),24 },
+       { IPv4(63,79,105,0),24 },
+       { IPv4(63,79,122,0),24 },
+       { IPv4(63,79,128,0),21 },
+       { IPv4(63,80,13,0),24 },
+       { IPv4(63,80,45,0),24 },
+       { IPv4(63,81,224,0),24 },
+       { IPv4(63,81,227,0),24 },
+       { IPv4(63,81,228,0),24 },
+       { IPv4(63,81,231,0),24 },
+       { IPv4(63,81,234,0),24 },
+       { IPv4(63,81,236,0),24 },
+       { IPv4(63,81,238,0),24 },
+       { IPv4(63,81,239,0),24 },
+       { IPv4(63,82,26,0),24 },
+       { IPv4(63,82,40,0),22 },
+       { IPv4(63,82,43,0),24 },
+       { IPv4(63,82,44,0),23 },
+       { IPv4(63,82,80,0),24 },
+       { IPv4(63,82,241,0),24 },
+       { IPv4(63,83,36,0),23 },
+       { IPv4(63,83,95,0),24 },
+       { IPv4(63,83,140,0),22 },
+       { IPv4(63,83,208,0),20 },
+       { IPv4(63,83,240,0),22 },
+       { IPv4(63,83,244,0),22 },
+       { IPv4(63,84,15,0),24 },
+       { IPv4(63,84,62,0),24 },
+       { IPv4(63,84,63,0),24 },
+       { IPv4(63,84,72,0),22 },
+       { IPv4(63,84,74,0),24 },
+       { IPv4(63,84,122,0),24 },
+       { IPv4(63,84,135,0),24 },
+       { IPv4(63,84,140,0),22 },
+       { IPv4(63,84,231,0),24 },
+       { IPv4(63,85,19,0),24 },
+       { IPv4(63,85,72,0),24 },
+       { IPv4(63,85,181,0),24 },
+       { IPv4(63,85,212,0),24 },
+       { IPv4(63,85,213,0),24 },
+       { IPv4(63,86,126,0),24 },
+       { IPv4(63,87,84,0),24 },
+       { IPv4(63,87,170,0),23 },
+       { IPv4(63,87,173,0),24 },
+       { IPv4(63,87,220,0),23 },
+       { IPv4(63,88,88,0),23 },
+       { IPv4(63,88,172,0),24 },
+       { IPv4(63,89,141,0),24 },
+       { IPv4(63,89,167,0),24 },
+       { IPv4(63,90,24,0),23 },
+       { IPv4(63,90,66,0),23 },
+       { IPv4(63,90,77,0),24 },
+       { IPv4(63,90,79,0),24 },
+       { IPv4(63,91,110,0),23 },
+       { IPv4(63,91,145,0),24 },
+       { IPv4(63,91,172,0),24 },
+       { IPv4(63,91,173,0),24 },
+       { IPv4(63,92,80,0),21 },
+       { IPv4(63,92,88,0),22 },
+       { IPv4(63,92,133,0),24 },
+       { IPv4(63,92,172,0),24 },
+       { IPv4(63,92,192,0),23 },
+       { IPv4(63,92,194,0),24 },
+       { IPv4(63,93,152,0),24 },
+       { IPv4(63,93,196,0),24 },
+       { IPv4(63,93,197,0),24 },
+       { IPv4(63,93,203,0),24 },
+       { IPv4(63,94,99,0),24 },
+       { IPv4(63,94,105,0),24 },
+       { IPv4(63,95,0,0),21 },
+       { IPv4(63,95,86,0),24 },
+       { IPv4(63,95,193,0),24 },
+       { IPv4(63,95,216,0),24 },
+       { IPv4(63,95,254,0),23 },
+       { IPv4(63,96,60,0),24 },
+       { IPv4(63,96,61,0),24 },
+       { IPv4(63,96,62,0),24 },
+       { IPv4(63,96,63,0),24 },
+       { IPv4(63,97,1,0),24 },
+       { IPv4(63,97,144,0),24 },
+       { IPv4(63,97,145,0),24 },
+       { IPv4(63,97,179,0),24 },
+       { IPv4(63,97,180,0),22 },
+       { IPv4(63,98,125,0),24 },
+       { IPv4(63,98,127,0),24 },
+       { IPv4(63,98,188,0),22 },
+       { IPv4(63,99,9,0),24 },
+       { IPv4(63,99,41,0),24 },
+       { IPv4(63,99,120,0),22 },
+       { IPv4(63,99,128,0),21 },
+       { IPv4(63,99,152,0),23 },
+       { IPv4(63,100,17,0),24 },
+       { IPv4(63,100,108,0),24 },
+       { IPv4(63,100,128,0),23 },
+       { IPv4(63,100,130,0),23 },
+       { IPv4(63,100,192,0),21 },
+       { IPv4(63,100,195,0),24 },
+       { IPv4(63,100,199,0),24 },
+       { IPv4(63,100,200,0),22 },
+       { IPv4(63,100,202,0),23 },
+       { IPv4(63,100,204,0),24 },
+       { IPv4(63,100,204,0),22 },
+       { IPv4(63,100,205,0),24 },
+       { IPv4(63,100,206,0),24 },
+       { IPv4(63,100,207,0),24 },
+       { IPv4(63,100,208,0),24 },
+       { IPv4(63,100,209,0),24 },
+       { IPv4(63,100,210,0),24 },
+       { IPv4(63,100,211,0),24 },
+       { IPv4(63,100,212,0),22 },
+       { IPv4(63,100,216,0),22 },
+       { IPv4(63,100,222,0),23 },
+       { IPv4(63,101,54,0),23 },
+       { IPv4(63,101,83,0),24 },
+       { IPv4(63,101,150,0),23 },
+       { IPv4(63,102,5,0),24 },
+       { IPv4(63,102,48,0),23 },
+       { IPv4(63,102,72,0),21 },
+       { IPv4(63,102,192,0),22 },
+       { IPv4(63,102,218,0),24 },
+       { IPv4(63,102,224,0),22 },
+       { IPv4(63,103,40,0),22 },
+       { IPv4(63,103,83,0),24 },
+       { IPv4(63,103,128,0),24 },
+       { IPv4(63,103,129,0),24 },
+       { IPv4(63,103,130,0),24 },
+       { IPv4(63,103,132,0),23 },
+       { IPv4(63,103,134,0),24 },
+       { IPv4(63,103,135,0),24 },
+       { IPv4(63,103,136,0),24 },
+       { IPv4(63,103,137,0),24 },
+       { IPv4(63,103,138,0),24 },
+       { IPv4(63,103,139,0),24 },
+       { IPv4(63,103,140,0),24 },
+       { IPv4(63,103,141,0),24 },
+       { IPv4(63,103,142,0),24 },
+       { IPv4(63,103,143,0),24 },
+       { IPv4(63,103,182,0),24 },
+       { IPv4(63,103,202,0),24 },
+       { IPv4(63,104,48,0),22 },
+       { IPv4(63,104,84,0),22 },
+       { IPv4(63,104,160,0),24 },
+       { IPv4(63,104,192,0),21 },
+       { IPv4(63,104,240,0),24 },
+       { IPv4(63,104,243,0),24 },
+       { IPv4(63,105,7,0),24 },
+       { IPv4(63,105,100,0),24 },
+       { IPv4(63,105,126,0),23 },
+       { IPv4(63,105,192,0),20 },
+       { IPv4(63,106,49,0),24 },
+       { IPv4(63,106,156,0),23 },
+       { IPv4(63,107,10,0),23 },
+       { IPv4(63,107,112,0),24 },
+       { IPv4(63,107,128,0),24 },
+       { IPv4(63,107,135,0),24 },
+       { IPv4(63,107,224,0),23 },
+       { IPv4(63,108,88,0),21 },
+       { IPv4(63,108,112,0),22 },
+       { IPv4(63,108,116,0),24 },
+       { IPv4(63,108,125,0),24 },
+       { IPv4(63,108,133,0),24 },
+       { IPv4(63,109,64,0),24 },
+       { IPv4(63,109,65,0),24 },
+       { IPv4(63,109,68,0),24 },
+       { IPv4(63,109,71,0),24 },
+       { IPv4(63,109,72,0),24 },
+       { IPv4(63,109,75,0),24 },
+       { IPv4(63,109,76,0),24 },
+       { IPv4(63,109,77,0),24 },
+       { IPv4(63,109,78,0),24 },
+       { IPv4(63,109,79,0),24 },
+       { IPv4(63,109,240,0),20 },
+       { IPv4(63,110,83,0),24 },
+       { IPv4(63,110,128,0),20 },
+       { IPv4(63,110,160,0),21 },
+       { IPv4(63,110,188,0),24 },
+       { IPv4(63,112,144,0),24 },
+       { IPv4(63,112,168,0),22 },
+       { IPv4(63,113,38,0),23 },
+       { IPv4(63,113,73,0),24 },
+       { IPv4(63,113,80,0),20 },
+       { IPv4(63,114,0,0),24 },
+       { IPv4(63,114,74,0),23 },
+       { IPv4(63,114,88,0),23 },
+       { IPv4(63,114,195,0),24 },
+       { IPv4(63,115,198,0),24 },
+       { IPv4(63,117,40,0),21 },
+       { IPv4(63,117,79,0),24 },
+       { IPv4(63,117,116,0),24 },
+       { IPv4(63,117,117,0),24 },
+       { IPv4(63,117,118,0),24 },
+       { IPv4(63,117,119,0),24 },
+       { IPv4(63,118,66,0),24 },
+       { IPv4(63,118,148,0),24 },
+       { IPv4(63,118,152,0),23 },
+       { IPv4(63,118,165,0),24 },
+       { IPv4(63,118,246,0),24 },
+       { IPv4(63,118,247,0),24 },
+       { IPv4(63,120,80,0),24 },
+       { IPv4(63,120,115,0),24 },
+       { IPv4(63,120,127,0),24 },
+       { IPv4(63,120,154,0),24 },
+       { IPv4(63,121,1,0),24 },
+       { IPv4(63,121,28,0),22 },
+       { IPv4(63,121,58,0),24 },
+       { IPv4(63,121,84,0),24 },
+       { IPv4(63,121,111,0),24 },
+       { IPv4(63,121,136,0),22 },
+       { IPv4(63,121,144,0),23 },
+       { IPv4(63,121,159,0),24 },
+       { IPv4(63,122,8,0),24 },
+       { IPv4(63,122,36,0),24 },
+       { IPv4(63,122,152,0),24 },
+       { IPv4(63,122,154,0),24 },
+       { IPv4(63,122,155,0),24 },
+       { IPv4(63,123,103,0),24 },
+       { IPv4(63,124,17,0),24 },
+       { IPv4(63,124,32,0),19 },
+       { IPv4(63,124,124,0),24 },
+       { IPv4(63,124,132,0),24 },
+       { IPv4(63,125,6,0),23 },
+       { IPv4(63,125,15,0),24 },
+       { IPv4(63,125,162,0),23 },
+       { IPv4(63,125,222,0),24 },
+       { IPv4(63,125,226,0),24 },
+       { IPv4(63,126,178,0),24 },
+       { IPv4(63,126,208,0),21 },
+       { IPv4(63,127,10,0),23 },
+       { IPv4(63,127,192,0),21 },
+       { IPv4(63,136,64,0),20 },
+       { IPv4(63,136,80,0),22 },
+       { IPv4(63,137,26,0),24 },
+       { IPv4(63,137,252,0),22 },
+       { IPv4(63,139,32,0),20 },
+       { IPv4(63,140,55,0),24 },
+       { IPv4(63,140,132,0),24 },
+       { IPv4(63,140,134,0),24 },
+       { IPv4(63,140,137,0),24 },
+       { IPv4(63,144,15,0),24 },
+       { IPv4(63,144,116,0),24 },
+       { IPv4(63,144,220,0),24 },
+       { IPv4(63,144,236,0),24 },
+       { IPv4(63,145,47,0),24 },
+       { IPv4(63,145,50,0),24 },
+       { IPv4(63,145,61,0),24 },
+       { IPv4(63,145,66,0),24 },
+       { IPv4(63,145,71,0),24 },
+       { IPv4(63,145,72,0),24 },
+       { IPv4(63,145,73,0),24 },
+       { IPv4(63,145,74,0),24 },
+       { IPv4(63,145,76,0),24 },
+       { IPv4(63,145,77,0),24 },
+       { IPv4(63,145,79,0),24 },
+       { IPv4(63,145,80,0),23 },
+       { IPv4(63,145,167,0),24 },
+       { IPv4(63,145,171,0),24 },
+       { IPv4(63,145,192,0),24 },
+       { IPv4(63,145,197,0),24 },
+       { IPv4(63,145,199,0),24 },
+       { IPv4(63,145,200,0),24 },
+       { IPv4(63,145,203,0),24 },
+       { IPv4(63,145,209,0),24 },
+       { IPv4(63,145,210,0),24 },
+       { IPv4(63,145,212,0),24 },
+       { IPv4(63,145,215,0),24 },
+       { IPv4(63,145,226,0),23 },
+       { IPv4(63,146,36,0),24 },
+       { IPv4(63,146,71,0),24 },
+       { IPv4(63,146,93,0),24 },
+       { IPv4(63,146,144,0),24 },
+       { IPv4(63,146,152,0),24 },
+       { IPv4(63,146,154,0),24 },
+       { IPv4(63,146,237,0),24 },
+       { IPv4(63,146,242,0),23 },
+       { IPv4(63,146,252,0),24 },
+       { IPv4(63,147,1,0),24 },
+       { IPv4(63,147,4,0),24 },
+       { IPv4(63,147,6,0),24 },
+       { IPv4(63,147,32,0),20 },
+       { IPv4(63,147,108,0),22 },
+       { IPv4(63,147,128,0),21 },
+       { IPv4(63,147,156,0),22 },
+       { IPv4(63,147,196,0),24 },
+       { IPv4(63,147,200,0),22 },
+       { IPv4(63,148,39,0),24 },
+       { IPv4(63,148,77,0),24 },
+       { IPv4(63,148,93,0),24 },
+       { IPv4(63,148,107,0),24 },
+       { IPv4(63,149,26,0),24 },
+       { IPv4(63,149,28,0),24 },
+       { IPv4(63,149,75,0),24 },
+       { IPv4(63,149,98,0),24 },
+       { IPv4(63,149,100,0),24 },
+       { IPv4(63,149,102,0),24 },
+       { IPv4(63,149,103,0),24 },
+       { IPv4(63,149,113,0),24 },
+       { IPv4(63,149,118,0),24 },
+       { IPv4(63,149,121,0),24 },
+       { IPv4(63,149,125,0),24 },
+       { IPv4(63,149,126,0),24 },
+       { IPv4(63,149,199,0),24 },
+       { IPv4(63,149,232,0),24 },
+       { IPv4(63,150,4,0),24 },
+       { IPv4(63,150,7,0),24 },
+       { IPv4(63,150,44,0),24 },
+       { IPv4(63,150,69,0),24 },
+       { IPv4(63,150,71,0),24 },
+       { IPv4(63,150,72,0),22 },
+       { IPv4(63,150,158,0),23 },
+       { IPv4(63,150,158,0),24 },
+       { IPv4(63,150,160,0),20 },
+       { IPv4(63,150,164,0),24 },
+       { IPv4(63,150,166,0),24 },
+       { IPv4(63,150,167,0),24 },
+       { IPv4(63,150,169,0),24 },
+       { IPv4(63,150,173,0),24 },
+       { IPv4(63,150,174,0),24 },
+       { IPv4(63,150,175,0),24 },
+       { IPv4(63,150,210,0),23 },
+       { IPv4(63,150,213,0),24 },
+       { IPv4(63,151,12,0),24 },
+       { IPv4(63,151,14,0),24 },
+       { IPv4(63,151,15,0),24 },
+       { IPv4(63,151,32,0),21 },
+       { IPv4(63,151,86,0),23 },
+       { IPv4(63,151,137,0),24 },
+       { IPv4(63,151,148,0),22 },
+       { IPv4(63,151,155,0),24 },
+       { IPv4(63,151,191,0),24 },
+       { IPv4(63,151,220,0),22 },
+       { IPv4(63,151,240,0),21 },
+       { IPv4(63,160,32,0),21 },
+       { IPv4(63,160,36,0),24 },
+       { IPv4(63,160,129,0),24 },
+       { IPv4(63,161,4,0),23 },
+       { IPv4(63,161,14,0),24 },
+       { IPv4(63,161,51,0),24 },
+       { IPv4(63,161,73,0),24 },
+       { IPv4(63,161,112,0),24 },
+       { IPv4(63,161,204,0),22 },
+       { IPv4(63,162,36,0),24 },
+       { IPv4(63,162,253,0),24 },
+       { IPv4(63,163,76,0),23 },
+       { IPv4(63,163,160,0),19 },
+       { IPv4(63,164,221,0),24 },
+       { IPv4(63,165,90,0),24 },
+       { IPv4(63,165,127,0),24 },
+       { IPv4(63,165,191,0),24 },
+       { IPv4(63,166,28,0),23 },
+       { IPv4(63,166,30,0),24 },
+       { IPv4(63,166,56,0),24 },
+       { IPv4(63,166,100,0),24 },
+       { IPv4(63,166,114,0),24 },
+       { IPv4(63,166,116,0),22 },
+       { IPv4(63,166,144,0),24 },
+       { IPv4(63,166,226,0),24 },
+       { IPv4(63,167,8,0),23 },
+       { IPv4(63,167,44,0),22 },
+       { IPv4(63,167,108,0),24 },
+       { IPv4(63,167,126,0),24 },
+       { IPv4(63,167,126,0),23 },
+       { IPv4(63,167,127,0),24 },
+       { IPv4(63,167,160,0),24 },
+       { IPv4(63,167,204,0),24 },
+       { IPv4(63,167,205,0),24 },
+       { IPv4(63,167,206,0),24 },
+       { IPv4(63,167,207,0),24 },
+       { IPv4(63,167,208,0),20 },
+       { IPv4(63,168,117,0),24 },
+       { IPv4(63,168,244,0),23 },
+       { IPv4(63,169,11,0),24 },
+       { IPv4(63,169,100,0),24 },
+       { IPv4(63,169,120,0),21 },
+       { IPv4(63,169,132,0),24 },
+       { IPv4(63,169,190,0),24 },
+       { IPv4(63,170,14,0),24 },
+       { IPv4(63,170,78,0),24 },
+       { IPv4(63,170,208,0),24 },
+       { IPv4(63,170,254,0),23 },
+       { IPv4(63,171,3,0),24 },
+       { IPv4(63,171,66,0),24 },
+       { IPv4(63,171,98,0),23 },
+       { IPv4(63,171,251,0),24 },
+       { IPv4(63,172,2,0),24 },
+       { IPv4(63,172,189,0),24 },
+       { IPv4(63,173,76,0),23 },
+       { IPv4(63,173,180,0),22 },
+       { IPv4(63,174,16,0),20 },
+       { IPv4(63,174,82,0),23 },
+       { IPv4(63,174,120,0),21 },
+       { IPv4(63,174,209,0),24 },
+       { IPv4(63,175,32,0),20 },
+       { IPv4(63,175,68,0),22 },
+       { IPv4(63,175,96,0),24 },
+       { IPv4(63,192,112,0),20 },
+       { IPv4(63,192,141,0),24 },
+       { IPv4(63,194,96,0),19 },
+       { IPv4(63,196,192,0),20 },
+       { IPv4(63,198,37,0),24 },
+       { IPv4(63,201,0,0),20 },
+       { IPv4(63,201,7,0),24 },
+       { IPv4(63,201,12,0),22 },
+       { IPv4(63,201,16,0),20 },
+       { IPv4(63,201,154,0),24 },
+       { IPv4(63,202,128,0),20 },
+       { IPv4(63,202,144,0),20 },
+       { IPv4(63,202,150,0),24 },
+       { IPv4(63,202,152,0),22 },
+       { IPv4(63,210,101,0),24 },
+       { IPv4(63,210,255,0),24 },
+       { IPv4(63,211,38,0),23 },
+       { IPv4(63,214,242,0),24 },
+       { IPv4(63,215,70,0),24 },
+       { IPv4(63,221,60,0),24 },
+       { IPv4(63,224,168,0),24 },
+       { IPv4(63,224,189,0),24 },
+       { IPv4(63,224,244,0),24 },
+       { IPv4(63,225,13,0),24 },
+       { IPv4(63,225,63,0),24 },
+       { IPv4(63,226,73,0),24 },
+       { IPv4(63,226,74,0),24 },
+       { IPv4(63,226,75,0),24 },
+       { IPv4(63,226,76,0),24 },
+       { IPv4(63,226,110,0),23 },
+       { IPv4(63,226,158,0),24 },
+       { IPv4(63,226,166,0),24 },
+       { IPv4(63,227,154,0),23 },
+       { IPv4(63,227,188,0),24 },
+       { IPv4(63,227,192,0),24 },
+       { IPv4(63,228,26,0),24 },
+       { IPv4(63,228,28,0),24 },
+       { IPv4(63,228,156,0),23 },
+       { IPv4(63,228,214,0),23 },
+       { IPv4(63,228,220,0),22 },
+       { IPv4(63,229,89,0),24 },
+       { IPv4(63,229,90,0),24 },
+       { IPv4(63,229,91,0),24 },
+       { IPv4(63,229,92,0),24 },
+       { IPv4(63,229,93,0),24 },
+       { IPv4(63,229,94,0),24 },
+       { IPv4(63,229,95,0),24 },
+       { IPv4(63,229,96,0),24 },
+       { IPv4(63,229,104,0),24 },
+       { IPv4(63,229,108,0),24 },
+       { IPv4(63,229,144,0),20 },
+       { IPv4(63,229,182,0),24 },
+       { IPv4(63,229,183,0),24 },
+       { IPv4(63,230,115,0),24 },
+       { IPv4(63,230,116,0),23 },
+       { IPv4(63,230,176,0),22 },
+       { IPv4(63,230,181,0),24 },
+       { IPv4(63,230,182,0),23 },
+       { IPv4(63,230,184,0),22 },
+       { IPv4(63,230,240,0),23 },
+       { IPv4(63,230,250,0),24 },
+       { IPv4(63,232,123,0),24 },
+       { IPv4(63,232,160,0),22 },
+       { IPv4(63,232,164,0),22 },
+       { IPv4(63,232,168,0),22 },
+       { IPv4(63,232,172,0),22 },
+       { IPv4(63,232,176,0),22 },
+       { IPv4(63,232,180,0),24 },
+       { IPv4(63,232,181,0),24 },
+       { IPv4(63,232,183,0),24 },
+       { IPv4(63,232,186,0),24 },
+       { IPv4(63,232,187,0),24 },
+       { IPv4(63,232,188,0),22 },
+       { IPv4(63,233,196,0),24 },
+       { IPv4(63,233,224,0),22 },
+       { IPv4(63,234,56,0),22 },
+       { IPv4(63,234,60,0),24 },
+       { IPv4(63,236,76,0),23 },
+       { IPv4(63,236,112,0),21 },
+       { IPv4(63,236,120,0),24 },
+       { IPv4(63,236,120,0),23 },
+       { IPv4(63,236,142,0),23 },
+       { IPv4(63,236,176,0),22 },
+       { IPv4(63,236,184,0),22 },
+       { IPv4(63,236,250,0),24 },
+       { IPv4(63,237,39,0),24 },
+       { IPv4(63,237,60,0),24 },
+       { IPv4(63,237,80,0),23 },
+       { IPv4(63,237,114,0),24 },
+       { IPv4(63,237,116,0),24 },
+       { IPv4(63,237,125,0),24 },
+       { IPv4(63,237,126,0),24 },
+       { IPv4(63,237,171,0),24 },
+       { IPv4(63,237,186,0),24 },
+       { IPv4(63,237,201,0),24 },
+       { IPv4(63,237,220,0),24 },
+       { IPv4(63,237,225,0),24 },
+       { IPv4(63,237,226,0),24 },
+       { IPv4(63,237,230,0),23 },
+       { IPv4(63,237,233,0),24 },
+       { IPv4(63,237,236,0),24 },
+       { IPv4(63,237,238,0),24 },
+       { IPv4(63,237,239,0),24 },
+       { IPv4(63,237,244,0),24 },
+       { IPv4(63,237,245,0),24 },
+       { IPv4(63,237,246,0),24 },
+       { IPv4(63,238,48,0),22 },
+       { IPv4(63,238,70,0),24 },
+       { IPv4(63,238,79,0),24 },
+       { IPv4(63,238,96,0),22 },
+       { IPv4(63,238,121,0),24 },
+       { IPv4(63,238,128,0),22 },
+       { IPv4(63,238,152,0),22 },
+       { IPv4(63,238,156,0),23 },
+       { IPv4(63,238,160,0),19 },
+       { IPv4(63,238,215,0),24 },
+       { IPv4(63,238,226,0),24 },
+       { IPv4(63,238,230,0),24 },
+       { IPv4(63,238,231,0),24 },
+       { IPv4(63,239,2,0),24 },
+       { IPv4(63,239,5,0),24 },
+       { IPv4(63,239,6,0),24 },
+       { IPv4(63,239,48,0),21 },
+       { IPv4(63,239,60,0),22 },
+       { IPv4(63,239,92,0),24 },
+       { IPv4(63,239,102,0),24 },
+       { IPv4(63,239,116,0),24 },
+       { IPv4(63,239,144,0),24 },
+       { IPv4(63,239,145,0),24 },
+       { IPv4(63,239,148,0),24 },
+       { IPv4(63,239,149,0),24 },
+       { IPv4(63,239,150,0),24 },
+       { IPv4(63,239,163,0),24 },
+       { IPv4(63,239,199,0),24 },
+       { IPv4(63,239,204,0),23 },
+       { IPv4(63,239,204,0),24 },
+       { IPv4(63,239,205,0),24 },
+       { IPv4(63,239,211,0),24 },
+       { IPv4(63,239,240,0),20 },
+       { IPv4(63,240,0,0),15 },
+       { IPv4(63,240,0,0),18 },
+       { IPv4(63,240,4,0),24 },
+       { IPv4(63,240,55,0),24 },
+       { IPv4(63,240,64,0),19 },
+       { IPv4(63,240,128,0),18 },
+       { IPv4(63,240,192,0),19 },
+       { IPv4(63,240,224,0),19 },
+       { IPv4(63,241,0,0),18 },
+       { IPv4(63,241,16,0),21 },
+       { IPv4(63,241,44,0),23 },
+       { IPv4(63,241,48,0),21 },
+       { IPv4(63,241,59,0),24 },
+       { IPv4(63,241,61,0),24 },
+       { IPv4(63,241,62,0),24 },
+       { IPv4(63,241,63,0),24 },
+       { IPv4(63,241,64,0),19 },
+       { IPv4(63,241,91,0),24 },
+       { IPv4(63,241,128,0),18 },
+       { IPv4(63,241,192,0),18 },
+       { IPv4(63,242,0,0),16 },
+       { IPv4(63,249,13,0),24 },
+       { IPv4(63,249,14,0),23 },
+       { IPv4(63,249,16,0),21 },
+       { IPv4(63,249,64,0),19 },
+       { IPv4(63,250,128,0),20 },
+       { IPv4(63,250,144,0),24 },
+       { IPv4(63,250,144,0),20 },
+       { IPv4(63,250,145,0),24 },
+       { IPv4(63,250,146,0),24 },
+       { IPv4(63,250,147,0),24 },
+       { IPv4(63,250,148,0),24 },
+       { IPv4(63,250,150,0),24 },
+       { IPv4(63,250,151,0),24 },
+       { IPv4(63,250,152,0),24 },
+       { IPv4(63,250,153,0),24 },
+       { IPv4(63,250,154,0),24 },
+       { IPv4(63,250,155,0),24 },
+       { IPv4(63,250,156,0),24 },
+       { IPv4(63,250,157,0),24 },
+       { IPv4(63,250,158,0),24 },
+       { IPv4(63,250,159,0),24 },
+       { IPv4(63,250,160,0),20 },
+       { IPv4(63,250,192,0),19 },
+       { IPv4(63,251,0,0),20 },
+       { IPv4(63,251,32,0),20 },
+       { IPv4(63,251,33,0),24 },
+       { IPv4(63,251,35,0),24 },
+       { IPv4(63,251,36,0),24 },
+       { IPv4(63,251,37,0),24 },
+       { IPv4(63,251,40,0),21 },
+       { IPv4(63,251,42,0),24 },
+       { IPv4(63,251,44,0),23 },
+       { IPv4(63,251,48,0),20 },
+       { IPv4(63,251,49,0),24 },
+       { IPv4(63,251,52,0),24 },
+       { IPv4(63,251,60,0),24 },
+       { IPv4(63,251,64,0),24 },
+       { IPv4(63,251,64,0),20 },
+       { IPv4(63,251,65,0),24 },
+       { IPv4(63,251,75,0),24 },
+       { IPv4(63,251,78,0),24 },
+       { IPv4(63,251,80,0),20 },
+       { IPv4(63,251,86,0),23 },
+       { IPv4(63,251,93,0),24 },
+       { IPv4(63,251,95,0),24 },
+       { IPv4(63,251,96,0),24 },
+       { IPv4(63,251,96,0),20 },
+       { IPv4(63,251,106,0),24 },
+       { IPv4(63,251,110,0),24 },
+       { IPv4(63,251,112,0),20 },
+       { IPv4(63,251,118,0),24 },
+       { IPv4(63,251,121,0),24 },
+       { IPv4(63,251,128,0),20 },
+       { IPv4(63,251,140,0),24 },
+       { IPv4(63,251,144,0),20 },
+       { IPv4(63,251,156,0),24 },
+       { IPv4(63,251,160,0),20 },
+       { IPv4(63,251,174,0),24 },
+       { IPv4(63,251,176,0),20 },
+       { IPv4(63,251,192,0),24 },
+       { IPv4(63,251,192,0),19 },
+       { IPv4(63,251,203,0),24 },
+       { IPv4(63,251,208,0),20 },
+       { IPv4(63,251,208,0),21 },
+       { IPv4(63,251,212,0),24 },
+       { IPv4(63,251,213,0),24 },
+       { IPv4(63,251,224,0),24 },
+       { IPv4(63,251,224,0),19 },
+       { IPv4(63,251,228,0),24 },
+       { IPv4(63,251,233,0),24 },
+       { IPv4(63,251,234,0),24 },
+       { IPv4(63,251,239,0),24 },
+       { IPv4(63,251,242,0),23 },
+       { IPv4(63,251,247,0),24 },
+       { IPv4(63,251,251,0),24 },
+       { IPv4(64,0,0,0),14 },
+       { IPv4(64,0,8,0),21 },
+       { IPv4(64,0,25,0),24 },
+       { IPv4(64,3,138,0),24 },
+       { IPv4(64,3,139,0),24 },
+       { IPv4(64,4,0,0),18 },
+       { IPv4(64,4,128,0),19 },
+       { IPv4(64,4,147,0),24 },
+       { IPv4(64,4,148,0),23 },
+       { IPv4(64,4,192,0),19 },
+       { IPv4(64,5,71,0),24 },
+       { IPv4(64,5,224,0),24 },
+       { IPv4(64,5,225,0),24 },
+       { IPv4(64,5,226,0),24 },
+       { IPv4(64,6,64,0),20 },
+       { IPv4(64,6,128,0),20 },
+       { IPv4(64,6,144,0),20 },
+       { IPv4(64,6,176,0),20 },
+       { IPv4(64,7,64,0),19 },
+       { IPv4(64,7,128,0),20 },
+       { IPv4(64,8,128,0),18 },
+       { IPv4(64,8,192,0),18 },
+       { IPv4(64,9,52,0),24 },
+       { IPv4(64,12,0,0),16 },
+       { IPv4(64,12,0,0),20 },
+       { IPv4(64,13,0,0),16 },
+       { IPv4(64,13,64,0),20 },
+       { IPv4(64,13,80,0),20 },
+       { IPv4(64,14,9,0),24 },
+       { IPv4(64,14,74,0),23 },
+       { IPv4(64,14,136,0),24 },
+       { IPv4(64,14,136,0),23 },
+       { IPv4(64,15,162,0),24 },
+       { IPv4(64,15,165,0),24 },
+       { IPv4(64,15,166,0),24 },
+       { IPv4(64,15,194,0),23 },
+       { IPv4(64,16,133,0),24 },
+       { IPv4(64,16,136,0),24 },
+       { IPv4(64,16,147,0),24 },
+       { IPv4(64,16,160,0),24 },
+       { IPv4(64,16,170,0),24 },
+       { IPv4(64,16,173,0),24 },
+       { IPv4(64,16,176,0),24 },
+       { IPv4(64,16,180,0),24 },
+       { IPv4(64,16,184,0),24 },
+       { IPv4(64,16,189,0),24 },
+       { IPv4(64,17,10,0),24 },
+       { IPv4(64,17,19,0),24 },
+       { IPv4(64,17,44,0),24 },
+       { IPv4(64,17,59,0),24 },
+       { IPv4(64,17,60,0),22 },
+       { IPv4(64,17,208,0),20 },
+       { IPv4(64,20,48,0),20 },
+       { IPv4(64,21,0,0),17 },
+       { IPv4(64,21,49,0),24 },
+       { IPv4(64,21,56,0),23 },
+       { IPv4(64,21,68,0),23 },
+       { IPv4(64,21,79,0),24 },
+       { IPv4(64,21,102,0),23 },
+       { IPv4(64,21,128,0),18 },
+       { IPv4(64,21,192,0),19 },
+       { IPv4(64,22,132,0),22 },
+       { IPv4(64,22,132,0),24 },
+       { IPv4(64,22,136,0),24 },
+       { IPv4(64,22,192,0),19 },
+       { IPv4(64,23,217,0),24 },
+       { IPv4(64,24,80,0),20 },
+       { IPv4(64,24,112,0),21 },
+       { IPv4(64,24,112,0),20 },
+       { IPv4(64,24,120,0),21 },
+       { IPv4(64,26,64,0),18 },
+       { IPv4(64,26,128,0),18 },
+       { IPv4(64,26,192,0),19 },
+       { IPv4(64,26,224,0),19 },
+       { IPv4(64,27,64,0),18 },
+       { IPv4(64,28,0,0),19 },
+       { IPv4(64,28,68,0),23 },
+       { IPv4(64,28,144,0),20 },
+       { IPv4(64,29,16,0),20 },
+       { IPv4(64,29,32,0),20 },
+       { IPv4(64,29,64,0),19 },
+       { IPv4(64,29,64,0),20 },
+       { IPv4(64,29,70,0),24 },
+       { IPv4(64,29,71,0),24 },
+       { IPv4(64,29,80,0),24 },
+       { IPv4(64,29,87,0),24 },
+       { IPv4(64,29,94,0),24 },
+       { IPv4(64,29,96,0),20 },
+       { IPv4(64,29,160,0),20 },
+       { IPv4(64,29,168,0),22 },
+       { IPv4(64,29,172,0),22 },
+       { IPv4(64,29,224,0),20 },
+       { IPv4(64,30,17,0),24 },
+       { IPv4(64,30,26,0),24 },
+       { IPv4(64,30,34,0),24 },
+       { IPv4(64,30,128,0),19 },
+       { IPv4(64,30,224,0),20 },
+       { IPv4(64,31,0,0),19 },
+       { IPv4(64,33,120,0),24 },
+       { IPv4(64,33,128,0),18 },
+       { IPv4(64,35,0,0),18 },
+       { IPv4(64,35,0,0),17 },
+       { IPv4(64,35,128,0),20 },
+       { IPv4(64,35,172,0),24 },
+       { IPv4(64,37,64,0),19 },
+       { IPv4(64,37,96,0),19 },
+       { IPv4(64,37,128,0),21 },
+       { IPv4(64,37,144,0),20 },
+       { IPv4(64,38,96,0),19 },
+       { IPv4(64,38,128,0),18 },
+       { IPv4(64,39,0,0),19 },
+       { IPv4(64,39,96,0),20 },
+       { IPv4(64,39,192,0),19 },
+       { IPv4(64,40,0,0),20 },
+       { IPv4(64,40,32,0),19 },
+       { IPv4(64,40,96,0),20 },
+       { IPv4(64,41,152,0),24 },
+       { IPv4(64,41,152,0),21 },
+       { IPv4(64,41,255,0),24 },
+       { IPv4(64,42,0,0),17 },
+       { IPv4(64,42,128,0),18 },
+       { IPv4(64,43,0,0),16 },
+       { IPv4(64,44,40,0),23 },
+       { IPv4(64,45,128,0),19 },
+       { IPv4(64,46,128,0),19 },
+       { IPv4(64,46,160,0),20 },
+       { IPv4(64,46,192,0),18 },
+       { IPv4(64,48,0,0),16 },
+       { IPv4(64,48,128,0),18 },
+       { IPv4(64,48,190,0),24 },
+       { IPv4(64,49,0,0),18 },
+       { IPv4(64,49,128,0),18 },
+       { IPv4(64,49,192,0),19 },
+       { IPv4(64,50,0,0),17 },
+       { IPv4(64,50,7,0),24 },
+       { IPv4(64,50,8,0),22 },
+       { IPv4(64,50,64,0),20 },
+       { IPv4(64,50,97,0),24 },
+       { IPv4(64,50,107,0),24 },
+       { IPv4(64,50,124,0),24 },
+       { IPv4(64,50,125,0),24 },
+       { IPv4(64,50,128,0),19 },
+       { IPv4(64,50,160,0),19 },
+       { IPv4(64,50,192,0),19 },
+       { IPv4(64,52,32,0),19 },
+       { IPv4(64,52,64,0),20 },
+       { IPv4(64,52,112,0),20 },
+       { IPv4(64,52,192,0),19 },
+       { IPv4(64,53,0,0),18 },
+       { IPv4(64,53,64,0),18 },
+       { IPv4(64,54,0,0),16 },
+       { IPv4(64,55,0,0),16 },
+       { IPv4(64,55,0,0),17 },
+       { IPv4(64,55,128,0),17 },
+       { IPv4(64,56,0,0),19 },
+       { IPv4(64,56,96,0),20 },
+       { IPv4(64,56,224,0),20 },
+       { IPv4(64,57,0,0),20 },
+       { IPv4(64,57,224,0),20 },
+       { IPv4(64,58,128,0),19 },
+       { IPv4(64,58,158,0),23 },
+       { IPv4(64,58,160,0),19 },
+       { IPv4(64,58,168,0),23 },
+       { IPv4(64,58,185,0),24 },
+       { IPv4(64,58,190,0),23 },
+       { IPv4(64,59,0,0),23 },
+       { IPv4(64,59,0,0),18 },
+       { IPv4(64,59,10,0),23 },
+       { IPv4(64,59,20,0),23 },
+       { IPv4(64,59,128,0),19 },
+       { IPv4(64,59,128,0),18 },
+       { IPv4(64,59,224,0),19 },
+       { IPv4(64,60,112,0),21 },
+       { IPv4(64,60,120,0),21 },
+       { IPv4(64,60,208,0),20 },
+       { IPv4(64,60,224,0),20 },
+       { IPv4(64,60,240,0),20 },
+       { IPv4(64,61,29,0),24 },
+       { IPv4(64,62,0,0),21 },
+       { IPv4(64,62,0,0),17 },
+       { IPv4(64,62,12,0),22 },
+       { IPv4(64,62,94,0),24 },
+       { IPv4(64,62,104,0),22 },
+       { IPv4(64,62,112,0),24 },
+       { IPv4(64,62,120,0),24 },
+       { IPv4(64,62,125,0),24 },
+       { IPv4(64,63,0,0),20 },
+       { IPv4(64,63,32,0),19 },
+       { IPv4(64,63,64,0),19 },
+       { IPv4(64,63,112,0),20 },
+       { IPv4(64,63,128,0),20 },
+       { IPv4(64,63,176,0),20 },
+       { IPv4(64,65,0,0),18 },
+       { IPv4(64,66,28,0),24 },
+       { IPv4(64,66,32,0),22 },
+       { IPv4(64,66,32,0),20 },
+       { IPv4(64,68,0,0),19 },
+       { IPv4(64,68,32,0),19 },
+       { IPv4(64,68,96,0),19 },
+       { IPv4(64,68,102,0),23 },
+       { IPv4(64,68,128,0),22 },
+       { IPv4(64,68,192,0),20 },
+       { IPv4(64,69,16,0),20 },
+       { IPv4(64,69,64,0),19 },
+       { IPv4(64,69,128,0),20 },
+       { IPv4(64,69,208,0),20 },
+       { IPv4(64,70,4,0),22 },
+       { IPv4(64,70,68,0),22 },
+       { IPv4(64,70,128,0),17 },
+       { IPv4(64,71,64,0),22 },
+       { IPv4(64,71,64,0),19 },
+       { IPv4(64,71,128,0),18 },
+       { IPv4(64,73,0,0),17 },
+       { IPv4(64,73,128,0),18 },
+       { IPv4(64,74,0,0),19 },
+       { IPv4(64,74,5,0),24 },
+       { IPv4(64,74,16,0),22 },
+       { IPv4(64,74,32,0),19 },
+       { IPv4(64,74,36,0),23 },
+       { IPv4(64,74,38,0),24 },
+       { IPv4(64,74,39,0),24 },
+       { IPv4(64,74,44,0),24 },
+       { IPv4(64,74,46,0),24 },
+       { IPv4(64,74,46,0),23 },
+       { IPv4(64,74,47,0),24 },
+       { IPv4(64,74,63,0),24 },
+       { IPv4(64,75,25,0),24 },
+       { IPv4(64,75,26,0),24 },
+       { IPv4(64,75,64,0),20 },
+       { IPv4(64,75,111,0),24 },
+       { IPv4(64,75,112,0),20 },
+       { IPv4(64,75,128,0),18 },
+       { IPv4(64,75,129,0),24 },
+       { IPv4(64,75,133,0),24 },
+       { IPv4(64,75,134,0),24 },
+       { IPv4(64,75,144,0),22 },
+       { IPv4(64,75,148,0),24 },
+       { IPv4(64,75,149,0),24 },
+       { IPv4(64,75,150,0),23 },
+       { IPv4(64,75,152,0),23 },
+       { IPv4(64,75,154,0),23 },
+       { IPv4(64,75,156,0),23 },
+       { IPv4(64,75,158,0),24 },
+       { IPv4(64,75,168,0),24 },
+       { IPv4(64,75,170,0),24 },
+       { IPv4(64,75,176,0),22 },
+       { IPv4(64,75,180,0),23 },
+       { IPv4(64,75,182,0),23 },
+       { IPv4(64,75,184,0),22 },
+       { IPv4(64,75,188,0),24 },
+       { IPv4(64,75,189,0),24 },
+       { IPv4(64,76,68,0),22 },
+       { IPv4(64,76,72,0),21 },
+       { IPv4(64,76,152,0),24 },
+       { IPv4(64,78,0,0),18 },
+       { IPv4(64,78,64,0),19 },
+       { IPv4(64,78,64,0),18 },
+       { IPv4(64,79,0,0),19 },
+       { IPv4(64,79,64,0),19 },
+       { IPv4(64,79,96,0),20 },
+       { IPv4(64,79,224,0),20 },
+       { IPv4(64,80,0,0),16 },
+       { IPv4(64,81,0,0),19 },
+       { IPv4(64,81,32,0),20 },
+       { IPv4(64,81,48,0),20 },
+       { IPv4(64,81,64,0),20 },
+       { IPv4(64,81,80,0),20 },
+       { IPv4(64,81,96,0),20 },
+       { IPv4(64,81,112,0),20 },
+       { IPv4(64,81,128,0),21 },
+       { IPv4(64,81,136,0),21 },
+       { IPv4(64,81,144,0),20 },
+       { IPv4(64,81,160,0),19 },
+       { IPv4(64,81,176,0),23 },
+       { IPv4(64,81,192,0),19 },
+       { IPv4(64,81,224,0),21 },
+       { IPv4(64,81,232,0),21 },
+       { IPv4(64,81,240,0),20 },
+       { IPv4(64,82,0,0),17 },
+       { IPv4(64,83,160,0),20 },
+       { IPv4(64,84,24,0),23 },
+       { IPv4(64,84,26,0),24 },
+       { IPv4(64,84,32,0),22 },
+       { IPv4(64,84,41,0),24 },
+       { IPv4(64,86,16,0),21 },
+       { IPv4(64,86,224,0),24 },
+       { IPv4(64,86,225,0),24 },
+       { IPv4(64,86,253,0),24 },
+       { IPv4(64,86,254,0),24 },
+       { IPv4(64,87,64,0),19 },
+       { IPv4(64,88,128,0),19 },
+       { IPv4(64,89,96,0),19 },
+       { IPv4(64,89,106,0),24 },
+       { IPv4(64,89,107,0),24 },
+       { IPv4(64,89,110,0),23 },
+       { IPv4(64,89,160,0),20 },
+       { IPv4(64,89,224,0),20 },
+       { IPv4(64,90,0,0),23 },
+       { IPv4(64,90,0,0),19 },
+       { IPv4(64,90,2,0),24 },
+       { IPv4(64,90,3,0),24 },
+       { IPv4(64,90,4,0),24 },
+       { IPv4(64,90,5,0),24 },
+       { IPv4(64,90,6,0),24 },
+       { IPv4(64,90,7,0),24 },
+       { IPv4(64,90,8,0),24 },
+       { IPv4(64,90,9,0),24 },
+       { IPv4(64,90,10,0),24 },
+       { IPv4(64,90,12,0),24 },
+       { IPv4(64,90,13,0),24 },
+       { IPv4(64,90,14,0),24 },
+       { IPv4(64,90,15,0),24 },
+       { IPv4(64,90,16,0),24 },
+       { IPv4(64,90,18,0),24 },
+       { IPv4(64,90,19,0),24 },
+       { IPv4(64,90,20,0),23 },
+       { IPv4(64,90,22,0),24 },
+       { IPv4(64,90,23,0),24 },
+       { IPv4(64,90,24,0),23 },
+       { IPv4(64,90,26,0),24 },
+       { IPv4(64,90,27,0),24 },
+       { IPv4(64,90,28,0),24 },
+       { IPv4(64,90,29,0),24 },
+       { IPv4(64,90,30,0),24 },
+       { IPv4(64,90,31,0),24 },
+       { IPv4(64,90,32,0),19 },
+       { IPv4(64,90,64,0),20 },
+       { IPv4(64,90,240,0),20 },
+       { IPv4(64,91,224,0),20 },
+       { IPv4(64,92,75,0),24 },
+       { IPv4(64,94,0,0),20 },
+       { IPv4(64,94,6,0),24 },
+       { IPv4(64,94,13,0),24 },
+       { IPv4(64,94,15,0),24 },
+       { IPv4(64,94,16,0),20 },
+       { IPv4(64,94,30,0),24 },
+       { IPv4(64,94,31,0),24 },
+       { IPv4(64,94,32,0),20 },
+       { IPv4(64,94,48,0),22 },
+       { IPv4(64,94,48,0),20 },
+       { IPv4(64,94,49,0),24 },
+       { IPv4(64,94,57,0),24 },
+       { IPv4(64,94,58,0),24 },
+       { IPv4(64,94,62,0),24 },
+       { IPv4(64,94,64,0),19 },
+       { IPv4(64,94,68,0),24 },
+       { IPv4(64,94,70,0),24 },
+       { IPv4(64,94,78,0),24 },
+       { IPv4(64,94,81,0),24 },
+       { IPv4(64,94,82,0),24 },
+       { IPv4(64,94,83,0),24 },
+       { IPv4(64,94,88,0),24 },
+       { IPv4(64,94,89,0),24 },
+       { IPv4(64,94,93,0),24 },
+       { IPv4(64,94,94,0),24 },
+       { IPv4(64,94,95,0),24 },
+       { IPv4(64,94,96,0),23 },
+       { IPv4(64,94,98,0),24 },
+       { IPv4(64,94,99,0),24 },
+       { IPv4(64,94,108,0),24 },
+       { IPv4(64,94,112,0),20 },
+       { IPv4(64,94,128,0),22 },
+       { IPv4(64,94,128,0),20 },
+       { IPv4(64,94,144,0),22 },
+       { IPv4(64,94,144,0),20 },
+       { IPv4(64,94,151,0),24 },
+       { IPv4(64,94,152,0),22 },
+       { IPv4(64,94,162,0),24 },
+       { IPv4(64,94,170,0),24 },
+       { IPv4(64,94,174,0),24 },
+       { IPv4(64,94,175,0),24 },
+       { IPv4(64,94,180,0),23 },
+       { IPv4(64,94,182,0),24 },
+       { IPv4(64,94,182,0),23 },
+       { IPv4(64,94,188,0),23 },
+       { IPv4(64,94,189,0),24 },
+       { IPv4(64,94,199,0),24 },
+       { IPv4(64,94,202,0),23 },
+       { IPv4(64,94,202,0),24 },
+       { IPv4(64,94,208,0),20 },
+       { IPv4(64,94,214,0),23 },
+       { IPv4(64,94,218,0),24 },
+       { IPv4(64,94,223,0),24 },
+       { IPv4(64,94,224,0),20 },
+       { IPv4(64,94,224,0),21 },
+       { IPv4(64,94,238,0),24 },
+       { IPv4(64,94,240,0),20 },
+       { IPv4(64,95,0,0),19 },
+       { IPv4(64,95,9,0),24 },
+       { IPv4(64,95,12,0),24 },
+       { IPv4(64,95,18,0),24 },
+       { IPv4(64,95,20,0),24 },
+       { IPv4(64,95,26,0),24 },
+       { IPv4(64,95,28,0),24 },
+       { IPv4(64,95,29,0),24 },
+       { IPv4(64,95,48,0),20 },
+       { IPv4(64,95,64,0),20 },
+       { IPv4(64,95,74,0),24 },
+       { IPv4(64,95,80,0),20 },
+       { IPv4(64,95,94,0),24 },
+       { IPv4(64,95,95,0),24 },
+       { IPv4(64,95,96,0),20 },
+       { IPv4(64,95,96,0),21 },
+       { IPv4(64,95,100,0),22 },
+       { IPv4(64,95,112,0),20 },
+       { IPv4(64,95,118,0),24 },
+       { IPv4(64,95,119,0),24 },
+       { IPv4(64,95,128,0),20 },
+       { IPv4(64,95,160,0),19 },
+       { IPv4(64,95,168,0),22 },
+       { IPv4(64,95,172,0),23 },
+       { IPv4(64,95,180,0),22 },
+       { IPv4(64,95,189,0),24 },
+       { IPv4(64,95,192,0),20 },
+       { IPv4(64,95,208,0),20 },
+       { IPv4(64,95,221,0),24 },
+       { IPv4(64,95,222,0),24 },
+       { IPv4(64,95,223,0),24 },
+       { IPv4(64,95,224,0),24 },
+       { IPv4(64,95,224,0),20 },
+       { IPv4(64,95,225,0),24 },
+       { IPv4(64,95,226,0),24 },
+       { IPv4(64,95,227,0),24 },
+       { IPv4(64,95,238,0),24 },
+       { IPv4(64,95,240,0),20 },
+       { IPv4(64,100,0,0),14 },
+       { IPv4(64,102,0,0),16 },
+       { IPv4(64,104,0,0),16 },
+       { IPv4(64,107,0,0),17 },
+       { IPv4(64,107,128,0),17 },
+       { IPv4(64,110,0,0),19 },
+       { IPv4(64,110,15,0),24 },
+       { IPv4(64,110,22,0),24 },
+       { IPv4(64,110,24,0),22 },
+       { IPv4(64,110,24,0),24 },
+       { IPv4(64,110,27,0),24 },
+       { IPv4(64,110,28,0),22 },
+       { IPv4(64,110,29,0),24 },
+       { IPv4(64,110,32,0),20 },
+       { IPv4(64,110,36,0),23 },
+       { IPv4(64,110,48,0),20 },
+       { IPv4(64,110,48,0),21 },
+       { IPv4(64,110,51,0),24 },
+       { IPv4(64,110,54,0),23 },
+       { IPv4(64,110,56,0),22 },
+       { IPv4(64,110,60,0),22 },
+       { IPv4(64,110,64,0),20 },
+       { IPv4(64,110,75,0),24 },
+       { IPv4(64,110,76,0),23 },
+       { IPv4(64,110,79,0),24 },
+       { IPv4(64,110,80,0),20 },
+       { IPv4(64,110,96,0),20 },
+       { IPv4(64,110,104,0),21 },
+       { IPv4(64,110,112,0),20 },
+       { IPv4(64,110,112,0),24 },
+       { IPv4(64,110,113,0),24 },
+       { IPv4(64,110,114,0),24 },
+       { IPv4(64,110,115,0),24 },
+       { IPv4(64,110,121,0),24 },
+       { IPv4(64,110,122,0),24 },
+       { IPv4(64,110,126,0),24 },
+       { IPv4(64,110,128,0),20 },
+       { IPv4(64,110,128,0),21 },
+       { IPv4(64,110,133,0),24 },
+       { IPv4(64,110,136,0),21 },
+       { IPv4(64,110,144,0),20 },
+       { IPv4(64,110,144,0),21 },
+       { IPv4(64,110,148,0),23 },
+       { IPv4(64,110,148,0),22 },
+       { IPv4(64,110,150,0),23 },
+       { IPv4(64,110,156,0),22 },
+       { IPv4(64,110,160,0),20 },
+       { IPv4(64,110,166,0),24 },
+       { IPv4(64,110,176,0),20 },
+       { IPv4(64,110,190,0),23 },
+       { IPv4(64,111,48,0),20 },
+       { IPv4(64,112,16,0),22 },
+       { IPv4(64,112,64,0),21 },
+       { IPv4(64,113,64,0),19 },
+       { IPv4(64,113,192,0),19 },
+       { IPv4(64,113,208,0),23 },
+       { IPv4(64,115,0,0),19 },
+       { IPv4(64,118,64,0),20 },
+       { IPv4(64,118,96,0),21 },
+       { IPv4(64,118,128,0),20 },
+       { IPv4(64,118,130,0),24 },
+       { IPv4(64,118,131,0),24 },
+       { IPv4(64,118,140,0),24 },
+       { IPv4(64,118,143,0),24 },
+       { IPv4(64,119,32,0),20 },
+       { IPv4(64,119,128,0),20 },
+       { IPv4(64,119,160,0),20 },
+       { IPv4(64,121,0,0),16 },
+       { IPv4(64,122,0,0),19 },
+       { IPv4(64,122,16,0),20 },
+       { IPv4(64,122,64,0),20 },
+       { IPv4(64,123,96,0),21 },
+       { IPv4(64,123,195,0),24 },
+       { IPv4(64,124,0,0),15 },
+       { IPv4(64,124,6,0),24 },
+       { IPv4(64,124,31,0),24 },
+       { IPv4(64,124,37,0),24 },
+       { IPv4(64,124,38,0),24 },
+       { IPv4(64,124,41,0),24 },
+       { IPv4(64,124,63,0),24 },
+       { IPv4(64,124,70,0),24 },
+       { IPv4(64,124,92,0),24 },
+       { IPv4(64,124,106,0),23 },
+       { IPv4(64,124,147,0),24 },
+       { IPv4(64,124,148,0),24 },
+       { IPv4(64,124,150,0),24 },
+       { IPv4(64,124,152,0),24 },
+       { IPv4(64,124,169,0),24 },
+       { IPv4(64,124,212,0),24 },
+       { IPv4(64,124,213,0),24 },
+       { IPv4(64,124,236,0),23 },
+       { IPv4(64,124,236,0),22 },
+       { IPv4(64,124,239,0),24 },
+       { IPv4(64,125,88,0),21 },
+       { IPv4(64,125,132,0),22 },
+       { IPv4(64,125,133,0),24 },
+       { IPv4(64,125,134,0),24 },
+       { IPv4(64,125,135,0),24 },
+       { IPv4(64,125,140,0),24 },
+       { IPv4(64,125,178,0),23 },
+       { IPv4(64,125,179,0),24 },
+       { IPv4(64,125,192,0),22 },
+       { IPv4(64,125,248,0),22 },
+       { IPv4(64,126,0,0),18 },
+       { IPv4(64,127,0,0),18 },
+       { IPv4(64,132,2,0),23 },
+       { IPv4(64,132,14,0),24 },
+       { IPv4(64,132,26,0),24 },
+       { IPv4(64,132,83,0),24 },
+       { IPv4(64,132,84,0),24 },
+       { IPv4(64,134,12,0),24 },
+       { IPv4(64,134,16,0),22 },
+       { IPv4(64,134,20,0),23 },
+       { IPv4(64,134,29,0),24 },
+       { IPv4(64,134,126,0),24 },
+       { IPv4(64,139,16,0),20 },
+       { IPv4(64,139,32,0),20 },
+       { IPv4(64,146,0,0),20 },
+       { IPv4(64,146,9,0),24 },
+       { IPv4(64,147,0,0),19 },
+       { IPv4(64,147,192,0),20 },
+       { IPv4(64,148,0,0),16 },
+       { IPv4(64,148,224,0),20 },
+       { IPv4(64,152,6,0),24 },
+       { IPv4(64,152,7,0),24 },
+       { IPv4(64,152,8,0),22 },
+       { IPv4(64,152,12,0),24 },
+       { IPv4(64,152,13,0),24 },
+       { IPv4(64,152,20,0),24 },
+       { IPv4(64,152,21,0),24 },
+       { IPv4(64,152,108,0),24 },
+       { IPv4(64,152,110,0),24 },
+       { IPv4(64,152,111,0),24 },
+       { IPv4(64,152,121,0),24 },
+       { IPv4(64,152,176,0),21 },
+       { IPv4(64,152,195,0),24 },
+       { IPv4(64,154,10,0),23 },
+       { IPv4(64,154,176,0),21 },
+       { IPv4(64,154,194,0),23 },
+       { IPv4(64,156,13,0),24 },
+       { IPv4(64,156,44,0),23 },
+       { IPv4(64,156,50,0),24 },
+       { IPv4(64,156,180,0),23 },
+       { IPv4(64,157,32,0),21 },
+       { IPv4(64,157,129,0),24 },
+       { IPv4(64,157,130,0),24 },
+       { IPv4(64,157,131,0),24 },
+       { IPv4(64,157,171,0),24 },
+       { IPv4(64,157,232,0),22 },
+       { IPv4(64,158,116,0),24 },
+       { IPv4(64,158,118,0),24 },
+       { IPv4(64,160,116,0),22 },
+       { IPv4(64,161,32,0),20 },
+       { IPv4(64,161,48,0),20 },
+       { IPv4(64,161,121,0),24 },
+       { IPv4(64,162,79,0),24 },
+       { IPv4(64,162,99,0),24 },
+       { IPv4(64,162,108,0),23 },
+       { IPv4(64,162,222,0),24 },
+       { IPv4(64,164,59,0),24 },
+       { IPv4(64,164,232,0),24 },
+       { IPv4(64,165,105,0),24 },
+       { IPv4(64,166,160,0),20 },
+       { IPv4(64,168,192,0),20 },
+       { IPv4(64,169,0,0),20 },
+       { IPv4(64,169,41,0),24 },
+       { IPv4(64,173,192,0),20 },
+       { IPv4(64,173,208,0),20 },
+       { IPv4(64,178,0,0),18 },
+       { IPv4(64,178,64,0),19 },
+       { IPv4(64,181,1,0),24 },
+       { IPv4(64,185,128,0),21 },
+       { IPv4(64,185,136,0),22 },
+       { IPv4(64,185,140,0),22 },
+       { IPv4(64,185,140,0),23 },
+       { IPv4(64,185,142,0),23 },
+       { IPv4(64,185,144,0),20 },
+       { IPv4(64,185,144,0),21 },
+       { IPv4(64,185,152,0),21 },
+       { IPv4(64,186,64,0),21 },
+       { IPv4(64,186,72,0),22 },
+       { IPv4(64,186,96,0),20 },
+       { IPv4(64,186,128,0),20 },
+       { IPv4(64,186,160,0),20 },
+       { IPv4(64,186,232,0),22 },
+       { IPv4(64,188,158,0),23 },
+       { IPv4(64,200,0,0),22 },
+       { IPv4(64,200,0,0),16 },
+       { IPv4(64,200,4,0),22 },
+       { IPv4(64,200,16,0),22 },
+       { IPv4(64,200,32,0),21 },
+       { IPv4(64,200,80,0),23 },
+       { IPv4(64,200,82,0),24 },
+       { IPv4(64,200,88,0),23 },
+       { IPv4(64,200,90,0),23 },
+       { IPv4(64,200,92,0),22 },
+       { IPv4(64,200,96,0),23 },
+       { IPv4(64,200,98,0),24 },
+       { IPv4(64,200,99,0),24 },
+       { IPv4(64,200,100,0),24 },
+       { IPv4(64,200,104,0),23 },
+       { IPv4(64,200,112,0),23 },
+       { IPv4(64,200,136,0),23 },
+       { IPv4(64,200,144,0),21 },
+       { IPv4(64,200,144,0),24 },
+       { IPv4(64,200,145,0),24 },
+       { IPv4(64,200,170,0),23 },
+       { IPv4(64,200,172,0),24 },
+       { IPv4(64,200,173,0),24 },
+       { IPv4(64,200,180,0),23 },
+       { IPv4(64,200,184,0),23 },
+       { IPv4(64,200,187,0),24 },
+       { IPv4(64,200,188,0),23 },
+       { IPv4(64,200,192,0),23 },
+       { IPv4(64,200,212,0),24 },
+       { IPv4(64,200,253,0),24 },
+       { IPv4(64,208,52,0),23 },
+       { IPv4(64,208,56,0),23 },
+       { IPv4(64,208,186,0),23 },
+       { IPv4(64,208,240,0),24 },
+       { IPv4(64,209,32,0),23 },
+       { IPv4(64,209,70,0),23 },
+       { IPv4(64,209,92,0),24 },
+       { IPv4(64,209,189,0),24 },
+       { IPv4(64,209,201,0),24 },
+       { IPv4(64,210,75,0),24 },
+       { IPv4(64,210,178,0),24 },
+       { IPv4(64,211,101,0),24 },
+       { IPv4(64,211,184,0),21 },
+       { IPv4(64,211,230,0),24 },
+       { IPv4(64,212,8,0),21 },
+       { IPv4(64,212,152,0),24 },
+       { IPv4(64,212,170,0),24 },
+       { IPv4(64,212,171,0),24 },
+       { IPv4(64,213,66,0),23 },
+       { IPv4(64,213,130,0),24 },
+       { IPv4(64,214,85,0),24 },
+       { IPv4(64,216,96,0),20 },
+       { IPv4(64,217,32,0),20 },
+       { IPv4(64,220,0,0),15 },
+       { IPv4(64,220,201,0),24 },
+       { IPv4(64,221,95,0),24 },
+       { IPv4(64,221,168,0),21 },
+       { IPv4(64,221,207,0),24 },
+       { IPv4(64,221,223,0),24 },
+       { IPv4(64,221,232,0),24 },
+       { IPv4(64,224,0,0),17 },
+       { IPv4(64,224,128,0),17 },
+       { IPv4(64,225,0,0),16 },
+       { IPv4(64,226,0,0),16 },
+       { IPv4(64,232,0,0),16 },
+       { IPv4(64,232,0,0),22 },
+       { IPv4(64,232,52,0),23 },
+       { IPv4(64,232,88,0),24 },
+       { IPv4(64,232,95,0),24 },
+       { IPv4(64,232,116,0),23 },
+       { IPv4(64,232,133,0),24 },
+       { IPv4(64,232,138,0),24 },
+       { IPv4(64,232,152,0),21 },
+       { IPv4(64,232,160,0),20 },
+       { IPv4(64,232,187,0),24 },
+       { IPv4(64,232,196,0),24 },
+       { IPv4(64,232,200,0),24 },
+       { IPv4(64,232,206,0),24 },
+       { IPv4(64,232,212,0),23 },
+       { IPv4(64,232,252,0),22 },
+       { IPv4(64,233,0,0),17 },
+       { IPv4(64,233,8,0),21 },
+       { IPv4(64,233,16,0),22 },
+       { IPv4(64,236,0,0),16 },
+       { IPv4(64,236,12,0),24 },
+       { IPv4(64,236,16,0),21 },
+       { IPv4(64,238,0,0),20 },
+       { IPv4(64,238,128,0),20 },
+       { IPv4(64,238,224,0),20 },
+       { IPv4(64,239,0,0),18 },
+       { IPv4(64,239,128,0),18 },
+       { IPv4(64,240,69,0),24 },
+       { IPv4(64,240,93,0),24 },
+       { IPv4(64,241,64,0),24 },
+       { IPv4(64,242,40,0),24 },
+       { IPv4(64,242,41,0),24 },
+       { IPv4(64,242,42,0),24 },
+       { IPv4(64,242,43,0),24 },
+       { IPv4(64,242,117,0),24 },
+       { IPv4(64,242,118,0),23 },
+       { IPv4(64,242,216,0),22 },
+       { IPv4(64,242,250,0),23 },
+       { IPv4(64,243,232,0),22 },
+       { IPv4(64,244,80,0),21 },
+       { IPv4(64,244,115,0),24 },
+       { IPv4(64,244,120,0),21 },
+       { IPv4(64,244,223,0),24 },
+       { IPv4(64,245,48,0),20 },
+       { IPv4(64,245,96,0),21 },
+       { IPv4(64,245,224,0),21 },
+       { IPv4(64,247,0,0),19 },
+       { IPv4(64,250,128,0),18 },
+       { IPv4(64,251,32,0),20 },
+       { IPv4(64,251,64,0),20 },
+       { IPv4(64,251,160,0),20 },
+       { IPv4(64,251,240,0),20 },
+       { IPv4(64,252,0,0),16 },
+       { IPv4(64,252,224,0),19 },
+       { IPv4(64,253,0,0),19 },
+       { IPv4(64,253,9,0),24 },
+       { IPv4(64,253,10,0),24 },
+       { IPv4(64,253,32,0),19 },
+       { IPv4(64,253,32,0),22 },
+       { IPv4(64,253,36,0),23 },
+       { IPv4(64,253,96,0),20 },
+       { IPv4(64,254,32,0),19 },
+       { IPv4(64,254,96,0),20 },
+       { IPv4(64,254,96,0),24 },
+       { IPv4(64,254,97,0),24 },
+       { IPv4(64,254,98,0),24 },
+       { IPv4(64,254,99,0),24 },
+       { IPv4(64,254,100,0),24 },
+       { IPv4(64,254,102,0),24 },
+       { IPv4(64,254,103,0),24 },
+       { IPv4(64,254,108,0),24 },
+       { IPv4(64,254,160,0),20 },
+       { IPv4(64,254,161,0),24 },
+       { IPv4(64,254,163,0),24 },
+       { IPv4(64,254,165,0),24 },
+       { IPv4(64,254,166,0),24 },
+       { IPv4(64,254,170,0),24 },
+       { IPv4(64,254,172,0),23 },
+       { IPv4(64,254,174,0),23 },
+       { IPv4(64,255,64,0),19 },
+       { IPv4(65,0,0,0),14 },
+       { IPv4(65,0,0,0),17 },
+       { IPv4(65,0,0,0),12 },
+       { IPv4(65,0,0,0),13 },
+       { IPv4(65,0,128,0),17 },
+       { IPv4(65,1,0,0),17 },
+       { IPv4(65,1,128,0),17 },
+       { IPv4(65,2,0,0),17 },
+       { IPv4(65,2,128,0),18 },
+       { IPv4(65,2,192,0),19 },
+       { IPv4(65,2,224,0),19 },
+       { IPv4(65,3,0,0),18 },
+       { IPv4(65,3,64,0),19 },
+       { IPv4(65,3,96,0),19 },
+       { IPv4(65,3,128,0),17 },
+       { IPv4(65,3,192,0),19 },
+       { IPv4(65,3,224,0),19 },
+       { IPv4(65,4,0,0),16 },
+       { IPv4(65,5,0,0),17 },
+       { IPv4(65,5,128,0),17 },
+       { IPv4(65,6,0,0),15 },
+       { IPv4(65,8,0,0),17 },
+       { IPv4(65,8,0,0),14 },
+       { IPv4(65,8,128,0),17 },
+       { IPv4(65,9,0,0),16 },
+       { IPv4(65,10,0,0),17 },
+       { IPv4(65,10,96,0),19 },
+       { IPv4(65,10,128,0),18 },
+       { IPv4(65,10,192,0),19 },
+       { IPv4(65,10,224,0),19 },
+       { IPv4(65,11,0,0),18 },
+       { IPv4(65,11,64,0),19 },
+       { IPv4(65,11,96,0),19 },
+       { IPv4(65,11,128,0),18 },
+       { IPv4(65,11,192,0),18 },
+       { IPv4(65,12,0,0),17 },
+       { IPv4(65,12,128,0),17 },
+       { IPv4(65,13,0,0),17 },
+       { IPv4(65,13,128,0),18 },
+       { IPv4(65,13,192,0),18 },
+       { IPv4(65,14,0,0),17 },
+       { IPv4(65,14,128,0),17 },
+       { IPv4(65,15,0,0),17 },
+       { IPv4(65,15,128,0),17 },
+       { IPv4(65,21,128,0),18 },
+       { IPv4(65,24,0,0),17 },
+       { IPv4(65,24,0,0),18 },
+       { IPv4(65,24,64,0),18 },
+       { IPv4(65,24,128,0),18 },
+       { IPv4(65,24,192,0),18 },
+       { IPv4(65,24,200,0),21 },
+       { IPv4(65,24,208,0),21 },
+       { IPv4(65,24,216,0),22 },
+       { IPv4(65,24,236,0),22 },
+       { IPv4(65,24,240,0),22 },
+       { IPv4(65,25,0,0),17 },
+       { IPv4(65,25,128,0),19 },
+       { IPv4(65,25,160,0),19 },
+       { IPv4(65,25,192,0),18 },
+       { IPv4(65,26,0,0),17 },
+       { IPv4(65,26,128,0),20 },
+       { IPv4(65,26,144,0),20 },
+       { IPv4(65,26,160,0),20 },
+       { IPv4(65,26,176,0),20 },
+       { IPv4(65,26,192,0),18 },
+       { IPv4(65,27,0,0),17 },
+       { IPv4(65,27,80,0),20 },
+       { IPv4(65,27,120,0),21 },
+       { IPv4(65,27,128,0),17 },
+       { IPv4(65,28,0,0),17 },
+       { IPv4(65,28,0,0),14 },
+       { IPv4(65,28,128,0),20 },
+       { IPv4(65,28,144,0),20 },
+       { IPv4(65,28,160,0),19 },
+       { IPv4(65,28,192,0),19 },
+       { IPv4(65,28,224,0),19 },
+       { IPv4(65,29,0,0),18 },
+       { IPv4(65,29,64,0),19 },
+       { IPv4(65,29,96,0),19 },
+       { IPv4(65,29,128,0),18 },
+       { IPv4(65,29,192,0),19 },
+       { IPv4(65,29,224,0),19 },
+       { IPv4(65,30,0,0),18 },
+       { IPv4(65,30,128,0),18 },
+       { IPv4(65,30,192,0),19 },
+       { IPv4(65,30,224,0),19 },
+       { IPv4(65,31,0,0),19 },
+       { IPv4(65,31,32,0),19 },
+       { IPv4(65,31,64,0),20 },
+       { IPv4(65,31,80,0),20 },
+       { IPv4(65,31,96,0),19 },
+       { IPv4(65,31,128,0),18 },
+       { IPv4(65,31,192,0),20 },
+       { IPv4(65,31,224,0),20 },
+       { IPv4(65,31,240,0),20 },
+       { IPv4(65,32,0,0),17 },
+       { IPv4(65,32,128,0),18 },
+       { IPv4(65,32,192,0),18 },
+       { IPv4(65,33,0,0),17 },
+       { IPv4(65,33,128,0),18 },
+       { IPv4(65,33,192,0),18 },
+       { IPv4(65,34,0,0),20 },
+       { IPv4(65,34,16,0),20 },
+       { IPv4(65,34,32,0),20 },
+       { IPv4(65,34,48,0),20 },
+       { IPv4(65,34,64,0),18 },
+       { IPv4(65,34,128,0),18 },
+       { IPv4(65,34,192,0),18 },
+       { IPv4(65,35,0,0),18 },
+       { IPv4(65,35,64,0),19 },
+       { IPv4(65,35,96,0),19 },
+       { IPv4(65,35,128,0),17 },
+       { IPv4(65,42,208,0),22 },
+       { IPv4(65,45,0,0),17 },
+       { IPv4(65,45,0,0),16 },
+       { IPv4(65,45,128,0),21 },
+       { IPv4(65,45,128,0),18 },
+       { IPv4(65,54,128,0),19 },
+       { IPv4(65,54,160,0),19 },
+       { IPv4(65,54,192,0),19 },
+       { IPv4(65,54,224,0),19 },
+       { IPv4(65,64,176,0),20 },
+       { IPv4(65,65,32,0),20 },
+       { IPv4(65,65,224,0),20 },
+       { IPv4(65,67,64,0),20 },
+       { IPv4(65,68,96,0),20 },
+       { IPv4(65,68,160,0),20 },
+       { IPv4(65,68,160,0),22 },
+       { IPv4(65,68,252,0),24 },
+       { IPv4(65,68,253,0),24 },
+       { IPv4(65,69,224,0),20 },
+       { IPv4(65,70,224,0),20 },
+       { IPv4(65,71,160,0),20 },
+       { IPv4(65,76,0,0),16 },
+       { IPv4(65,88,0,0),24 },
+       { IPv4(65,88,0,0),14 },
+       { IPv4(65,88,9,0),24 },
+       { IPv4(65,88,10,0),23 },
+       { IPv4(65,88,22,0),24 },
+       { IPv4(65,88,62,0),24 },
+       { IPv4(65,88,80,0),22 },
+       { IPv4(65,88,84,0),22 },
+       { IPv4(65,88,88,0),22 },
+       { IPv4(65,88,96,0),20 },
+       { IPv4(65,88,112,0),22 },
+       { IPv4(65,88,116,0),22 },
+       { IPv4(65,88,125,0),24 },
+       { IPv4(65,88,168,0),21 },
+       { IPv4(65,88,176,0),20 },
+       { IPv4(65,88,192,0),21 },
+       { IPv4(65,88,200,0),22 },
+       { IPv4(65,88,204,0),23 },
+       { IPv4(65,88,206,0),24 },
+       { IPv4(65,88,207,0),25 },
+       { IPv4(65,88,214,0),24 },
+       { IPv4(65,88,216,0),22 },
+       { IPv4(65,88,240,0),20 },
+       { IPv4(65,89,5,0),24 },
+       { IPv4(65,89,14,0),23 },
+       { IPv4(65,89,32,0),21 },
+       { IPv4(65,89,40,0),22 },
+       { IPv4(65,89,44,0),22 },
+       { IPv4(65,89,48,0),21 },
+       { IPv4(65,89,56,0),22 },
+       { IPv4(65,89,60,0),22 },
+       { IPv4(65,89,80,0),20 },
+       { IPv4(65,89,96,0),20 },
+       { IPv4(65,89,128,0),22 },
+       { IPv4(65,89,150,0),24 },
+       { IPv4(65,89,156,0),23 },
+       { IPv4(65,89,166,192),26 },
+       { IPv4(65,89,204,0),24 },
+       { IPv4(65,89,220,0),22 },
+       { IPv4(65,89,240,0),21 },
+       { IPv4(65,89,250,0),23 },
+       { IPv4(65,90,56,0),21 },
+       { IPv4(65,90,64,0),21 },
+       { IPv4(65,90,72,0),21 },
+       { IPv4(65,90,80,0),21 },
+       { IPv4(65,90,88,0),21 },
+       { IPv4(65,90,144,0),20 },
+       { IPv4(65,90,177,0),24 },
+       { IPv4(65,90,208,0),20 },
+       { IPv4(65,96,0,0),19 },
+       { IPv4(65,96,32,0),19 },
+       { IPv4(65,96,64,0),18 },
+       { IPv4(65,96,128,0),17 },
+       { IPv4(65,97,0,0),19 },
+       { IPv4(65,100,53,0),24 },
+       { IPv4(65,100,54,0),24 },
+       { IPv4(65,104,0,0),14 },
+       { IPv4(65,105,159,0),24 },
+       { IPv4(65,105,191,0),24 },
+       { IPv4(65,105,218,0),24 },
+       { IPv4(65,105,227,0),24 },
+       { IPv4(65,105,236,0),24 },
+       { IPv4(65,106,136,0),24 },
+       { IPv4(65,106,164,0),24 },
+       { IPv4(65,106,171,0),24 },
+       { IPv4(65,112,27,0),24 },
+       { IPv4(65,112,28,0),23 },
+       { IPv4(65,112,31,0),24 },
+       { IPv4(65,112,122,0),23 },
+       { IPv4(65,112,125,0),24 },
+       { IPv4(65,112,126,0),24 },
+       { IPv4(65,112,127,0),24 },
+       { IPv4(65,112,196,0),24 },
+       { IPv4(65,112,198,0),24 },
+       { IPv4(65,112,199,0),24 },
+       { IPv4(65,112,206,0),24 },
+       { IPv4(65,112,216,0),24 },
+       { IPv4(65,112,241,0),24 },
+       { IPv4(65,112,246,0),23 },
+       { IPv4(65,112,255,0),24 },
+       { IPv4(65,113,8,0),24 },
+       { IPv4(65,113,10,0),24 },
+       { IPv4(65,113,11,0),24 },
+       { IPv4(65,113,12,0),23 },
+       { IPv4(65,113,14,0),23 },
+       { IPv4(65,113,45,0),24 },
+       { IPv4(65,113,124,0),22 },
+       { IPv4(65,113,220,0),22 },
+       { IPv4(65,113,224,0),24 },
+       { IPv4(65,113,227,0),24 },
+       { IPv4(65,113,229,0),24 },
+       { IPv4(65,113,230,0),24 },
+       { IPv4(65,113,236,0),22 },
+       { IPv4(65,113,240,0),24 },
+       { IPv4(65,113,242,0),24 },
+       { IPv4(65,113,244,0),23 },
+       { IPv4(65,114,9,0),24 },
+       { IPv4(65,114,27,0),24 },
+       { IPv4(65,114,50,0),24 },
+       { IPv4(65,114,51,0),24 },
+       { IPv4(65,114,194,0),23 },
+       { IPv4(65,114,200,0),24 },
+       { IPv4(65,114,201,0),24 },
+       { IPv4(65,114,202,0),24 },
+       { IPv4(65,114,203,0),24 },
+       { IPv4(65,114,204,0),24 },
+       { IPv4(65,114,205,0),24 },
+       { IPv4(65,114,210,0),24 },
+       { IPv4(65,114,211,0),24 },
+       { IPv4(65,114,213,0),24 },
+       { IPv4(65,114,214,0),24 },
+       { IPv4(65,114,215,0),24 },
+       { IPv4(65,114,216,0),23 },
+       { IPv4(65,114,220,0),24 },
+       { IPv4(65,114,221,0),24 },
+       { IPv4(65,114,222,0),24 },
+       { IPv4(65,114,223,0),24 },
+       { IPv4(65,114,227,0),24 },
+       { IPv4(65,114,228,0),24 },
+       { IPv4(65,114,229,0),24 },
+       { IPv4(65,114,230,0),24 },
+       { IPv4(65,114,232,0),24 },
+       { IPv4(65,114,234,0),24 },
+       { IPv4(65,114,239,0),24 },
+       { IPv4(65,114,240,0),24 },
+       { IPv4(65,114,241,0),24 },
+       { IPv4(65,114,244,0),23 },
+       { IPv4(65,114,247,0),24 },
+       { IPv4(65,114,248,0),23 },
+       { IPv4(65,114,250,0),24 },
+       { IPv4(65,115,54,0),24 },
+       { IPv4(65,115,174,0),24 },
+       { IPv4(65,115,238,0),24 },
+       { IPv4(65,116,66,0),24 },
+       { IPv4(65,116,67,0),24 },
+       { IPv4(65,116,68,0),24 },
+       { IPv4(65,116,76,0),24 },
+       { IPv4(65,116,77,0),24 },
+       { IPv4(65,116,78,0),24 },
+       { IPv4(65,116,183,0),24 },
+       { IPv4(65,116,186,0),24 },
+       { IPv4(65,116,228,0),23 },
+       { IPv4(65,116,240,0),24 },
+       { IPv4(65,116,242,0),24 },
+       { IPv4(65,116,243,0),24 },
+       { IPv4(65,117,80,0),24 },
+       { IPv4(65,117,81,0),24 },
+       { IPv4(65,117,86,0),24 },
+       { IPv4(65,117,87,0),24 },
+       { IPv4(65,117,88,0),21 },
+       { IPv4(65,117,102,0),24 },
+       { IPv4(65,117,103,0),24 },
+       { IPv4(65,117,104,0),21 },
+       { IPv4(65,117,150,0),24 },
+       { IPv4(65,117,242,0),23 },
+       { IPv4(65,117,244,0),24 },
+       { IPv4(65,117,245,0),24 },
+       { IPv4(65,117,247,0),24 },
+       { IPv4(65,117,249,0),24 },
+       { IPv4(65,117,252,0),24 },
+       { IPv4(65,117,253,0),24 },
+       { IPv4(65,120,230,0),24 },
+       { IPv4(65,120,231,0),24 },
+       { IPv4(65,120,240,0),23 },
+       { IPv4(65,120,242,0),23 },
+       { IPv4(65,121,0,0),23 },
+       { IPv4(65,121,4,0),24 },
+       { IPv4(65,121,8,0),23 },
+       { IPv4(65,121,18,0),24 },
+       { IPv4(65,121,28,0),24 },
+       { IPv4(65,121,33,0),24 },
+       { IPv4(65,121,34,0),24 },
+       { IPv4(65,121,86,0),24 },
+       { IPv4(65,160,176,0),20 },
+       { IPv4(65,160,224,0),20 },
+       { IPv4(65,161,12,0),23 },
+       { IPv4(65,161,42,0),24 },
+       { IPv4(65,161,43,0),24 },
+       { IPv4(65,161,122,0),23 },
+       { IPv4(65,161,192,0),24 },
+       { IPv4(65,163,56,0),24 },
+       { IPv4(65,163,129,0),24 },
+       { IPv4(65,163,182,0),24 },
+       { IPv4(65,163,226,0),24 },
+       { IPv4(65,163,227,0),24 },
+       { IPv4(65,164,116,0),22 },
+       { IPv4(65,164,118,0),24 },
+       { IPv4(65,164,145,0),24 },
+       { IPv4(65,164,236,0),22 },
+       { IPv4(65,165,67,0),24 },
+       { IPv4(65,165,127,0),24 },
+       { IPv4(65,165,134,0),23 },
+       { IPv4(65,166,123,0),24 },
+       { IPv4(65,166,147,0),24 },
+       { IPv4(65,166,233,0),24 },
+       { IPv4(65,167,179,0),24 },
+       { IPv4(65,168,39,0),24 },
+       { IPv4(65,168,204,0),22 },
+       { IPv4(65,170,140,0),22 },
+       { IPv4(65,170,225,0),24 },
+       { IPv4(65,171,16,0),21 },
+       { IPv4(65,174,28,0),22 },
+       { IPv4(65,174,51,0),24 },
+       { IPv4(65,174,154,0),23 },
+       { IPv4(65,192,32,0),24 },
+       { IPv4(65,192,36,0),23 },
+       { IPv4(65,193,3,0),24 },
+       { IPv4(65,193,19,0),24 },
+       { IPv4(65,193,164,0),22 },
+       { IPv4(65,193,252,0),22 },
+       { IPv4(65,194,128,0),21 },
+       { IPv4(65,194,184,0),22 },
+       { IPv4(65,195,9,0),24 },
+       { IPv4(65,195,12,0),24 },
+       { IPv4(65,195,32,0),21 },
+       { IPv4(65,195,209,0),24 },
+       { IPv4(65,195,211,0),24 },
+       { IPv4(65,196,66,0),23 },
+       { IPv4(65,197,21,0),24 },
+       { IPv4(65,197,91,0),24 },
+       { IPv4(65,197,177,0),24 },
+       { IPv4(65,197,236,0),22 },
+       { IPv4(65,198,132,0),23 },
+       { IPv4(65,198,187,0),24 },
+       { IPv4(65,198,197,0),24 },
+       { IPv4(65,198,198,0),24 },
+       { IPv4(65,198,219,0),24 },
+       { IPv4(65,198,220,0),23 },
+       { IPv4(65,199,0,0),21 },
+       { IPv4(65,199,16,0),24 },
+       { IPv4(65,199,17,0),24 },
+       { IPv4(65,199,18,0),24 },
+       { IPv4(65,199,19,0),24 },
+       { IPv4(65,199,28,0),24 },
+       { IPv4(65,199,44,0),24 },
+       { IPv4(65,199,145,0),24 },
+       { IPv4(65,199,148,0),24 },
+       { IPv4(65,199,149,0),24 },
+       { IPv4(65,199,213,0),24 },
+       { IPv4(65,200,30,0),24 },
+       { IPv4(65,200,122,0),24 },
+       { IPv4(65,201,12,0),22 },
+       { IPv4(65,201,209,0),24 },
+       { IPv4(65,202,11,0),24 },
+       { IPv4(65,202,30,0),24 },
+       { IPv4(65,202,64,0),22 },
+       { IPv4(65,202,115,0),24 },
+       { IPv4(65,202,192,0),23 },
+       { IPv4(65,203,43,0),24 },
+       { IPv4(65,204,41,0),24 },
+       { IPv4(65,204,80,0),24 },
+       { IPv4(65,204,150,0),24 },
+       { IPv4(65,204,186,0),24 },
+       { IPv4(65,205,141,0),24 },
+       { IPv4(65,205,160,0),22 },
+       { IPv4(65,205,191,0),24 },
+       { IPv4(65,205,248,0),22 },
+       { IPv4(65,206,228,0),22 },
+       { IPv4(65,207,56,0),21 },
+       { IPv4(65,208,24,0),22 },
+       { IPv4(65,208,97,0),24 },
+       { IPv4(65,210,129,0),24 },
+       { IPv4(65,210,176,0),20 },
+       { IPv4(65,211,151,0),24 },
+       { IPv4(66,1,224,0),20 },
+       { IPv4(66,2,0,0),17 },
+       { IPv4(66,2,128,0),18 },
+       { IPv4(66,3,0,0),17 },
+       { IPv4(66,3,32,0),20 },
+       { IPv4(66,3,128,0),18 },
+       { IPv4(66,3,192,0),18 },
+       { IPv4(66,4,0,0),17 },
+       { IPv4(66,4,128,0),17 },
+       { IPv4(66,5,0,0),17 },
+       { IPv4(66,5,128,0),17 },
+       { IPv4(66,6,0,0),20 },
+       { IPv4(66,6,96,0),19 },
+       { IPv4(66,6,160,0),23 },
+       { IPv4(66,6,160,0),20 },
+       { IPv4(66,6,162,0),24 },
+       { IPv4(66,6,163,0),24 },
+       { IPv4(66,6,164,0),24 },
+       { IPv4(66,6,165,0),24 },
+       { IPv4(66,7,128,0),24 },
+       { IPv4(66,7,142,0),24 },
+       { IPv4(66,7,145,0),24 },
+       { IPv4(66,7,160,0),24 },
+       { IPv4(66,7,191,0),24 },
+       { IPv4(66,7,224,0),20 },
+       { IPv4(66,8,128,0),19 },
+       { IPv4(66,8,160,0),20 },
+       { IPv4(66,8,176,0),20 },
+       { IPv4(66,8,192,0),20 },
+       { IPv4(66,8,208,0),20 },
+       { IPv4(66,8,224,0),20 },
+       { IPv4(66,8,240,0),20 },
+       { IPv4(66,9,0,0),16 },
+       { IPv4(66,21,4,0),24 },
+       { IPv4(66,24,0,0),18 },
+       { IPv4(66,24,64,0),18 },
+       { IPv4(66,24,128,0),19 },
+       { IPv4(66,24,160,0),20 },
+       { IPv4(66,24,176,0),20 },
+       { IPv4(66,24,192,0),19 },
+       { IPv4(66,24,224,0),19 },
+       { IPv4(66,25,0,0),18 },
+       { IPv4(66,25,64,0),19 },
+       { IPv4(66,25,96,0),19 },
+       { IPv4(66,25,128,0),17 },
+       { IPv4(66,26,0,0),20 },
+       { IPv4(66,26,16,0),20 },
+       { IPv4(66,26,32,0),19 },
+       { IPv4(66,26,64,0),19 },
+       { IPv4(66,26,96,0),20 },
+       { IPv4(66,26,112,0),20 },
+       { IPv4(66,26,128,0),19 },
+       { IPv4(66,26,160,0),20 },
+       { IPv4(66,26,176,0),20 },
+       { IPv4(66,26,192,0),19 },
+       { IPv4(66,26,224,0),19 },
+       { IPv4(66,27,0,0),20 },
+       { IPv4(66,27,16,0),20 },
+       { IPv4(66,27,32,0),20 },
+       { IPv4(66,27,48,0),20 },
+       { IPv4(66,27,64,0),18 },
+       { IPv4(66,27,128,0),19 },
+       { IPv4(66,27,160,0),19 },
+       { IPv4(66,27,192,0),19 },
+       { IPv4(66,27,224,0),20 },
+       { IPv4(66,27,240,0),20 },
+       { IPv4(66,28,0,0),17 },
+       { IPv4(66,28,13,0),24 },
+       { IPv4(66,28,15,0),24 },
+       { IPv4(66,28,17,0),24 },
+       { IPv4(66,30,0,0),19 },
+       { IPv4(66,30,32,0),20 },
+       { IPv4(66,30,48,0),20 },
+       { IPv4(66,30,64,0),18 },
+       { IPv4(66,30,128,0),18 },
+       { IPv4(66,30,192,0),18 },
+       { IPv4(66,31,0,0),16 },
+       { IPv4(66,32,4,0),22 },
+       { IPv4(66,32,32,0),21 },
+       { IPv4(66,32,112,0),20 },
+       { IPv4(66,32,136,0),22 },
+       { IPv4(66,33,0,0),17 },
+       { IPv4(66,33,128,0),19 },
+       { IPv4(66,35,64,0),19 },
+       { IPv4(66,35,68,0),22 },
+       { IPv4(66,35,72,0),23 },
+       { IPv4(66,35,78,0),24 },
+       { IPv4(66,36,0,0),20 },
+       { IPv4(66,37,128,0),20 },
+       { IPv4(66,37,160,0),20 },
+       { IPv4(66,37,172,0),24 },
+       { IPv4(66,37,173,0),24 },
+       { IPv4(66,37,224,0),20 },
+       { IPv4(66,38,0,0),24 },
+       { IPv4(66,38,0,0),20 },
+       { IPv4(66,38,1,0),24 },
+       { IPv4(66,38,2,0),24 },
+       { IPv4(66,38,3,0),24 },
+       { IPv4(66,38,4,0),22 },
+       { IPv4(66,38,8,0),22 },
+       { IPv4(66,38,12,0),22 },
+       { IPv4(66,38,16,0),20 },
+       { IPv4(66,38,16,0),22 },
+       { IPv4(66,38,20,0),22 },
+       { IPv4(66,38,24,0),22 },
+       { IPv4(66,38,28,0),24 },
+       { IPv4(66,38,28,0),22 },
+       { IPv4(66,38,29,0),24 },
+       { IPv4(66,38,30,0),24 },
+       { IPv4(66,38,31,0),24 },
+       { IPv4(66,38,32,0),20 },
+       { IPv4(66,38,32,0),21 },
+       { IPv4(66,38,32,0),22 },
+       { IPv4(66,38,36,0),22 },
+       { IPv4(66,38,40,0),22 },
+       { IPv4(66,38,44,0),23 },
+       { IPv4(66,38,44,0),22 },
+       { IPv4(66,38,46,0),23 },
+       { IPv4(66,38,48,0),24 },
+       { IPv4(66,38,48,0),22 },
+       { IPv4(66,38,48,0),20 },
+       { IPv4(66,38,52,0),24 },
+       { IPv4(66,38,54,0),23 },
+       { IPv4(66,38,63,0),24 },
+       { IPv4(66,38,181,0),24 },
+       { IPv4(66,38,182,0),24 },
+       { IPv4(66,39,0,0),17 },
+       { IPv4(66,40,0,0),18 },
+       { IPv4(66,40,64,0),19 },
+       { IPv4(66,40,80,0),20 },
+       { IPv4(66,40,96,0),21 },
+       { IPv4(66,40,96,0),20 },
+       { IPv4(66,40,104,0),21 },
+       { IPv4(66,40,112,0),20 },
+       { IPv4(66,40,128,0),17 },
+       { IPv4(66,40,248,0),21 },
+       { IPv4(66,41,0,0),19 },
+       { IPv4(66,41,32,0),19 },
+       { IPv4(66,41,80,0),20 },
+       { IPv4(66,41,96,0),19 },
+       { IPv4(66,41,128,0),20 },
+       { IPv4(66,41,144,0),20 },
+       { IPv4(66,41,160,0),19 },
+       { IPv4(66,41,192,0),18 },
+       { IPv4(66,42,32,0),20 },
+       { IPv4(66,43,192,0),18 },
+       { IPv4(66,44,0,0),17 },
+       { IPv4(66,45,0,0),17 },
+       { IPv4(66,45,0,0),20 },
+       { IPv4(66,45,0,0),23 },
+       { IPv4(66,45,0,0),18 },
+       { IPv4(66,45,2,0),23 },
+       { IPv4(66,45,8,0),24 },
+       { IPv4(66,45,9,0),24 },
+       { IPv4(66,45,10,0),23 },
+       { IPv4(66,45,12,0),24 },
+       { IPv4(66,45,13,0),24 },
+       { IPv4(66,45,14,0),24 },
+       { IPv4(66,45,15,0),24 },
+       { IPv4(66,45,16,0),22 },
+       { IPv4(66,45,16,0),20 },
+       { IPv4(66,45,20,0),22 },
+       { IPv4(66,45,24,0),24 },
+       { IPv4(66,45,25,0),24 },
+       { IPv4(66,45,26,0),23 },
+       { IPv4(66,45,28,0),22 },
+       { IPv4(66,45,32,0),22 },
+       { IPv4(66,45,32,0),20 },
+       { IPv4(66,45,36,0),23 },
+       { IPv4(66,45,38,0),23 },
+       { IPv4(66,45,41,0),24 },
+       { IPv4(66,45,42,0),24 },
+       { IPv4(66,45,43,0),24 },
+       { IPv4(66,45,44,0),22 },
+       { IPv4(66,45,48,0),24 },
+       { IPv4(66,45,49,0),24 },
+       { IPv4(66,45,50,0),23 },
+       { IPv4(66,45,52,0),22 },
+       { IPv4(66,45,56,0),23 },
+       { IPv4(66,45,59,0),24 },
+       { IPv4(66,45,60,0),22 },
+       { IPv4(66,45,64,0),21 },
+       { IPv4(66,45,64,0),19 },
+       { IPv4(66,45,72,0),24 },
+       { IPv4(66,45,73,0),24 },
+       { IPv4(66,45,74,0),24 },
+       { IPv4(66,45,75,0),24 },
+       { IPv4(66,45,76,0),23 },
+       { IPv4(66,45,78,0),24 },
+       { IPv4(66,45,80,0),24 },
+       { IPv4(66,45,81,0),24 },
+       { IPv4(66,45,82,0),23 },
+       { IPv4(66,45,84,0),24 },
+       { IPv4(66,45,86,0),23 },
+       { IPv4(66,45,88,0),21 },
+       { IPv4(66,45,96,0),20 },
+       { IPv4(66,45,102,0),23 },
+       { IPv4(66,45,104,0),21 },
+       { IPv4(66,45,112,0),22 },
+       { IPv4(66,45,116,0),23 },
+       { IPv4(66,45,120,0),21 },
+       { IPv4(66,46,0,0),16 },
+       { IPv4(66,46,144,0),21 },
+       { IPv4(66,47,4,0),22 },
+       { IPv4(66,47,40,0),22 },
+       { IPv4(66,47,144,0),22 },
+       { IPv4(66,47,148,0),22 },
+       { IPv4(66,47,156,0),22 },
+       { IPv4(66,47,160,0),20 },
+       { IPv4(66,47,188,0),22 },
+       { IPv4(66,47,224,0),20 },
+       { IPv4(66,47,240,0),22 },
+       { IPv4(66,51,7,0),24 },
+       { IPv4(66,51,8,0),24 },
+       { IPv4(66,51,9,0),24 },
+       { IPv4(66,51,10,0),24 },
+       { IPv4(66,51,11,0),24 },
+       { IPv4(66,51,24,0),23 },
+       { IPv4(66,51,26,0),23 },
+       { IPv4(66,51,28,0),24 },
+       { IPv4(66,51,30,0),24 },
+       { IPv4(66,51,32,0),20 },
+       { IPv4(66,51,64,0),20 },
+       { IPv4(66,51,80,0),22 },
+       { IPv4(66,52,192,0),18 },
+       { IPv4(66,53,32,0),19 },
+       { IPv4(66,53,64,0),19 },
+       { IPv4(66,54,154,0),24 },
+       { IPv4(66,54,155,0),24 },
+       { IPv4(66,54,186,0),24 },
+       { IPv4(66,54,193,0),24 },
+       { IPv4(66,54,200,0),21 },
+       { IPv4(66,54,209,0),24 },
+       { IPv4(66,55,0,0),18 },
+       { IPv4(66,56,0,0),18 },
+       { IPv4(66,56,64,0),19 },
+       { IPv4(66,56,96,0),20 },
+       { IPv4(66,56,96,0),19 },
+       { IPv4(66,56,112,0),20 },
+       { IPv4(66,56,128,0),19 },
+       { IPv4(66,56,160,0),19 },
+       { IPv4(66,56,192,0),19 },
+       { IPv4(66,56,224,0),19 },
+       { IPv4(66,57,0,0),19 },
+       { IPv4(66,57,32,0),19 },
+       { IPv4(66,57,32,0),20 },
+       { IPv4(66,57,48,0),20 },
+       { IPv4(66,57,64,0),19 },
+       { IPv4(66,57,96,0),19 },
+       { IPv4(66,57,128,0),19 },
+       { IPv4(66,57,160,0),19 },
+       { IPv4(66,57,192,0),20 },
+       { IPv4(66,61,0,0),20 },
+       { IPv4(66,61,16,0),20 },
+       { IPv4(66,61,32,0),19 },
+       { IPv4(66,61,64,0),18 },
+       { IPv4(66,61,128,0),19 },
+       { IPv4(66,61,144,0),20 },
+       { IPv4(66,61,152,0),21 },
+       { IPv4(66,61,160,0),19 },
+       { IPv4(66,62,0,0),16 },
+       { IPv4(66,62,55,0),24 },
+       { IPv4(66,62,61,0),24 },
+       { IPv4(66,62,224,0),19 },
+       { IPv4(66,62,245,0),24 },
+       { IPv4(66,65,0,0),18 },
+       { IPv4(66,65,64,0),19 },
+       { IPv4(66,65,96,0),20 },
+       { IPv4(66,65,112,0),20 },
+       { IPv4(66,66,0,0),18 },
+       { IPv4(66,66,64,0),18 },
+       { IPv4(66,66,128,0),18 },
+       { IPv4(66,66,192,0),18 },
+       { IPv4(66,67,0,0),19 },
+       { IPv4(66,67,32,0),20 },
+       { IPv4(66,67,48,0),20 },
+       { IPv4(66,67,64,0),18 },
+       { IPv4(66,68,0,0),17 },
+       { IPv4(66,68,128,0),18 },
+       { IPv4(66,69,0,0),17 },
+       { IPv4(66,69,128,0),18 },
+       { IPv4(66,69,192,0),18 },
+       { IPv4(66,70,0,0),18 },
+       { IPv4(66,70,0,0),17 },
+       { IPv4(66,70,58,0),23 },
+       { IPv4(66,70,64,0),18 },
+       { IPv4(66,70,120,0),22 },
+       { IPv4(66,70,152,0),21 },
+       { IPv4(66,70,176,0),22 },
+       { IPv4(66,70,185,0),24 },
+       { IPv4(66,70,188,0),22 },
+       { IPv4(66,70,216,0),21 },
+       { IPv4(66,71,0,0),17 },
+       { IPv4(66,71,128,0),18 },
+       { IPv4(66,74,0,0),18 },
+       { IPv4(66,74,64,0),19 },
+       { IPv4(66,74,96,0),20 },
+       { IPv4(66,74,112,0),20 },
+       { IPv4(66,74,128,0),18 },
+       { IPv4(66,74,192,0),19 },
+       { IPv4(66,74,224,0),20 },
+       { IPv4(66,74,240,0),20 },
+       { IPv4(66,75,0,0),19 },
+       { IPv4(66,75,32,0),19 },
+       { IPv4(66,75,64,0),19 },
+       { IPv4(66,75,96,0),19 },
+       { IPv4(66,75,128,0),19 },
+       { IPv4(66,75,160,0),20 },
+       { IPv4(66,75,176,0),20 },
+       { IPv4(66,75,192,0),19 },
+       { IPv4(66,76,0,0),18 },
+       { IPv4(66,76,64,0),20 },
+       { IPv4(66,76,80,0),20 },
+       { IPv4(66,76,96,0),20 },
+       { IPv4(66,76,112,0),20 },
+       { IPv4(66,76,128,0),20 },
+       { IPv4(66,76,160,0),20 },
+       { IPv4(66,76,176,0),20 },
+       { IPv4(66,77,32,0),21 },
+       { IPv4(66,77,34,0),24 },
+       { IPv4(66,77,36,0),23 },
+       { IPv4(66,77,38,0),24 },
+       { IPv4(66,79,128,0),19 },
+       { IPv4(66,79,129,0),24 },
+       { IPv4(66,79,132,0),24 },
+       { IPv4(66,79,133,0),24 },
+       { IPv4(66,79,135,0),24 },
+       { IPv4(66,79,136,0),24 },
+       { IPv4(66,81,0,0),17 },
+       { IPv4(66,82,0,0),19 },
+       { IPv4(66,87,32,0),20 },
+       { IPv4(66,87,48,0),20 },
+       { IPv4(66,87,128,0),20 },
+       { IPv4(66,87,208,0),20 },
+       { IPv4(66,88,0,0),15 },
+       { IPv4(66,90,0,0),21 },
+       { IPv4(66,91,0,0),18 },
+       { IPv4(66,91,64,0),19 },
+       { IPv4(66,91,96,0),20 },
+       { IPv4(66,92,0,0),19 },
+       { IPv4(66,92,20,0),22 },
+       { IPv4(66,92,32,0),19 },
+       { IPv4(66,92,64,0),19 },
+       { IPv4(66,92,96,0),19 },
+       { IPv4(66,92,128,0),20 },
+       { IPv4(66,92,144,0),20 },
+       { IPv4(66,92,160,0),20 },
+       { IPv4(66,92,176,0),20 },
+       { IPv4(66,92,192,0),22 },
+       { IPv4(66,92,196,0),22 },
+       { IPv4(66,92,200,0),22 },
+       { IPv4(66,92,204,0),22 },
+       { IPv4(66,92,208,0),22 },
+       { IPv4(66,92,216,0),21 },
+       { IPv4(66,92,240,0),21 },
+       { IPv4(66,92,248,0),22 },
+       { IPv4(66,92,252,0),22 },
+       { IPv4(66,95,0,0),17 },
+       { IPv4(66,95,128,0),19 },
+       { IPv4(66,96,0,0),20 },
+       { IPv4(66,96,192,0),18 },
+       { IPv4(66,99,0,0),16 },
+       { IPv4(66,100,104,0),22 },
+       { IPv4(66,100,108,0),23 },
+       { IPv4(66,101,32,0),20 },
+       { IPv4(66,105,0,0),16 },
+       { IPv4(66,106,0,0),15 },
+       { IPv4(66,108,0,0),17 },
+       { IPv4(66,108,128,0),17 },
+       { IPv4(66,109,160,0),20 },
+       { IPv4(66,109,192,0),20 },
+       { IPv4(66,110,28,0),23 },
+       { IPv4(66,111,192,0),19 },
+       { IPv4(66,111,224,0),19 },
+       { IPv4(66,113,0,0),19 },
+       { IPv4(66,114,64,0),20 },
+       { IPv4(66,114,96,0),20 },
+       { IPv4(66,114,128,0),19 },
+       { IPv4(66,115,128,0),18 },
+       { IPv4(66,118,64,0),19 },
+       { IPv4(66,118,80,0),20 },
+       { IPv4(66,118,192,0),19 },
+       { IPv4(66,119,192,0),19 },
+       { IPv4(66,119,196,0),22 },
+       { IPv4(66,119,200,0),22 },
+       { IPv4(66,119,208,0),22 },
+       { IPv4(66,121,192,0),20 },
+       { IPv4(66,122,64,0),20 },
+       { IPv4(66,122,164,0),24 },
+       { IPv4(66,128,2,0),24 },
+       { IPv4(66,128,96,0),20 },
+       { IPv4(66,128,160,0),20 },
+       { IPv4(66,129,64,0),20 },
+       { IPv4(66,129,80,0),20 },
+       { IPv4(66,129,192,0),19 },
+       { IPv4(66,130,0,0),17 },
+       { IPv4(66,133,0,0),18 },
+       { IPv4(66,133,4,0),24 },
+       { IPv4(66,133,21,0),24 },
+       { IPv4(66,135,128,0),20 },
+       { IPv4(66,135,224,0),20 },
+       { IPv4(66,137,176,0),20 },
+       { IPv4(66,144,0,0),15 },
+       { IPv4(66,149,0,0),17 },
+       { IPv4(66,149,64,0),20 },
+       { IPv4(66,149,112,0),22 },
+       { IPv4(66,149,120,0),22 },
+       { IPv4(66,150,0,0),20 },
+       { IPv4(66,150,5,0),24 },
+       { IPv4(66,150,14,0),24 },
+       { IPv4(66,150,16,0),20 },
+       { IPv4(66,150,48,0),20 },
+       { IPv4(66,150,64,0),21 },
+       { IPv4(66,150,64,0),20 },
+       { IPv4(66,150,96,0),20 },
+       { IPv4(66,150,112,0),20 },
+       { IPv4(66,150,128,0),20 },
+       { IPv4(66,150,144,0),20 },
+       { IPv4(66,152,128,0),19 },
+       { IPv4(66,153,128,0),18 },
+       { IPv4(66,153,192,0),20 },
+       { IPv4(66,154,128,0),17 },
+       { IPv4(66,155,0,0),17 },
+       { IPv4(66,158,0,0),17 },
+       { IPv4(66,161,128,0),18 },
+       { IPv4(66,161,138,0),23 },
+       { IPv4(66,162,33,0),24 },
+       { IPv4(66,163,224,0),20 },
+       { IPv4(66,164,0,0),24 },
+       { IPv4(66,164,1,0),24 },
+       { IPv4(66,164,2,0),24 },
+       { IPv4(66,164,4,0),24 },
+       { IPv4(66,164,5,0),24 },
+       { IPv4(66,164,7,0),24 },
+       { IPv4(66,164,200,0),21 },
+       { IPv4(66,164,208,0),21 },
+       { IPv4(66,164,240,0),20 },
+       { IPv4(66,168,32,0),24 },
+       { IPv4(66,168,38,0),23 },
+       { IPv4(66,168,80,0),20 },
+       { IPv4(66,170,96,0),20 },
+       { IPv4(66,175,0,0),18 },
+       { IPv4(66,177,0,0),17 },
+       { IPv4(66,177,128,0),18 },
+       { IPv4(66,177,192,0),19 },
+       { IPv4(66,179,0,0),18 },
+       { IPv4(66,179,0,0),23 },
+       { IPv4(66,179,4,0),22 },
+       { IPv4(66,179,64,0),19 },
+       { IPv4(66,179,96,0),20 },
+       { IPv4(66,180,32,0),20 },
+       { IPv4(66,180,192,0),20 },
+       { IPv4(67,0,0,0),16 },
+       { IPv4(67,8,0,0),19 },
+       { IPv4(67,8,32,0),20 },
+       { IPv4(67,89,0,0),17 },
+       { IPv4(67,89,128,0),18 },
+       { IPv4(67,96,0,0),19 },
+       { IPv4(67,96,0,0),14 },
+       { IPv4(67,96,86,0),24 },
+       { IPv4(67,96,87,0),24 },
+       { IPv4(67,96,88,0),23 },
+       { IPv4(67,96,96,0),21 },
+       { IPv4(67,96,224,0),21 },
+       { IPv4(67,97,64,0),20 },
+       { IPv4(67,97,144,0),21 },
+       { IPv4(67,97,152,0),21 },
+       { IPv4(67,97,160,0),20 },
+       { IPv4(67,97,176,0),21 },
+       { IPv4(67,104,0,0),15 },
+       { IPv4(67,105,4,0),23 },
+       { IPv4(67,160,0,0),16 },
+       { IPv4(67,160,0,0),13 },
+       { IPv4(67,161,0,0),17 },
+       { IPv4(67,161,128,0),17 },
+       { IPv4(67,162,0,0),16 },
+       { IPv4(67,163,0,0),17 },
+       { IPv4(67,163,128,0),17 },
+       { IPv4(67,164,0,0),15 },
+       { IPv4(67,166,0,0),17 },
+       { IPv4(67,166,192,0),18 },
+       { IPv4(67,167,0,0),17 },
+       { IPv4(67,167,128,0),17 },
+       { IPv4(80,0,0,0),13 },
+       { IPv4(80,60,0,0),15 },
+       { IPv4(80,64,32,0),20 },
+       { IPv4(80,65,96,0),20 },
+       { IPv4(80,66,224,0),20 },
+       { IPv4(80,67,168,0),21 },
+       { IPv4(80,68,128,0),20 },
+       { IPv4(80,69,64,0),20 },
+       { IPv4(80,71,64,0),20 },
+       { IPv4(80,72,96,0),20 },
+       { IPv4(80,72,160,0),24 },
+       { IPv4(80,74,128,0),20 },
+       { IPv4(80,75,64,0),20 },
+       { IPv4(80,76,160,0),20 },
+       { IPv4(80,78,32,0),20 },
+       { IPv4(80,78,160,0),20 },
+       { IPv4(80,78,224,0),20 },
+       { IPv4(80,79,160,0),20 },
+       { IPv4(80,79,224,0),20 },
+       { IPv4(80,81,96,0),20 },
+       { IPv4(80,84,160,0),22 },
+       { IPv4(80,86,32,0),20 },
+       { IPv4(80,88,192,0),20 },
+       { IPv4(80,90,128,0),20 },
+       { IPv4(80,91,128,0),20 },
+       { IPv4(80,94,192,0),24 },
+       { IPv4(80,96,3,0),24 },
+       { IPv4(80,96,8,0),24 },
+       { IPv4(80,96,128,0),24 },
+       { IPv4(80,96,148,0),24 },
+       { IPv4(80,96,184,0),24 },
+       { IPv4(80,192,0,0),14 },
+       { IPv4(127,0,0,0),8 },
+       { IPv4(128,2,0,0),16 },
+       { IPv4(128,3,0,0),16 },
+       { IPv4(128,6,0,0),16 },
+       { IPv4(128,15,0,0),16 },
+       { IPv4(128,19,0,0),16 },
+       { IPv4(128,23,0,0),16 },
+       { IPv4(128,32,0,0),16 },
+       { IPv4(128,37,0,0),16 },
+       { IPv4(128,38,0,0),16 },
+       { IPv4(128,47,0,0),16 },
+       { IPv4(128,48,0,0),16 },
+       { IPv4(128,49,0,0),16 },
+       { IPv4(128,54,0,0),16 },
+       { IPv4(128,55,0,0),16 },
+       { IPv4(128,56,0,0),16 },
+       { IPv4(128,59,0,0),16 },
+       { IPv4(128,60,0,0),16 },
+       { IPv4(128,61,0,0),16 },
+       { IPv4(128,62,0,0),16 },
+       { IPv4(128,63,0,0),16 },
+       { IPv4(128,64,32,0),24 },
+       { IPv4(128,64,148,0),22 },
+       { IPv4(128,64,164,0),23 },
+       { IPv4(128,64,192,0),23 },
+       { IPv4(128,64,203,0),24 },
+       { IPv4(128,64,250,0),24 },
+       { IPv4(128,64,251,0),24 },
+       { IPv4(128,83,0,0),16 },
+       { IPv4(128,84,0,0),16 },
+       { IPv4(128,88,0,0),16 },
+       { IPv4(128,91,0,0),16 },
+       { IPv4(128,97,0,0),16 },
+       { IPv4(128,101,0,0),16 },
+       { IPv4(128,102,0,0),16 },
+       { IPv4(128,102,18,0),24 },
+       { IPv4(128,104,25,0),24 },
+       { IPv4(128,107,0,0),16 },
+       { IPv4(128,110,0,0),16 },
+       { IPv4(128,111,0,0),16 },
+       { IPv4(128,112,0,0),16 },
+       { IPv4(128,113,0,0),16 },
+       { IPv4(128,114,0,0),16 },
+       { IPv4(128,115,0,0),16 },
+       { IPv4(128,116,0,0),16 },
+       { IPv4(128,117,0,0),16 },
+       { IPv4(128,118,0,0),16 },
+       { IPv4(128,120,0,0),16 },
+       { IPv4(128,121,0,0),16 },
+       { IPv4(128,122,0,0),16 },
+       { IPv4(128,129,0,0),16 },
+       { IPv4(128,132,0,0),16 },
+       { IPv4(128,134,0,0),16 },
+       { IPv4(128,134,20,0),24 },
+       { IPv4(128,134,21,0),24 },
+       { IPv4(128,134,37,0),24 },
+       { IPv4(128,134,38,0),24 },
+       { IPv4(128,134,39,0),24 },
+       { IPv4(128,134,75,0),24 },
+       { IPv4(128,134,76,0),24 },
+       { IPv4(128,134,85,0),24 },
+       { IPv4(128,134,86,0),24 },
+       { IPv4(128,134,87,0),24 },
+       { IPv4(128,134,88,0),24 },
+       { IPv4(128,134,89,0),24 },
+       { IPv4(128,134,90,0),24 },
+       { IPv4(128,134,91,0),24 },
+       { IPv4(128,134,92,0),24 },
+       { IPv4(128,134,93,0),24 },
+       { IPv4(128,134,94,0),24 },
+       { IPv4(128,134,126,0),24 },
+       { IPv4(128,134,127,0),24 },
+       { IPv4(128,134,135,0),24 },
+       { IPv4(128,134,148,0),24 },
+       { IPv4(128,134,149,0),24 },
+       { IPv4(128,134,150,0),24 },
+       { IPv4(128,134,154,0),24 },
+       { IPv4(128,134,170,0),24 },
+       { IPv4(128,134,225,0),24 },
+       { IPv4(128,138,0,0),16 },
+       { IPv4(128,147,0,0),16 },
+       { IPv4(128,149,0,0),16 },
+       { IPv4(128,151,0,0),16 },
+       { IPv4(128,152,0,0),16 },
+       { IPv4(128,153,0,0),16 },
+       { IPv4(128,154,0,0),16 },
+       { IPv4(128,155,0,0),16 },
+       { IPv4(128,156,0,0),16 },
+       { IPv4(128,157,0,0),16 },
+       { IPv4(128,158,0,0),16 },
+       { IPv4(128,159,0,0),16 },
+       { IPv4(128,160,0,0),16 },
+       { IPv4(128,162,0,0),16 },
+       { IPv4(128,163,0,0),16 },
+       { IPv4(128,164,0,0),16 },
+       { IPv4(128,165,0,0),16 },
+       { IPv4(128,170,0,0),16 },
+       { IPv4(128,174,0,0),16 },
+       { IPv4(128,177,0,0),16 },
+       { IPv4(128,177,208,0),20 },
+       { IPv4(128,177,246,0),24 },
+       { IPv4(128,177,248,0),24 },
+       { IPv4(128,180,0,0),16 },
+       { IPv4(128,182,0,0),16 },
+       { IPv4(128,182,64,0),18 },
+       { IPv4(128,183,0,0),16 },
+       { IPv4(128,187,0,0),16 },
+       { IPv4(128,190,0,0),16 },
+       { IPv4(128,190,132,0),24 },
+       { IPv4(128,190,161,0),26 },
+       { IPv4(128,190,203,0),27 },
+       { IPv4(128,190,250,0),24 },
+       { IPv4(128,192,0,0),16 },
+       { IPv4(128,195,0,0),16 },
+       { IPv4(128,196,0,0),16 },
+       { IPv4(128,198,0,0),16 },
+       { IPv4(128,200,0,0),16 },
+       { IPv4(128,202,0,0),16 },
+       { IPv4(128,205,0,0),16 },
+       { IPv4(128,206,0,0),16 },
+       { IPv4(128,209,0,0),16 },
+       { IPv4(128,213,0,0),16 },
+       { IPv4(128,217,0,0),16 },
+       { IPv4(128,218,0,0),16 },
+       { IPv4(128,219,0,0),16 },
+       { IPv4(128,220,0,0),16 },
+       { IPv4(128,226,0,0),16 },
+       { IPv4(128,228,0,0),16 },
+       { IPv4(128,230,0,0),16 },
+       { IPv4(128,236,0,0),16 },
+       { IPv4(128,237,0,0),16 },
+       { IPv4(128,238,0,0),16 },
+       { IPv4(128,241,0,0),16 },
+       { IPv4(128,242,0,0),16 },
+       { IPv4(128,242,192,0),18 },
+       { IPv4(128,246,0,0),16 },
+       { IPv4(128,248,0,0),16 },
+       { IPv4(128,252,0,0),16 },
+       { IPv4(128,253,0,0),16 },
+       { IPv4(128,255,0,0),16 },
+       { IPv4(129,3,0,0),16 },
+       { IPv4(129,8,0,0),16 },
+       { IPv4(129,9,0,0),16 },
+       { IPv4(129,17,0,0),16 },
+       { IPv4(129,19,0,0),16 },
+       { IPv4(129,21,0,0),16 },
+       { IPv4(129,29,0,0),16 },
+       { IPv4(129,30,0,0),16 },
+       { IPv4(129,33,0,0),19 },
+       { IPv4(129,33,0,0),16 },
+       { IPv4(129,33,32,0),19 },
+       { IPv4(129,33,64,0),19 },
+       { IPv4(129,33,96,0),19 },
+       { IPv4(129,33,128,0),19 },
+       { IPv4(129,33,160,0),19 },
+       { IPv4(129,33,224,0),20 },
+       { IPv4(129,33,224,0),19 },
+       { IPv4(129,35,0,0),16 },
+       { IPv4(129,35,40,0),21 },
+       { IPv4(129,35,64,0),22 },
+       { IPv4(129,35,65,0),24 },
+       { IPv4(129,35,68,0),22 },
+       { IPv4(129,35,72,0),22 },
+       { IPv4(129,35,76,0),22 },
+       { IPv4(129,35,96,0),20 },
+       { IPv4(129,35,128,0),20 },
+       { IPv4(129,35,160,0),22 },
+       { IPv4(129,35,160,0),20 },
+       { IPv4(129,35,192,0),21 },
+       { IPv4(129,35,224,0),21 },
+       { IPv4(129,35,232,0),22 },
+       { IPv4(129,37,0,0),16 },
+       { IPv4(129,37,25,0),24 },
+       { IPv4(129,37,37,0),24 },
+       { IPv4(129,37,40,0),24 },
+       { IPv4(129,37,70,0),24 },
+       { IPv4(129,37,78,0),24 },
+       { IPv4(129,37,81,0),24 },
+       { IPv4(129,37,95,0),24 },
+       { IPv4(129,37,97,0),24 },
+       { IPv4(129,37,109,0),24 },
+       { IPv4(129,37,112,0),24 },
+       { IPv4(129,37,136,0),21 },
+       { IPv4(129,37,144,0),20 },
+       { IPv4(129,37,152,0),24 },
+       { IPv4(129,37,160,0),20 },
+       { IPv4(129,37,176,0),22 },
+       { IPv4(129,37,180,0),23 },
+       { IPv4(129,37,184,0),24 },
+       { IPv4(129,37,204,0),24 },
+       { IPv4(129,37,243,0),24 },
+       { IPv4(129,37,254,0),24 },
+       { IPv4(129,41,32,0),20 },
+       { IPv4(129,41,80,0),20 },
+       { IPv4(129,41,192,0),20 },
+       { IPv4(129,41,208,0),20 },
+       { IPv4(129,42,0,0),16 },
+       { IPv4(129,42,1,0),24 },
+       { IPv4(129,42,2,0),24 },
+       { IPv4(129,42,3,0),24 },
+       { IPv4(129,42,4,0),24 },
+       { IPv4(129,42,8,0),24 },
+       { IPv4(129,42,9,0),24 },
+       { IPv4(129,42,10,0),24 },
+       { IPv4(129,42,14,0),24 },
+       { IPv4(129,42,16,0),24 },
+       { IPv4(129,42,17,0),24 },
+       { IPv4(129,42,18,0),24 },
+       { IPv4(129,42,19,0),24 },
+       { IPv4(129,42,20,0),24 },
+       { IPv4(129,42,21,0),24 },
+       { IPv4(129,42,24,0),24 },
+       { IPv4(129,42,26,0),24 },
+       { IPv4(129,42,36,0),24 },
+       { IPv4(129,42,37,0),24 },
+       { IPv4(129,42,38,0),24 },
+       { IPv4(129,42,39,0),24 },
+       { IPv4(129,42,40,0),24 },
+       { IPv4(129,42,41,0),24 },
+       { IPv4(129,42,42,0),24 },
+       { IPv4(129,42,43,0),24 },
+       { IPv4(129,42,44,0),24 },
+       { IPv4(129,42,45,0),24 },
+       { IPv4(129,42,46,0),24 },
+       { IPv4(129,42,47,0),24 },
+       { IPv4(129,42,48,0),24 },
+       { IPv4(129,42,50,0),24 },
+       { IPv4(129,42,52,0),24 },
+       { IPv4(129,42,53,0),24 },
+       { IPv4(129,42,54,0),24 },
+       { IPv4(129,42,56,0),24 },
+       { IPv4(129,42,57,0),24 },
+       { IPv4(129,42,59,0),24 },
+       { IPv4(129,42,208,0),24 },
+       { IPv4(129,42,240,0),24 },
+       { IPv4(129,42,241,0),24 },
+       { IPv4(129,42,242,0),24 },
+       { IPv4(129,42,243,0),24 },
+       { IPv4(129,42,244,0),24 },
+       { IPv4(129,42,246,0),24 },
+       { IPv4(129,46,0,0),16 },
+       { IPv4(129,48,0,0),16 },
+       { IPv4(129,49,0,0),16 },
+       { IPv4(129,51,0,0),16 },
+       { IPv4(129,52,0,0),16 },
+       { IPv4(129,53,0,0),16 },
+       { IPv4(129,54,0,0),16 },
+       { IPv4(129,57,0,0),16 },
+       { IPv4(129,59,0,0),16 },
+       { IPv4(129,61,0,0),16 },
+       { IPv4(129,62,0,0),16 },
+       { IPv4(129,65,0,0),16 },
+       { IPv4(129,72,0,0),16 },
+       { IPv4(129,73,0,0),16 },
+       { IPv4(129,79,0,0),16 },
+       { IPv4(129,81,0,0),16 },
+       { IPv4(129,82,0,0),16 },
+       { IPv4(129,85,0,0),16 },
+       { IPv4(129,92,0,0),16 },
+       { IPv4(129,98,0,0),16 },
+       { IPv4(129,99,0,0),16 },
+       { IPv4(129,100,0,0),16 },
+       { IPv4(129,105,0,0),16 },
+       { IPv4(129,106,0,0),16 },
+       { IPv4(129,116,0,0),16 },
+       { IPv4(129,123,0,0),16 },
+       { IPv4(129,131,0,0),16 },
+       { IPv4(129,139,0,0),16 },
+       { IPv4(129,141,0,0),16 },
+       { IPv4(129,164,0,0),16 },
+       { IPv4(129,165,0,0),16 },
+       { IPv4(129,172,0,0),16 },
+       { IPv4(129,176,0,0),16 },
+       { IPv4(129,179,0,0),16 },
+       { IPv4(129,186,0,0),16 },
+       { IPv4(129,190,0,0),16 },
+       { IPv4(129,191,0,0),16 },
+       { IPv4(129,196,0,0),16 },
+       { IPv4(129,197,0,0),16 },
+       { IPv4(129,198,0,0),16 },
+       { IPv4(129,200,0,0),16 },
+       { IPv4(129,210,0,0),16 },
+       { IPv4(129,212,0,0),16 },
+       { IPv4(129,218,0,0),16 },
+       { IPv4(129,219,0,0),16 },
+       { IPv4(129,223,96,0),19 },
+       { IPv4(129,223,123,0),24 },
+       { IPv4(129,223,136,0),21 },
+       { IPv4(129,223,148,0),22 },
+       { IPv4(129,223,152,0),24 },
+       { IPv4(129,223,153,0),24 },
+       { IPv4(129,223,155,0),24 },
+       { IPv4(129,225,0,0),16 },
+       { IPv4(129,227,0,0),16 },
+       { IPv4(129,229,0,0),16 },
+       { IPv4(129,235,0,0),16 },
+       { IPv4(129,236,0,0),16 },
+       { IPv4(129,238,0,0),16 },
+       { IPv4(129,239,0,0),16 },
+       { IPv4(129,246,0,0),16 },
+       { IPv4(129,246,6,0),24 },
+       { IPv4(129,250,0,0),16 },
+       { IPv4(129,252,0,0),16 },
+       { IPv4(129,253,0,0),16 },
+       { IPv4(129,254,0,0),16 },
+       { IPv4(129,255,0,0),16 },
+       { IPv4(130,11,0,0),16 },
+       { IPv4(130,13,0,0),16 },
+       { IPv4(130,17,0,0),16 },
+       { IPv4(130,20,0,0),16 },
+       { IPv4(130,22,0,0),16 },
+       { IPv4(130,27,0,0),16 },
+       { IPv4(130,29,0,0),16 },
+       { IPv4(130,30,0,0),16 },
+       { IPv4(130,36,61,0),24 },
+       { IPv4(130,38,0,0),16 },
+       { IPv4(130,44,0,0),16 },
+       { IPv4(130,46,0,0),16 },
+       { IPv4(130,49,0,0),16 },
+       { IPv4(130,49,0,0),17 },
+       { IPv4(130,49,246,0),23 },
+       { IPv4(130,50,0,0),16 },
+       { IPv4(130,50,0,0),17 },
+       { IPv4(130,53,0,0),16 },
+       { IPv4(130,57,0,0),16 },
+       { IPv4(130,64,0,0),16 },
+       { IPv4(130,64,128,0),19 },
+       { IPv4(130,65,0,0),16 },
+       { IPv4(130,71,0,0),16 },
+       { IPv4(130,86,0,0),16 },
+       { IPv4(130,91,0,0),16 },
+       { IPv4(130,94,0,0),16 },
+       { IPv4(130,99,0,0),16 },
+       { IPv4(130,102,28,0),24 },
+       { IPv4(130,107,0,0),16 },
+       { IPv4(130,109,0,0),16 },
+       { IPv4(130,110,0,0),16 },
+       { IPv4(130,114,0,0),16 },
+       { IPv4(130,118,0,0),16 },
+       { IPv4(130,123,0,0),16 },
+       { IPv4(130,126,0,0),16 },
+       { IPv4(130,127,0,0),16 },
+       { IPv4(130,134,0,0),16 },
+       { IPv4(130,135,0,0),16 },
+       { IPv4(130,150,0,0),16 },
+       { IPv4(130,154,0,0),16 },
+       { IPv4(130,157,0,0),16 },
+       { IPv4(130,162,0,0),16 },
+       { IPv4(130,163,0,0),16 },
+       { IPv4(130,164,0,0),16 },
+       { IPv4(130,164,143,0),24 },
+       { IPv4(130,164,166,0),24 },
+       { IPv4(130,164,168,0),24 },
+       { IPv4(130,164,175,0),24 },
+       { IPv4(130,164,254,0),24 },
+       { IPv4(130,166,0,0),16 },
+       { IPv4(130,167,0,0),16 },
+       { IPv4(130,182,0,0),16 },
+       { IPv4(130,187,0,0),16 },
+       { IPv4(130,191,0,0),16 },
+       { IPv4(130,199,0,0),16 },
+       { IPv4(130,202,0,0),16 },
+       { IPv4(130,203,0,0),16 },
+       { IPv4(130,205,0,0),16 },
+       { IPv4(130,207,0,0),16 },
+       { IPv4(130,212,0,0),16 },
+       { IPv4(130,216,0,0),16 },
+       { IPv4(130,218,0,0),16 },
+       { IPv4(130,245,0,0),16 },
+       { IPv4(130,253,0,0),16 },
+       { IPv4(130,254,0,0),16 },
+       { IPv4(131,2,0,0),15 },
+       { IPv4(131,4,0,0),14 },
+       { IPv4(131,8,0,0),13 },
+       { IPv4(131,16,0,0),12 },
+       { IPv4(131,32,0,0),11 },
+       { IPv4(131,36,0,0),16 },
+       { IPv4(131,38,0,0),16 },
+       { IPv4(131,49,0,0),16 },
+       { IPv4(131,64,0,0),12 },
+       { IPv4(131,71,0,0),16 },
+       { IPv4(131,80,0,0),14 },
+       { IPv4(131,86,0,0),15 },
+       { IPv4(131,86,1,0),24 },
+       { IPv4(131,92,0,0),16 },
+       { IPv4(131,96,0,0),16 },
+       { IPv4(131,100,0,0),16 },
+       { IPv4(131,103,0,0),16 },
+       { IPv4(131,107,0,0),16 },
+       { IPv4(131,110,0,0),16 },
+       { IPv4(131,113,0,0),16 },
+       { IPv4(131,120,0,0),16 },
+       { IPv4(131,121,0,0),16 },
+       { IPv4(131,122,0,0),16 },
+       { IPv4(131,123,0,0),16 },
+       { IPv4(131,124,96,0),19 },
+       { IPv4(131,132,0,0),16 },
+       { IPv4(131,135,0,0),16 },
+       { IPv4(131,136,0,0),16 },
+       { IPv4(131,137,0,0),16 },
+       { IPv4(131,144,0,0),16 },
+       { IPv4(131,148,0,0),16 },
+       { IPv4(131,149,0,0),16 },
+       { IPv4(131,151,0,0),16 },
+       { IPv4(131,161,0,0),16 },
+       { IPv4(131,161,54,0),24 },
+       { IPv4(131,161,200,0),22 },
+       { IPv4(131,161,200,0),21 },
+       { IPv4(131,161,208,0),20 },
+       { IPv4(131,161,217,0),24 },
+       { IPv4(131,167,0,0),16 },
+       { IPv4(131,178,0,0),16 },
+       { IPv4(131,179,0,0),16 },
+       { IPv4(131,182,0,0),16 },
+       { IPv4(131,184,0,0),16 },
+       { IPv4(131,184,146,0),24 },
+       { IPv4(131,193,0,0),16 },
+       { IPv4(131,197,66,0),24 },
+       { IPv4(131,197,192,0),24 },
+       { IPv4(131,197,196,0),24 },
+       { IPv4(131,197,224,0),24 },
+       { IPv4(131,197,228,0),24 },
+       { IPv4(131,201,0,0),16 },
+       { IPv4(131,203,0,0),16 },
+       { IPv4(131,204,0,0),16 },
+       { IPv4(131,212,0,0),16 },
+       { IPv4(131,214,0,0),16 },
+       { IPv4(131,218,0,0),16 },
+       { IPv4(131,222,0,0),16 },
+       { IPv4(131,225,0,0),16 },
+       { IPv4(131,230,0,0),16 },
+       { IPv4(131,230,224,0),20 },
+       { IPv4(131,233,0,0),16 },
+       { IPv4(131,239,0,0),16 },
+       { IPv4(131,243,0,0),16 },
+       { IPv4(131,244,0,0),15 },
+       { IPv4(131,250,0,0),16 },
+       { IPv4(132,0,0,0),10 },
+       { IPv4(132,8,1,0),24 },
+       { IPv4(132,15,0,0),16 },
+       { IPv4(132,16,0,0),16 },
+       { IPv4(132,20,0,0),16 },
+       { IPv4(132,61,0,0),16 },
+       { IPv4(132,79,0,0),16 },
+       { IPv4(132,80,0,0),12 },
+       { IPv4(132,96,0,0),11 },
+       { IPv4(132,128,0,0),12 },
+       { IPv4(132,146,0,0),16 },
+       { IPv4(132,151,0,0),18 },
+       { IPv4(132,151,0,0),16 },
+       { IPv4(132,151,64,0),24 },
+       { IPv4(132,156,0,0),16 },
+       { IPv4(132,159,0,0),16 },
+       { IPv4(132,161,0,0),16 },
+       { IPv4(132,163,0,0),16 },
+       { IPv4(132,175,0,0),16 },
+       { IPv4(132,188,0,0),19 },
+       { IPv4(132,189,0,0),16 },
+       { IPv4(132,193,0,0),16 },
+       { IPv4(132,194,0,0),16 },
+       { IPv4(132,200,0,0),16 },
+       { IPv4(132,221,0,0),16 },
+       { IPv4(132,226,0,0),16 },
+       { IPv4(132,228,0,0),16 },
+       { IPv4(132,236,0,0),16 },
+       { IPv4(132,237,0,0),16 },
+       { IPv4(132,239,0,0),16 },
+       { IPv4(132,240,0,0),16 },
+       { IPv4(132,241,0,0),16 },
+       { IPv4(132,247,0,0),16 },
+       { IPv4(132,248,0,0),16 },
+       { IPv4(132,249,0,0),16 },
+       { IPv4(132,249,20,0),24 },
+       { IPv4(132,249,30,0),24 },
+       { IPv4(132,250,0,0),16 },
+       { IPv4(132,254,0,0),16 },
+       { IPv4(132,254,0,0),19 },
+       { IPv4(132,254,48,0),21 },
+       { IPv4(132,254,56,0),21 },
+       { IPv4(132,254,72,0),21 },
+       { IPv4(132,254,78,0),24 },
+       { IPv4(132,254,80,0),21 },
+       { IPv4(132,254,88,0),21 },
+       { IPv4(132,254,96,0),21 },
+       { IPv4(132,254,112,0),21 },
+       { IPv4(132,254,120,0),21 },
+       { IPv4(132,254,128,0),21 },
+       { IPv4(132,254,144,0),21 },
+       { IPv4(132,254,192,0),20 },
+       { IPv4(132,254,208,0),21 },
+       { IPv4(132,254,208,0),20 },
+       { IPv4(132,254,216,0),21 },
+       { IPv4(132,254,224,0),19 },
+       { IPv4(132,254,232,0),24 },
+       { IPv4(133,9,0,0),16 },
+       { IPv4(133,12,0,0),16 },
+       { IPv4(133,18,0,0),16 },
+       { IPv4(133,27,0,0),16 },
+       { IPv4(133,53,0,0),16 },
+       { IPv4(133,54,0,0),16 },
+       { IPv4(133,63,0,0),16 },
+       { IPv4(133,69,0,0),16 },
+       { IPv4(133,105,0,0),16 },
+       { IPv4(133,121,0,0),16 },
+       { IPv4(133,123,0,0),16 },
+       { IPv4(133,126,0,0),16 },
+       { IPv4(133,137,0,0),16 },
+       { IPv4(133,138,0,0),16 },
+       { IPv4(133,144,0,0),16 },
+       { IPv4(133,145,0,0),16 },
+       { IPv4(133,146,0,0),16 },
+       { IPv4(133,170,0,0),16 },
+       { IPv4(133,175,0,0),16 },
+       { IPv4(133,186,0,0),16 },
+       { IPv4(133,187,0,0),16 },
+       { IPv4(133,188,0,0),16 },
+       { IPv4(133,205,0,0),16 },
+       { IPv4(133,217,0,0),16 },
+       { IPv4(133,232,0,0),16 },
+       { IPv4(133,235,0,0),16 },
+       { IPv4(133,243,0,0),16 },
+       { IPv4(133,250,0,0),16 },
+       { IPv4(134,5,0,0),16 },
+       { IPv4(134,8,5,0),24 },
+       { IPv4(134,9,0,0),16 },
+       { IPv4(134,10,0,0),16 },
+       { IPv4(134,12,0,0),16 },
+       { IPv4(134,13,0,0),16 },
+       { IPv4(134,15,0,0),16 },
+       { IPv4(134,17,0,0),16 },
+       { IPv4(134,18,0,0),16 },
+       { IPv4(134,20,0,0),16 },
+       { IPv4(134,24,0,0),16 },
+       { IPv4(134,24,10,0),24 },
+       { IPv4(134,24,71,0),24 },
+       { IPv4(134,24,92,0),24 },
+       { IPv4(134,24,100,0),24 },
+       { IPv4(134,24,123,0),24 },
+       { IPv4(134,24,125,0),24 },
+       { IPv4(134,24,153,0),24 },
+       { IPv4(134,29,0,0),16 },
+       { IPv4(134,43,10,0),23 },
+       { IPv4(134,43,12,0),24 },
+       { IPv4(134,43,61,0),24 },
+       { IPv4(134,43,101,0),24 },
+       { IPv4(134,49,0,0),16 },
+       { IPv4(134,49,68,0),22 },
+       { IPv4(134,49,72,0),22 },
+       { IPv4(134,49,128,0),21 },
+       { IPv4(134,49,136,0),21 },
+       { IPv4(134,50,0,0),16 },
+       { IPv4(134,54,0,0),16 },
+       { IPv4(134,55,0,0),16 },
+       { IPv4(134,57,0,0),16 },
+       { IPv4(134,65,0,0),16 },
+       { IPv4(134,66,0,0),16 },
+       { IPv4(134,71,0,0),16 },
+       { IPv4(134,74,0,0),16 },
+       { IPv4(134,75,0,0),16 },
+       { IPv4(134,75,7,0),24 },
+       { IPv4(134,75,12,0),24 },
+       { IPv4(134,75,18,0),24 },
+       { IPv4(134,75,30,0),24 },
+       { IPv4(134,75,50,0),24 },
+       { IPv4(134,75,55,0),24 },
+       { IPv4(134,75,122,0),24 },
+       { IPv4(134,75,171,0),24 },
+       { IPv4(134,75,172,0),24 },
+       { IPv4(134,75,180,0),24 },
+       { IPv4(134,75,196,0),24 },
+       { IPv4(134,75,197,0),24 },
+       { IPv4(134,75,217,0),24 },
+       { IPv4(134,75,226,0),24 },
+       { IPv4(134,78,0,0),16 },
+       { IPv4(134,78,0,0),15 },
+       { IPv4(134,79,0,0),16 },
+       { IPv4(134,82,0,0),16 },
+       { IPv4(134,84,0,0),16 },
+       { IPv4(134,114,0,0),16 },
+       { IPv4(134,120,0,0),16 },
+       { IPv4(134,124,0,0),16 },
+       { IPv4(134,125,0,0),16 },
+       { IPv4(134,127,0,0),16 },
+       { IPv4(134,128,160,0),22 },
+       { IPv4(134,131,0,0),16 },
+       { IPv4(134,136,0,0),16 },
+       { IPv4(134,137,0,0),16 },
+       { IPv4(134,139,0,0),16 },
+       { IPv4(134,141,0,0),16 },
+       { IPv4(134,141,242,0),24 },
+       { IPv4(134,160,0,0),16 },
+       { IPv4(134,161,0,0),16 },
+       { IPv4(134,164,0,0),16 },
+       { IPv4(134,167,0,0),16 },
+       { IPv4(134,172,0,0),16 },
+       { IPv4(134,180,0,0),16 },
+       { IPv4(134,193,0,0),16 },
+       { IPv4(134,194,0,0),16 },
+       { IPv4(134,201,0,0),16 },
+       { IPv4(134,207,0,0),16 },
+       { IPv4(134,208,0,0),16 },
+       { IPv4(134,217,0,0),16 },
+       { IPv4(134,224,0,0),16 },
+       { IPv4(134,229,0,0),16 },
+       { IPv4(134,231,0,0),16 },
+       { IPv4(134,233,0,0),16 },
+       { IPv4(134,235,0,0),16 },
+       { IPv4(134,238,0,0),16 },
+       { IPv4(134,239,0,0),16 },
+       { IPv4(134,240,0,0),16 },
+       { IPv4(134,241,0,0),17 },
+       { IPv4(134,241,0,0),16 },
+       { IPv4(134,241,128,0),17 },
+       { IPv4(134,247,0,0),16 },
+       { IPv4(134,250,0,0),16 },
+       { IPv4(134,252,0,0),16 },
+       { IPv4(134,253,0,0),16 },
+       { IPv4(135,53,0,0),16 },
+       { IPv4(135,76,9,0),24 },
+       { IPv4(135,118,6,0),24 },
+       { IPv4(135,118,7,0),24 },
+       { IPv4(135,118,8,0),24 },
+       { IPv4(135,118,9,0),24 },
+       { IPv4(135,120,254,0),24 },
+       { IPv4(135,138,233,0),24 },
+       { IPv4(135,145,0,0),16 },
+       { IPv4(135,155,0,0),16 },
+       { IPv4(135,197,0,0),16 },
+       { IPv4(135,206,0,0),16 },
+       { IPv4(135,209,0,0),18 },
+       { IPv4(135,209,64,0),19 },
+       { IPv4(135,209,96,0),19 },
+       { IPv4(135,209,128,0),17 },
+       { IPv4(135,214,0,0),16 },
+       { IPv4(135,216,0,0),16 },
+       { IPv4(135,218,0,0),16 },
+       { IPv4(135,250,0,0),16 },
+       { IPv4(136,1,0,0),16 },
+       { IPv4(136,2,0,0),16 },
+       { IPv4(136,141,0,0),16 },
+       { IPv4(136,142,0,0),16 },
+       { IPv4(136,149,0,0),16 },
+       { IPv4(136,150,0,0),16 },
+       { IPv4(136,150,2,0),24 },
+       { IPv4(136,150,4,0),24 },
+       { IPv4(136,150,40,0),24 },
+       { IPv4(136,150,45,0),24 },
+       { IPv4(136,150,46,0),24 },
+       { IPv4(136,150,60,0),24 },
+       { IPv4(136,150,100,0),24 },
+       { IPv4(136,150,102,0),24 },
+       { IPv4(136,150,103,0),24 },
+       { IPv4(136,152,0,0),16 },
+       { IPv4(136,154,0,0),16 },
+       { IPv4(136,166,0,0),16 },
+       { IPv4(136,167,0,0),16 },
+       { IPv4(136,168,0,0),16 },
+       { IPv4(136,175,0,0),16 },
+       { IPv4(136,176,0,0),16 },
+       { IPv4(136,177,0,0),16 },
+       { IPv4(136,184,0,0),16 },
+       { IPv4(136,204,0,0),16 },
+       { IPv4(136,204,192,0),19 },
+       { IPv4(136,204,224,0),22 },
+       { IPv4(136,204,228,0),23 },
+       { IPv4(136,205,0,0),16 },
+       { IPv4(136,207,0,0),16 },
+       { IPv4(136,209,0,0),16 },
+       { IPv4(136,212,0,0),14 },
+       { IPv4(136,216,0,0),13 },
+       { IPv4(136,223,0,0),20 },
+       { IPv4(136,223,16,0),24 },
+       { IPv4(136,223,17,0),24 },
+       { IPv4(136,223,18,0),24 },
+       { IPv4(136,223,19,0),24 },
+       { IPv4(136,223,32,0),24 },
+       { IPv4(136,223,96,0),24 },
+       { IPv4(136,223,97,0),24 },
+       { IPv4(136,224,0,0),18 },
+       { IPv4(136,224,0,0),16 },
+       { IPv4(136,224,64,0),19 },
+       { IPv4(136,224,96,0),19 },
+       { IPv4(136,224,124,0),23 },
+       { IPv4(136,224,128,0),20 },
+       { IPv4(136,224,144,0),20 },
+       { IPv4(136,224,160,0),20 },
+       { IPv4(136,224,176,0),20 },
+       { IPv4(136,224,192,0),19 },
+       { IPv4(136,224,224,0),21 },
+       { IPv4(136,224,232,0),21 },
+       { IPv4(136,224,240,0),21 },
+       { IPv4(136,224,248,0),21 },
+       { IPv4(136,226,0,0),16 },
+       { IPv4(136,229,0,0),16 },
+       { IPv4(136,234,0,0),16 },
+       { IPv4(136,237,0,0),16 },
+       { IPv4(136,244,0,0),16 },
+       { IPv4(136,244,0,0),19 },
+       { IPv4(136,244,32,0),19 },
+       { IPv4(136,244,64,0),19 },
+       { IPv4(136,244,96,0),19 },
+       { IPv4(136,244,128,0),17 },
+       { IPv4(136,248,0,0),16 },
+       { IPv4(137,0,0,0),13 },
+       { IPv4(137,8,0,0),14 },
+       { IPv4(137,14,0,0),16 },
+       { IPv4(137,16,0,0),16 },
+       { IPv4(137,21,0,0),16 },
+       { IPv4(137,22,0,0),16 },
+       { IPv4(137,24,0,0),16 },
+       { IPv4(137,32,0,0),16 },
+       { IPv4(137,33,0,0),16 },
+       { IPv4(137,37,0,0),16 },
+       { IPv4(137,38,0,0),16 },
+       { IPv4(137,49,0,0),16 },
+       { IPv4(137,65,0,0),16 },
+       { IPv4(137,66,0,0),16 },
+       { IPv4(137,67,0,0),16 },
+       { IPv4(137,68,0,0),16 },
+       { IPv4(137,70,0,0),16 },
+       { IPv4(137,75,0,0),16 },
+       { IPv4(137,77,0,0),16 },
+       { IPv4(137,78,0,0),16 },
+       { IPv4(137,79,0,0),16 },
+       { IPv4(137,80,0,0),16 },
+       { IPv4(137,95,0,0),16 },
+       { IPv4(137,97,0,0),16 },
+       { IPv4(137,103,0,0),16 },
+       { IPv4(137,110,0,0),16 },
+       { IPv4(137,118,192,0),22 },
+       { IPv4(137,124,0,0),16 },
+       { IPv4(137,125,0,0),16 },
+       { IPv4(137,128,0,0),16 },
+       { IPv4(137,131,0,0),16 },
+       { IPv4(137,132,0,0),16 },
+       { IPv4(137,139,0,0),16 },
+       { IPv4(137,140,0,0),16 },
+       { IPv4(137,141,0,0),16 },
+       { IPv4(137,142,0,0),16 },
+       { IPv4(137,143,0,0),16 },
+       { IPv4(137,143,128,0),17 },
+       { IPv4(137,145,0,0),16 },
+       { IPv4(137,150,0,0),16 },
+       { IPv4(137,151,0,0),16 },
+       { IPv4(137,158,0,0),16 },
+       { IPv4(137,159,0,0),16 },
+       { IPv4(137,164,1,0),24 },
+       { IPv4(137,164,2,0),24 },
+       { IPv4(137,164,3,0),24 },
+       { IPv4(137,164,4,0),24 },
+       { IPv4(137,164,5,0),24 },
+       { IPv4(137,164,6,0),24 },
+       { IPv4(137,164,7,0),24 },
+       { IPv4(137,164,8,0),24 },
+       { IPv4(137,164,9,0),24 },
+       { IPv4(137,164,10,0),24 },
+       { IPv4(137,164,11,0),24 },
+       { IPv4(137,164,12,0),24 },
+       { IPv4(137,164,13,0),24 },
+       { IPv4(137,164,14,0),24 },
+       { IPv4(137,169,0,0),16 },
+       { IPv4(137,169,80,0),24 },
+       { IPv4(137,169,81,0),24 },
+       { IPv4(137,169,144,0),20 },
+       { IPv4(137,170,0,0),16 },
+       { IPv4(137,190,0,0),16 },
+       { IPv4(137,192,0,0),16 },
+       { IPv4(137,209,0,0),16 },
+       { IPv4(137,214,0,0),15 },
+       { IPv4(137,227,0,0),16 },
+       { IPv4(137,228,0,0),16 },
+       { IPv4(137,230,0,0),16 },
+       { IPv4(137,240,0,0),14 },
+       { IPv4(137,244,0,0),16 },
+       { IPv4(137,246,0,0),16 },
+       { IPv4(137,247,0,0),16 },
+       { IPv4(137,252,0,0),16 },
+       { IPv4(138,5,0,0),16 },
+       { IPv4(138,12,0,0),16 },
+       { IPv4(138,13,0,0),16 },
+       { IPv4(138,18,0,0),16 },
+       { IPv4(138,18,144,0),24 },
+       { IPv4(138,23,0,0),16 },
+       { IPv4(138,27,0,0),16 },
+       { IPv4(138,29,0,0),16 },
+       { IPv4(138,32,32,0),20 },
+       { IPv4(138,32,48,0),20 },
+       { IPv4(138,39,0,0),16 },
+       { IPv4(138,46,0,0),16 },
+       { IPv4(138,50,0,0),16 },
+       { IPv4(138,60,0,0),16 },
+       { IPv4(138,67,0,0),16 },
+       { IPv4(138,72,0,0),16 },
+       { IPv4(138,84,0,0),16 },
+       { IPv4(138,86,0,0),16 },
+       { IPv4(138,87,0,0),16 },
+       { IPv4(138,92,0,0),16 },
+       { IPv4(138,101,0,0),16 },
+       { IPv4(138,105,0,0),16 },
+       { IPv4(138,107,0,0),16 },
+       { IPv4(138,115,0,0),16 },
+       { IPv4(138,116,0,0),16 },
+       { IPv4(138,125,0,0),16 },
+       { IPv4(138,127,0,0),16 },
+       { IPv4(138,129,0,0),16 },
+       { IPv4(138,132,0,0),16 },
+       { IPv4(138,136,0,0),13 },
+       { IPv4(138,144,0,0),12 },
+       { IPv4(138,164,0,0),16 },
+       { IPv4(138,164,0,0),14 },
+       { IPv4(138,168,0,0),14 },
+       { IPv4(138,168,0,0),16 },
+       { IPv4(138,178,0,0),15 },
+       { IPv4(138,180,0,0),14 },
+       { IPv4(138,181,0,0),16 },
+       { IPv4(138,183,0,0),17 },
+       { IPv4(138,184,0,0),16 },
+       { IPv4(138,189,0,0),16 },
+       { IPv4(138,198,0,0),16 },
+       { IPv4(138,226,0,0),16 },
+       { IPv4(138,229,0,0),16 },
+       { IPv4(138,230,0,0),16 },
+       { IPv4(138,234,0,0),16 },
+       { IPv4(139,2,0,0),16 },
+       { IPv4(139,27,0,0),16 },
+       { IPv4(139,47,0,0),16 },
+       { IPv4(139,48,0,0),16 },
+       { IPv4(139,53,0,0),16 },
+       { IPv4(139,56,64,0),19 },
+       { IPv4(139,62,0,0),16 },
+       { IPv4(139,65,0,0),16 },
+       { IPv4(139,67,0,0),16 },
+       { IPv4(139,72,0,0),16 },
+       { IPv4(139,87,0,0),16 },
+       { IPv4(139,88,0,0),16 },
+       { IPv4(139,92,0,0),16 },
+       { IPv4(139,93,0,0),16 },
+       { IPv4(139,131,128,0),18 },
+       { IPv4(139,131,192,0),19 },
+       { IPv4(139,139,0,0),16 },
+       { IPv4(139,141,0,0),16 },
+       { IPv4(139,144,0,0),16 },
+       { IPv4(139,152,0,0),16 },
+       { IPv4(139,161,0,0),16 },
+       { IPv4(139,169,0,0),16 },
+       { IPv4(139,171,0,0),19 },
+       { IPv4(139,171,0,0),16 },
+       { IPv4(139,171,24,0),21 },
+       { IPv4(139,175,0,0),16 },
+       { IPv4(139,175,12,0),23 },
+       { IPv4(139,175,56,0),24 },
+       { IPv4(139,175,57,0),24 },
+       { IPv4(139,175,58,0),24 },
+       { IPv4(139,175,59,0),24 },
+       { IPv4(139,175,169,0),24 },
+       { IPv4(139,175,192,0),18 },
+       { IPv4(139,175,252,0),24 },
+       { IPv4(139,180,0,0),16 },
+       { IPv4(139,182,0,0),16 },
+       { IPv4(139,223,0,0),17 },
+       { IPv4(139,223,0,0),16 },
+       { IPv4(139,223,0,0),22 },
+       { IPv4(139,223,2,0),24 },
+       { IPv4(139,223,4,0),22 },
+       { IPv4(139,223,8,0),21 },
+       { IPv4(139,223,16,0),20 },
+       { IPv4(139,223,32,0),19 },
+       { IPv4(139,223,64,0),18 },
+       { IPv4(139,223,128,0),18 },
+       { IPv4(139,223,160,0),20 },
+       { IPv4(139,223,187,0),24 },
+       { IPv4(139,223,188,0),24 },
+       { IPv4(139,223,189,0),24 },
+       { IPv4(139,223,190,0),24 },
+       { IPv4(139,223,191,0),24 },
+       { IPv4(139,223,192,0),19 },
+       { IPv4(139,223,192,0),24 },
+       { IPv4(139,223,193,0),24 },
+       { IPv4(139,223,195,0),24 },
+       { IPv4(139,223,196,0),24 },
+       { IPv4(139,223,197,0),24 },
+       { IPv4(139,223,198,0),24 },
+       { IPv4(139,223,199,0),24 },
+       { IPv4(139,223,200,0),24 },
+       { IPv4(139,223,220,0),22 },
+       { IPv4(139,223,224,0),19 },
+       { IPv4(139,223,232,0),24 },
+       { IPv4(139,231,17,0),24 },
+       { IPv4(139,232,0,0),16 },
+       { IPv4(140,31,0,0),18 },
+       { IPv4(140,31,192,0),21 },
+       { IPv4(140,32,0,0),16 },
+       { IPv4(140,35,0,0),16 },
+       { IPv4(140,45,0,0),16 },
+       { IPv4(140,47,0,0),16 },
+       { IPv4(140,88,0,0),16 },
+       { IPv4(140,89,0,0),16 },
+       { IPv4(140,92,0,0),16 },
+       { IPv4(140,95,0,0),16 },
+       { IPv4(140,95,9,0),24 },
+       { IPv4(140,95,205,0),24 },
+       { IPv4(140,95,224,0),24 },
+       { IPv4(140,96,0,0),16 },
+       { IPv4(140,99,0,0),16 },
+       { IPv4(140,99,96,0),19 },
+       { IPv4(140,100,0,0),17 },
+       { IPv4(140,100,0,0),16 },
+       { IPv4(140,100,4,0),24 },
+       { IPv4(140,100,128,0),18 },
+       { IPv4(140,100,192,0),18 },
+       { IPv4(140,107,0,0),16 },
+       { IPv4(140,109,0,0),16 },
+       { IPv4(140,110,0,0),15 },
+       { IPv4(140,112,0,0),14 },
+       { IPv4(140,112,0,0),16 },
+       { IPv4(140,113,0,0),16 },
+       { IPv4(140,114,0,0),15 },
+       { IPv4(140,116,0,0),14 },
+       { IPv4(140,120,0,0),14 },
+       { IPv4(140,124,0,0),15 },
+       { IPv4(140,126,0,0),16 },
+       { IPv4(140,127,0,0),16 },
+       { IPv4(140,128,0,0),13 },
+       { IPv4(140,136,0,0),15 },
+       { IPv4(140,138,0,0),16 },
+       { IPv4(140,139,0,0),16 },
+       { IPv4(140,139,28,64),27 },
+       { IPv4(140,140,0,0),16 },
+       { IPv4(140,144,0,0),16 },
+       { IPv4(140,145,0,0),16 },
+       { IPv4(140,148,0,0),16 },
+       { IPv4(140,152,0,0),14 },
+       { IPv4(140,153,5,0),25 },
+       { IPv4(140,153,13,0),25 },
+       { IPv4(140,153,18,0),25 },
+       { IPv4(140,153,21,0),25 },
+       { IPv4(140,153,99,0),25 },
+       { IPv4(140,153,107,0),25 },
+       { IPv4(140,153,189,0),27 },
+       { IPv4(140,156,0,0),16 },
+       { IPv4(140,157,38,0),23 },
+       { IPv4(140,157,40,0),23 },
+       { IPv4(140,157,42,0),23 },
+       { IPv4(140,157,44,0),23 },
+       { IPv4(140,157,48,0),23 },
+       { IPv4(140,157,52,0),23 },
+       { IPv4(140,163,0,0),16 },
+       { IPv4(140,169,0,0),16 },
+       { IPv4(140,172,0,0),16 },
+       { IPv4(140,174,0,0),16 },
+       { IPv4(140,174,85,0),24 },
+       { IPv4(140,174,105,0),24 },
+       { IPv4(140,174,208,0),24 },
+       { IPv4(140,175,0,0),16 },
+       { IPv4(140,176,0,0),16 },
+       { IPv4(140,178,0,0),16 },
+       { IPv4(140,180,0,0),16 },
+       { IPv4(140,182,0,0),16 },
+       { IPv4(140,183,0,0),16 },
+       { IPv4(140,186,0,0),16 },
+       { IPv4(140,186,46,0),24 },
+       { IPv4(140,186,70,0),24 },
+       { IPv4(140,186,96,0),24 },
+       { IPv4(140,186,112,0),24 },
+       { IPv4(140,186,129,0),24 },
+       { IPv4(140,186,130,0),23 },
+       { IPv4(140,186,132,0),23 },
+       { IPv4(140,186,144,0),23 },
+       { IPv4(140,186,160,0),22 },
+       { IPv4(140,187,0,0),16 },
+       { IPv4(140,192,0,0),16 },
+       { IPv4(140,195,0,0),16 },
+       { IPv4(140,196,0,0),16 },
+       { IPv4(140,198,0,0),16 },
+       { IPv4(140,201,0,0),16 },
+       { IPv4(140,204,240,0),21 },
+       { IPv4(140,209,0,0),16 },
+       { IPv4(140,212,0,0),16 },
+       { IPv4(140,212,200,0),22 },
+       { IPv4(140,212,204,0),24 },
+       { IPv4(140,212,205,0),24 },
+       { IPv4(140,212,206,0),24 },
+       { IPv4(140,214,0,0),15 },
+       { IPv4(140,216,0,0),14 },
+       { IPv4(140,221,0,0),16 },
+       { IPv4(140,225,0,0),16 },
+       { IPv4(140,226,0,0),16 },
+       { IPv4(140,229,0,0),16 },
+       { IPv4(140,233,0,0),16 },
+       { IPv4(140,237,32,0),19 },
+       { IPv4(140,239,0,0),16 },
+       { IPv4(140,239,177,0),24 },
+       { IPv4(140,239,214,0),24 },
+       { IPv4(140,241,0,0),16 },
+       { IPv4(140,251,0,0),16 },
+       { IPv4(140,252,0,0),16 },
+       { IPv4(141,92,0,0),16 },
+       { IPv4(141,93,0,0),16 },
+       { IPv4(141,102,0,0),16 },
+       { IPv4(141,103,0,0),16 },
+       { IPv4(141,111,0,0),16 },
+       { IPv4(141,121,0,0),16 },
+       { IPv4(141,122,0,0),16 },
+       { IPv4(141,129,0,0),16 },
+       { IPv4(141,140,0,0),16 },
+       { IPv4(141,141,0,0),16 },
+       { IPv4(141,142,0,0),16 },
+       { IPv4(141,160,0,0),16 },
+       { IPv4(141,164,0,0),16 },
+       { IPv4(141,165,0,0),16 },
+       { IPv4(141,173,0,0),16 },
+       { IPv4(141,176,0,0),16 },
+       { IPv4(141,178,0,0),16 },
+       { IPv4(141,179,0,0),16 },
+       { IPv4(141,183,0,0),16 },
+       { IPv4(141,184,0,0),16 },
+       { IPv4(141,187,0,0),16 },
+       { IPv4(141,188,0,0),16 },
+       { IPv4(141,189,0,0),16 },
+       { IPv4(141,190,0,0),16 },
+       { IPv4(141,197,4,0),23 },
+       { IPv4(141,197,8,0),23 },
+       { IPv4(141,198,0,0),16 },
+       { IPv4(141,204,0,0),16 },
+       { IPv4(141,205,0,0),16 },
+       { IPv4(141,221,0,0),16 },
+       { IPv4(141,222,0,0),16 },
+       { IPv4(141,223,0,0),18 },
+       { IPv4(141,223,0,0),16 },
+       { IPv4(141,223,64,0),18 },
+       { IPv4(141,223,128,0),18 },
+       { IPv4(141,223,192,0),18 },
+       { IPv4(141,224,0,0),16 },
+       { IPv4(141,234,0,0),15 },
+       { IPv4(141,236,0,0),16 },
+       { IPv4(141,238,0,0),16 },
+       { IPv4(141,238,64,0),20 },
+       { IPv4(141,238,80,0),20 },
+       { IPv4(141,238,96,0),19 },
+       { IPv4(141,240,0,0),16 },
+       { IPv4(141,242,0,0),16 },
+       { IPv4(141,246,0,0),16 },
+       { IPv4(141,248,0,0),16 },
+       { IPv4(141,254,0,0),16 },
+       { IPv4(142,21,0,0),16 },
+       { IPv4(142,42,0,0),16 },
+       { IPv4(142,42,242,0),24 },
+       { IPv4(142,44,0,0),16 },
+       { IPv4(142,51,0,0),16 },
+       { IPv4(142,66,31,0),24 },
+       { IPv4(142,78,0,0),16 },
+       { IPv4(142,79,0,0),16 },
+       { IPv4(142,89,0,0),16 },
+       { IPv4(142,130,0,0),16 },
+       { IPv4(142,144,0,0),16 },
+       { IPv4(142,146,0,0),16 },
+       { IPv4(142,146,41,0),24 },
+       { IPv4(142,146,42,0),24 },
+       { IPv4(142,146,246,0),24 },
+       { IPv4(142,146,247,0),24 },
+       { IPv4(142,146,248,0),24 },
+       { IPv4(142,146,253,0),24 },
+       { IPv4(142,147,0,0),16 },
+       { IPv4(142,154,0,0),16 },
+       { IPv4(142,154,224,0),19 },
+       { IPv4(142,158,0,0),16 },
+       { IPv4(142,192,200,0),24 },
+       { IPv4(142,194,0,0),16 },
+       { IPv4(142,194,32,0),19 },
+       { IPv4(142,194,96,0),19 },
+       { IPv4(142,194,128,0),19 },
+       { IPv4(142,194,160,0),19 },
+       { IPv4(142,194,192,0),19 },
+       { IPv4(142,194,224,0),19 },
+       { IPv4(142,201,0,0),16 },
+       { IPv4(142,205,0,0),16 },
+       { IPv4(142,205,54,0),23 },
+       { IPv4(142,205,60,0),23 },
+       { IPv4(142,205,232,0),23 },
+       { IPv4(142,205,240,0),23 },
+       { IPv4(142,205,248,0),23 },
+       { IPv4(142,206,0,0),16 },
+       { IPv4(142,238,0,0),16 },
+       { IPv4(142,245,0,0),16 },
+       { IPv4(142,245,0,0),19 },
+       { IPv4(142,245,192,0),22 },
+       { IPv4(143,43,0,0),16 },
+       { IPv4(143,43,112,0),20 },
+       { IPv4(143,43,192,0),18 },
+       { IPv4(143,45,0,0),16 },
+       { IPv4(143,46,0,0),16 },
+       { IPv4(143,48,0,0),16 },
+       { IPv4(143,56,0,0),16 },
+       { IPv4(143,58,0,0),16 },
+       { IPv4(143,58,0,0),19 },
+       { IPv4(143,58,32,0),22 },
+       { IPv4(143,58,36,0),22 },
+       { IPv4(143,58,40,0),22 },
+       { IPv4(143,58,100,0),22 },
+       { IPv4(143,58,104,0),22 },
+       { IPv4(143,58,164,0),22 },
+       { IPv4(143,58,168,0),22 },
+       { IPv4(143,58,172,0),22 },
+       { IPv4(143,58,176,0),22 },
+       { IPv4(143,58,180,0),22 },
+       { IPv4(143,58,184,0),22 },
+       { IPv4(143,58,245,0),24 },
+       { IPv4(143,58,246,0),24 },
+       { IPv4(143,61,0,0),16 },
+       { IPv4(143,61,38,0),24 },
+       { IPv4(143,61,153,0),24 },
+       { IPv4(143,61,154,0),24 },
+       { IPv4(143,61,156,0),24 },
+       { IPv4(143,61,233,0),24 },
+       { IPv4(143,62,0,0),16 },
+       { IPv4(143,66,0,0),16 },
+       { IPv4(143,67,0,0),16 },
+       { IPv4(143,68,0,0),16 },
+       { IPv4(143,77,0,0),16 },
+       { IPv4(143,78,0,0),16 },
+       { IPv4(143,81,0,0),16 },
+       { IPv4(143,83,0,0),16 },
+       { IPv4(143,85,0,0),16 },
+       { IPv4(143,85,107,0),25 },
+       { IPv4(143,96,0,0),16 },
+       { IPv4(143,100,0,0),16 },
+       { IPv4(143,104,0,0),16 },
+       { IPv4(143,110,0,0),16 },
+       { IPv4(143,111,0,0),16 },
+       { IPv4(143,113,0,0),16 },
+       { IPv4(143,115,0,0),16 },
+       { IPv4(143,115,160,0),19 },
+       { IPv4(143,116,0,0),16 },
+       { IPv4(143,119,0,0),16 },
+       { IPv4(143,127,0,0),19 },
+       { IPv4(143,128,0,0),16 },
+       { IPv4(143,134,0,0),16 },
+       { IPv4(143,138,0,0),16 },
+       { IPv4(143,138,0,0),15 },
+       { IPv4(143,152,0,0),14 },
+       { IPv4(143,158,0,0),16 },
+       { IPv4(143,160,0,0),16 },
+       { IPv4(143,164,96,0),22 },
+       { IPv4(143,166,0,0),16 },
+       { IPv4(143,176,0,0),14 },
+       { IPv4(143,187,0,0),16 },
+       { IPv4(143,191,0,0),16 },
+       { IPv4(143,192,0,0),16 },
+       { IPv4(143,195,0,0),16 },
+       { IPv4(143,197,0,0),16 },
+       { IPv4(143,211,0,0),16 },
+       { IPv4(143,212,0,0),15 },
+       { IPv4(143,214,0,0),16 },
+       { IPv4(143,217,0,0),16 },
+       { IPv4(143,223,20,0),24 },
+       { IPv4(143,226,0,0),16 },
+       { IPv4(143,227,0,0),16 },
+       { IPv4(143,227,48,0),20 },
+       { IPv4(143,229,0,0),16 },
+       { IPv4(143,230,0,0),16 },
+       { IPv4(143,232,0,0),16 },
+       { IPv4(143,243,0,0),16 },
+       { IPv4(143,244,0,0),16 },
+       { IPv4(143,245,0,0),16 },
+       { IPv4(143,247,0,0),16 },
+       { IPv4(143,248,0,0),16 },
+       { IPv4(143,249,0,0),16 },
+       { IPv4(143,250,0,0),16 },
+       { IPv4(144,3,0,0),16 },
+       { IPv4(144,11,0,0),16 },
+       { IPv4(144,15,0,0),16 },
+       { IPv4(144,15,249,0),24 },
+       { IPv4(144,15,252,0),24 },
+       { IPv4(144,17,0,0),16 },
+       { IPv4(144,18,0,0),16 },
+       { IPv4(144,34,0,0),16 },
+       { IPv4(144,35,0,0),16 },
+       { IPv4(144,37,0,0),16 },
+       { IPv4(144,38,0,0),16 },
+       { IPv4(144,39,0,0),16 },
+       { IPv4(144,45,0,0),16 },
+       { IPv4(144,47,0,0),16 },
+       { IPv4(144,49,0,0),16 },
+       { IPv4(144,49,1,0),24 },
+       { IPv4(144,49,2,0),24 },
+       { IPv4(144,49,8,0),24 },
+       { IPv4(144,58,0,0),16 },
+       { IPv4(144,59,0,0),16 },
+       { IPv4(144,73,0,0),16 },
+       { IPv4(144,74,0,0),16 },
+       { IPv4(144,80,14,0),23 },
+       { IPv4(144,80,60,0),22 },
+       { IPv4(144,80,92,0),22 },
+       { IPv4(144,81,0,0),16 },
+       { IPv4(144,86,0,0),16 },
+       { IPv4(144,90,0,0),16 },
+       { IPv4(144,95,0,0),16 },
+       { IPv4(144,99,0,0),16 },
+       { IPv4(144,100,0,0),14 },
+       { IPv4(144,104,0,0),14 },
+       { IPv4(144,109,0,0),16 },
+       { IPv4(144,119,0,0),16 },
+       { IPv4(144,141,0,0),16 },
+       { IPv4(144,147,0,0),16 },
+       { IPv4(144,169,0,0),16 },
+       { IPv4(144,170,0,0),16 },
+       { IPv4(144,182,0,0),15 },
+       { IPv4(144,183,200,0),21 },
+       { IPv4(144,183,208,0),21 },
+       { IPv4(144,183,208,0),24 },
+       { IPv4(144,184,0,0),16 },
+       { IPv4(144,197,0,0),16 },
+       { IPv4(144,198,0,0),16 },
+       { IPv4(144,198,20,0),24 },
+       { IPv4(144,198,24,0),22 },
+       { IPv4(144,198,32,0),19 },
+       { IPv4(144,198,70,0),24 },
+       { IPv4(144,198,191,0),24 },
+       { IPv4(144,198,192,0),24 },
+       { IPv4(144,198,200,0),24 },
+       { IPv4(144,198,207,0),24 },
+       { IPv4(144,198,226,0),24 },
+       { IPv4(144,199,0,0),16 },
+       { IPv4(144,207,0,0),16 },
+       { IPv4(144,213,0,0),16 },
+       { IPv4(144,244,0,0),16 },
+       { IPv4(144,245,0,0),16 },
+       { IPv4(144,246,0,0),16 },
+       { IPv4(144,247,0,0),16 },
+       { IPv4(144,247,216,0),21 },
+       { IPv4(144,247,224,0),21 },
+       { IPv4(144,247,240,0),20 },
+       { IPv4(144,251,0,0),16 },
+       { IPv4(144,252,0,0),16 },
+       { IPv4(144,254,0,0),16 },
+       { IPv4(145,7,0,0),16 },
+       { IPv4(145,8,0,0),16 },
+       { IPv4(145,10,0,0),16 },
+       { IPv4(145,53,0,0),16 },
+       { IPv4(145,61,0,0),16 },
+       { IPv4(145,63,0,0),16 },
+       { IPv4(145,66,0,0),16 },
+       { IPv4(145,69,0,0),16 },
+       { IPv4(145,77,103,0),24 },
+       { IPv4(145,224,0,0),16 },
+       { IPv4(145,224,255,0),24 },
+       { IPv4(145,225,203,0),24 },
+       { IPv4(145,225,204,0),24 },
+       { IPv4(145,229,0,0),16 },
+       { IPv4(145,232,0,0),16 },
+       { IPv4(146,1,8,0),21 },
+       { IPv4(146,5,0,0),16 },
+       { IPv4(146,6,0,0),16 },
+       { IPv4(146,7,0,0),16 },
+       { IPv4(146,18,0,0),16 },
+       { IPv4(146,20,18,0),23 },
+       { IPv4(146,20,23,0),24 },
+       { IPv4(146,20,33,0),24 },
+       { IPv4(146,20,34,0),24 },
+       { IPv4(146,53,0,0),16 },
+       { IPv4(146,57,0,0),16 },
+       { IPv4(146,58,0,0),16 },
+       { IPv4(146,64,0,0),16 },
+       { IPv4(146,68,0,0),16 },
+       { IPv4(146,79,0,0),16 },
+       { IPv4(146,83,132,0),24 },
+       { IPv4(146,83,135,0),24 },
+       { IPv4(146,83,149,0),24 },
+       { IPv4(146,83,164,0),24 },
+       { IPv4(146,84,0,0),16 },
+       { IPv4(146,86,0,0),16 },
+       { IPv4(146,94,0,0),16 },
+       { IPv4(146,95,0,0),16 },
+       { IPv4(146,96,0,0),16 },
+       { IPv4(146,99,0,0),16 },
+       { IPv4(146,111,0,0),16 },
+       { IPv4(146,115,0,0),16 },
+       { IPv4(146,122,0,0),16 },
+       { IPv4(146,126,0,0),16 },
+       { IPv4(146,126,2,0),24 },
+       { IPv4(146,126,51,0),24 },
+       { IPv4(146,126,61,0),24 },
+       { IPv4(146,126,73,0),24 },
+       { IPv4(146,126,86,0),24 },
+       { IPv4(146,126,88,0),24 },
+       { IPv4(146,132,0,0),16 },
+       { IPv4(146,137,0,0),16 },
+       { IPv4(146,139,0,0),16 },
+       { IPv4(146,141,0,0),16 },
+       { IPv4(146,145,153,0),24 },
+       { IPv4(146,150,0,0),16 },
+       { IPv4(146,152,0,0),16 },
+       { IPv4(146,153,0,0),16 },
+       { IPv4(146,154,0,0),16 },
+       { IPv4(146,155,0,0),16 },
+       { IPv4(146,157,0,0),16 },
+       { IPv4(146,163,0,0),16 },
+       { IPv4(146,165,0,0),16 },
+       { IPv4(146,167,0,0),16 },
+       { IPv4(146,168,14,0),24 },
+       { IPv4(146,174,0,0),16 },
+       { IPv4(146,181,0,0),16 },
+       { IPv4(146,182,0,0),16 },
+       { IPv4(146,186,0,0),16 },
+       { IPv4(146,196,0,0),16 },
+       { IPv4(146,197,0,0),16 },
+       { IPv4(146,202,0,0),16 },
+       { IPv4(146,203,0,0),16 },
+       { IPv4(146,206,0,0),16 },
+       { IPv4(146,208,0,0),16 },
+       { IPv4(146,209,160,0),19 },
+       { IPv4(146,215,64,0),24 },
+       { IPv4(146,215,65,0),24 },
+       { IPv4(146,215,66,0),24 },
+       { IPv4(146,217,0,0),16 },
+       { IPv4(146,218,0,0),16 },
+       { IPv4(146,220,224,0),20 },
+       { IPv4(146,222,13,0),24 },
+       { IPv4(146,222,14,0),24 },
+       { IPv4(146,222,30,0),24 },
+       { IPv4(146,222,31,0),24 },
+       { IPv4(146,222,32,0),24 },
+       { IPv4(146,222,33,0),24 },
+       { IPv4(146,222,34,0),24 },
+       { IPv4(146,222,45,0),24 },
+       { IPv4(146,222,69,0),24 },
+       { IPv4(146,222,156,0),23 },
+       { IPv4(146,222,158,0),24 },
+       { IPv4(146,222,187,0),24 },
+       { IPv4(146,222,188,0),24 },
+       { IPv4(146,222,194,0),24 },
+       { IPv4(146,222,196,0),24 },
+       { IPv4(146,222,197,0),24 },
+       { IPv4(146,223,0,0),16 },
+       { IPv4(146,230,0,0),16 },
+       { IPv4(146,231,0,0),16 },
+       { IPv4(146,232,0,0),16 },
+       { IPv4(146,235,0,0),18 },
+       { IPv4(146,235,64,0),18 },
+       { IPv4(146,235,128,0),18 },
+       { IPv4(146,244,0,0),16 },
+       { IPv4(146,245,0,0),16 },
+       { IPv4(146,246,0,0),16 },
+       { IPv4(147,2,0,0),16 },
+       { IPv4(147,4,0,0),16 },
+       { IPv4(147,4,101,0),24 },
+       { IPv4(147,6,0,0),16 },
+       { IPv4(147,9,0,0),16 },
+       { IPv4(147,16,0,0),16 },
+       { IPv4(147,17,0,0),16 },
+       { IPv4(147,21,0,0),16 },
+       { IPv4(147,24,0,0),16 },
+       { IPv4(147,25,0,0),16 },
+       { IPv4(147,28,0,0),16 },
+       { IPv4(147,35,0,0),16 },
+       { IPv4(147,37,0,0),16 },
+       { IPv4(147,39,0,0),16 },
+       { IPv4(147,40,0,0),16 },
+       { IPv4(147,43,0,0),16 },
+       { IPv4(147,46,0,0),16 },
+       { IPv4(147,51,0,0),16 },
+       { IPv4(147,58,0,0),16 },
+       { IPv4(147,71,0,0),16 },
+       { IPv4(147,72,0,0),16 },
+       { IPv4(147,72,64,0),18 },
+       { IPv4(147,74,0,0),16 },
+       { IPv4(147,78,0,0),16 },
+       { IPv4(147,80,0,0),16 },
+       { IPv4(147,92,0,0),20 },
+       { IPv4(147,92,240,0),20 },
+       { IPv4(147,103,0,0),16 },
+       { IPv4(147,106,0,0),16 },
+       { IPv4(147,118,0,0),16 },
+       { IPv4(147,120,0,0),16 },
+       { IPv4(147,128,68,0),22 },
+       { IPv4(147,129,0,0),16 },
+       { IPv4(147,130,0,0),15 },
+       { IPv4(147,135,0,0),16 },
+       { IPv4(147,137,0,0),16 },
+       { IPv4(147,144,0,0),16 },
+       { IPv4(147,147,0,0),16 },
+       { IPv4(147,148,0,0),14 },
+       { IPv4(147,153,0,0),16 },
+       { IPv4(147,154,0,0),16 },
+       { IPv4(147,155,0,0),16 },
+       { IPv4(147,164,0,0),16 },
+       { IPv4(147,166,0,0),16 },
+       { IPv4(147,169,0,0),16 },
+       { IPv4(147,178,0,0),16 },
+       { IPv4(147,179,0,0),16 },
+       { IPv4(147,182,0,0),16 },
+       { IPv4(147,191,0,0),16 },
+       { IPv4(147,198,0,0),16 },
+       { IPv4(147,202,0,0),16 },
+       { IPv4(147,202,60,0),24 },
+       { IPv4(147,205,0,0),16 },
+       { IPv4(147,208,0,0),19 },
+       { IPv4(147,208,0,0),16 },
+       { IPv4(147,208,128,0),18 },
+       { IPv4(147,208,224,0),19 },
+       { IPv4(147,216,0,0),15 },
+       { IPv4(147,221,0,0),16 },
+       { IPv4(147,222,0,0),16 },
+       { IPv4(147,227,100,0),24 },
+       { IPv4(147,235,0,0),16 },
+       { IPv4(147,235,0,0),17 },
+       { IPv4(147,235,128,0),19 },
+       { IPv4(147,235,192,0),19 },
+       { IPv4(147,235,224,0),22 },
+       { IPv4(147,235,248,0),21 },
+       { IPv4(147,237,232,0),24 },
+       { IPv4(147,238,0,0),16 },
+       { IPv4(147,239,0,0),16 },
+       { IPv4(147,240,0,0),16 },
+       { IPv4(147,241,0,0),16 },
+       { IPv4(147,242,0,0),16 },
+       { IPv4(147,248,0,0),16 },
+       { IPv4(147,249,0,0),16 },
+       { IPv4(148,5,0,0),16 },
+       { IPv4(148,16,0,0),12 },
+       { IPv4(148,55,0,0),16 },
+       { IPv4(148,56,0,0),16 },
+       { IPv4(148,59,0,0),16 },
+       { IPv4(148,70,0,0),16 },
+       { IPv4(148,71,0,0),16 },
+       { IPv4(148,74,0,0),16 },
+       { IPv4(148,75,0,0),16 },
+       { IPv4(148,76,0,0),16 },
+       { IPv4(148,77,0,0),16 },
+       { IPv4(148,78,250,0),24 },
+       { IPv4(148,78,251,0),24 },
+       { IPv4(148,78,252,0),24 },
+       { IPv4(148,78,253,0),24 },
+       { IPv4(148,78,254,0),24 },
+       { IPv4(148,84,0,0),16 },
+       { IPv4(148,87,0,0),19 },
+       { IPv4(148,89,252,0),24 },
+       { IPv4(148,89,253,0),24 },
+       { IPv4(148,89,254,0),24 },
+       { IPv4(148,100,0,0),16 },
+       { IPv4(148,107,0,0),16 },
+       { IPv4(148,107,0,0),19 },
+       { IPv4(148,107,1,0),24 },
+       { IPv4(148,107,3,0),24 },
+       { IPv4(148,107,4,0),24 },
+       { IPv4(148,107,5,0),24 },
+       { IPv4(148,107,6,0),24 },
+       { IPv4(148,107,7,0),24 },
+       { IPv4(148,107,8,0),24 },
+       { IPv4(148,107,9,0),24 },
+       { IPv4(148,107,10,0),24 },
+       { IPv4(148,107,11,0),24 },
+       { IPv4(148,107,12,0),24 },
+       { IPv4(148,107,13,0),24 },
+       { IPv4(148,107,14,0),24 },
+       { IPv4(148,114,0,0),16 },
+       { IPv4(148,115,0,0),16 },
+       { IPv4(148,116,0,0),16 },
+       { IPv4(148,126,0,0),16 },
+       { IPv4(148,133,0,0),16 },
+       { IPv4(148,141,0,0),16 },
+       { IPv4(148,142,0,0),16 },
+       { IPv4(148,146,0,0),16 },
+       { IPv4(148,154,0,0),16 },
+       { IPv4(148,163,0,0),16 },
+       { IPv4(148,163,108,0),24 },
+       { IPv4(148,165,0,0),16 },
+       { IPv4(148,167,0,0),16 },
+       { IPv4(148,168,0,0),16 },
+       { IPv4(148,168,32,0),19 },
+       { IPv4(148,168,96,0),19 },
+       { IPv4(148,176,0,0),16 },
+       { IPv4(148,176,248,0),24 },
+       { IPv4(148,177,0,0),21 },
+       { IPv4(148,177,0,0),16 },
+       { IPv4(148,177,8,0),21 },
+       { IPv4(148,177,128,0),21 },
+       { IPv4(148,183,0,0),16 },
+       { IPv4(148,199,0,0),16 },
+       { IPv4(148,201,0,0),16 },
+       { IPv4(148,203,196,0),24 },
+       { IPv4(148,205,0,0),16 },
+       { IPv4(148,208,128,0),17 },
+       { IPv4(148,208,130,0),24 },
+       { IPv4(148,208,131,0),24 },
+       { IPv4(148,208,132,0),24 },
+       { IPv4(148,208,134,0),24 },
+       { IPv4(148,208,135,0),24 },
+       { IPv4(148,208,137,0),24 },
+       { IPv4(148,208,138,0),24 },
+       { IPv4(148,208,140,0),24 },
+       { IPv4(148,208,141,0),24 },
+       { IPv4(148,208,143,0),24 },
+       { IPv4(148,208,144,0),24 },
+       { IPv4(148,208,145,0),24 },
+       { IPv4(148,208,146,0),24 },
+       { IPv4(148,208,147,0),24 },
+       { IPv4(148,208,148,0),24 },
+       { IPv4(148,208,149,0),24 },
+       { IPv4(148,208,150,0),24 },
+       { IPv4(148,208,151,0),24 },
+       { IPv4(148,208,152,0),24 },
+       { IPv4(148,208,153,0),24 },
+       { IPv4(148,208,154,0),24 },
+       { IPv4(148,208,155,0),24 },
+       { IPv4(148,208,156,0),24 },
+       { IPv4(148,208,157,0),24 },
+       { IPv4(148,208,159,0),24 },
+       { IPv4(148,208,161,0),24 },
+       { IPv4(148,208,162,0),24 },
+       { IPv4(148,208,163,0),24 },
+       { IPv4(148,208,164,0),24 },
+       { IPv4(148,208,165,0),24 },
+       { IPv4(148,208,166,0),24 },
+       { IPv4(148,208,167,0),24 },
+       { IPv4(148,208,168,0),24 },
+       { IPv4(148,208,169,0),24 },
+       { IPv4(148,208,170,0),24 },
+       { IPv4(148,208,171,0),24 },
+       { IPv4(148,208,172,0),24 },
+       { IPv4(148,208,174,0),24 },
+       { IPv4(148,208,175,0),24 },
+       { IPv4(148,208,176,0),24 },
+       { IPv4(148,208,177,0),24 },
+       { IPv4(148,208,178,0),24 },
+       { IPv4(148,208,179,0),24 },
+       { IPv4(148,208,180,0),24 },
+       { IPv4(148,208,181,0),24 },
+       { IPv4(148,208,182,0),24 },
+       { IPv4(148,208,183,0),24 },
+       { IPv4(148,208,184,0),24 },
+       { IPv4(148,208,185,0),24 },
+       { IPv4(148,208,186,0),24 },
+       { IPv4(148,208,187,0),24 },
+       { IPv4(148,208,188,0),24 },
+       { IPv4(148,208,189,0),24 },
+       { IPv4(148,208,190,0),24 },
+       { IPv4(148,208,191,0),24 },
+       { IPv4(148,208,192,0),24 },
+       { IPv4(148,208,194,0),24 },
+       { IPv4(148,208,195,0),24 },
+       { IPv4(148,208,196,0),24 },
+       { IPv4(148,208,198,0),24 },
+       { IPv4(148,208,199,0),24 },
+       { IPv4(148,208,200,0),24 },
+       { IPv4(148,208,204,0),24 },
+       { IPv4(148,208,205,0),24 },
+       { IPv4(148,208,206,0),24 },
+       { IPv4(148,208,207,0),24 },
+       { IPv4(148,208,210,0),24 },
+       { IPv4(148,208,214,0),24 },
+       { IPv4(148,208,215,0),24 },
+       { IPv4(148,208,216,0),24 },
+       { IPv4(148,208,217,0),24 },
+       { IPv4(148,208,218,0),24 },
+       { IPv4(148,208,219,0),24 },
+       { IPv4(148,208,220,0),24 },
+       { IPv4(148,208,221,0),24 },
+       { IPv4(148,208,222,0),24 },
+       { IPv4(148,208,223,0),24 },
+       { IPv4(148,208,224,0),24 },
+       { IPv4(148,208,227,0),24 },
+       { IPv4(148,208,228,0),24 },
+       { IPv4(148,208,229,0),24 },
+       { IPv4(148,208,230,0),24 },
+       { IPv4(148,208,231,0),24 },
+       { IPv4(148,208,232,0),24 },
+       { IPv4(148,208,234,0),24 },
+       { IPv4(148,208,236,0),24 },
+       { IPv4(148,208,237,0),24 },
+       { IPv4(148,208,238,0),24 },
+       { IPv4(148,208,239,0),24 },
+       { IPv4(148,208,240,0),24 },
+       { IPv4(148,208,241,0),24 },
+       { IPv4(148,208,242,0),24 },
+       { IPv4(148,208,243,0),24 },
+       { IPv4(148,208,246,0),24 },
+       { IPv4(148,208,247,0),24 },
+       { IPv4(148,208,248,0),24 },
+       { IPv4(148,208,250,0),24 },
+       { IPv4(148,208,251,0),24 },
+       { IPv4(148,208,252,0),24 },
+       { IPv4(148,208,254,0),24 },
+       { IPv4(148,209,0,0),16 },
+       { IPv4(148,210,0,0),16 },
+       { IPv4(148,211,0,0),16 },
+       { IPv4(148,214,0,0),16 },
+       { IPv4(148,215,0,0),16 },
+       { IPv4(148,216,0,0),16 },
+       { IPv4(148,218,0,0),16 },
+       { IPv4(148,219,0,0),16 },
+       { IPv4(148,220,0,0),16 },
+       { IPv4(148,221,0,0),19 },
+       { IPv4(148,221,0,0),16 },
+       { IPv4(148,221,32,0),19 },
+       { IPv4(148,221,64,0),19 },
+       { IPv4(148,221,96,0),19 },
+       { IPv4(148,221,128,0),18 },
+       { IPv4(148,221,192,0),18 },
+       { IPv4(148,222,0,0),16 },
+       { IPv4(148,223,0,0),18 },
+       { IPv4(148,223,0,0),16 },
+       { IPv4(148,223,64,0),19 },
+       { IPv4(148,223,96,0),20 },
+       { IPv4(148,223,112,0),20 },
+       { IPv4(148,223,128,0),18 },
+       { IPv4(148,223,152,0),24 },
+       { IPv4(148,223,154,0),24 },
+       { IPv4(148,223,192,0),19 },
+       { IPv4(148,223,224,0),19 },
+       { IPv4(148,224,6,0),24 },
+       { IPv4(148,227,0,0),16 },
+       { IPv4(148,230,0,0),16 },
+       { IPv4(148,233,0,0),16 },
+       { IPv4(148,233,0,0),19 },
+       { IPv4(148,233,32,0),19 },
+       { IPv4(148,233,64,0),19 },
+       { IPv4(148,233,71,0),24 },
+       { IPv4(148,233,77,0),24 },
+       { IPv4(148,233,96,0),19 },
+       { IPv4(148,233,128,0),18 },
+       { IPv4(148,233,148,0),24 },
+       { IPv4(148,233,152,0),24 },
+       { IPv4(148,233,192,0),18 },
+       { IPv4(148,233,241,0),24 },
+       { IPv4(148,234,0,0),16 },
+       { IPv4(148,235,0,0),16 },
+       { IPv4(148,235,0,0),19 },
+       { IPv4(148,235,32,0),19 },
+       { IPv4(148,235,64,0),19 },
+       { IPv4(148,235,96,0),19 },
+       { IPv4(148,235,128,0),18 },
+       { IPv4(148,235,192,0),18 },
+       { IPv4(148,236,0,0),16 },
+       { IPv4(148,237,0,0),16 },
+       { IPv4(148,238,0,0),16 },
+       { IPv4(148,239,0,0),16 },
+       { IPv4(148,241,0,0),19 },
+       { IPv4(148,241,32,0),19 },
+       { IPv4(148,241,64,0),19 },
+       { IPv4(148,242,0,0),16 },
+       { IPv4(148,243,64,0),21 },
+       { IPv4(148,244,0,0),17 },
+       { IPv4(148,244,0,0),16 },
+       { IPv4(148,244,0,0),18 },
+       { IPv4(148,244,128,0),17 },
+       { IPv4(148,245,228,0),24 },
+       { IPv4(148,246,0,0),16 },
+       { IPv4(148,248,0,0),16 },
+       { IPv4(148,248,250,0),24 },
+       { IPv4(148,249,0,0),16 },
+       { IPv4(149,1,0,0),16 },
+       { IPv4(149,2,22,0),24 },
+       { IPv4(149,2,24,0),23 },
+       { IPv4(149,2,28,0),24 },
+       { IPv4(149,2,32,0),21 },
+       { IPv4(149,2,78,0),24 },
+       { IPv4(149,2,80,0),24 },
+       { IPv4(149,2,121,0),24 },
+       { IPv4(149,2,122,0),24 },
+       { IPv4(149,2,123,0),24 },
+       { IPv4(149,2,132,0),24 },
+       { IPv4(149,2,143,0),24 },
+       { IPv4(149,4,0,0),16 },
+       { IPv4(149,15,0,0),16 },
+       { IPv4(149,28,0,0),16 },
+       { IPv4(149,28,0,0),20 },
+       { IPv4(149,31,0,0),16 },
+       { IPv4(149,43,0,0),16 },
+       { IPv4(149,46,0,0),24 },
+       { IPv4(149,48,0,0),16 },
+       { IPv4(149,54,0,0),16 },
+       { IPv4(149,58,0,0),16 },
+       { IPv4(149,61,0,0),16 },
+       { IPv4(149,63,0,0),16 },
+       { IPv4(149,64,0,0),16 },
+       { IPv4(149,65,0,0),16 },
+       { IPv4(149,68,0,0),16 },
+       { IPv4(149,70,0,0),16 },
+       { IPv4(149,81,0,0),16 },
+       { IPv4(149,83,208,0),24 },
+       { IPv4(149,84,0,0),16 },
+       { IPv4(149,89,0,0),16 },
+       { IPv4(149,105,0,0),16 },
+       { IPv4(149,114,0,0),16 },
+       { IPv4(149,119,0,0),16 },
+       { IPv4(149,123,0,0),16 },
+       { IPv4(149,123,254,0),24 },
+       { IPv4(149,125,0,0),16 },
+       { IPv4(149,134,0,0),16 },
+       { IPv4(149,137,0,0),16 },
+       { IPv4(149,142,0,0),16 },
+       { IPv4(149,145,0,0),16 },
+       { IPv4(149,158,0,0),16 },
+       { IPv4(149,159,0,0),16 },
+       { IPv4(149,169,0,0),16 },
+       { IPv4(149,172,0,0),17 },
+       { IPv4(149,172,0,0),16 },
+       { IPv4(149,172,128,0),17 },
+       { IPv4(149,172,150,0),24 },
+       { IPv4(149,173,0,0),16 },
+       { IPv4(149,174,0,0),16 },
+       { IPv4(149,199,0,0),16 },
+       { IPv4(149,206,0,0),16 },
+       { IPv4(149,211,0,0),16 },
+       { IPv4(149,214,0,0),16 },
+       { IPv4(149,221,0,0),16 },
+       { IPv4(149,236,0,0),16 },
+       { IPv4(149,239,0,0),16 },
+       { IPv4(149,242,212,0),24 },
+       { IPv4(149,244,0,0),16 },
+       { IPv4(150,18,0,0),16 },
+       { IPv4(150,19,0,0),16 },
+       { IPv4(150,26,0,0),16 },
+       { IPv4(150,29,0,0),16 },
+       { IPv4(150,32,0,0),16 },
+       { IPv4(150,48,240,0),23 },
+       { IPv4(150,52,0,0),16 },
+       { IPv4(150,61,0,0),16 },
+       { IPv4(150,63,0,0),16 },
+       { IPv4(150,65,0,0),16 },
+       { IPv4(150,70,32,0),22 },
+       { IPv4(150,82,0,0),16 },
+       { IPv4(150,91,0,0),16 },
+       { IPv4(150,105,0,0),16 },
+       { IPv4(150,105,16,0),20 },
+       { IPv4(150,105,32,0),20 },
+       { IPv4(150,112,0,0),16 },
+       { IPv4(150,113,0,0),16 },
+       { IPv4(150,114,0,0),16 },
+       { IPv4(150,131,0,0),16 },
+       { IPv4(150,133,0,0),16 },
+       { IPv4(150,135,0,0),16 },
+       { IPv4(150,137,0,0),16 },
+       { IPv4(150,142,0,0),16 },
+       { IPv4(150,143,0,0),16 },
+       { IPv4(150,144,0,0),16 },
+       { IPv4(150,149,0,0),16 },
+       { IPv4(150,150,0,0),16 },
+       { IPv4(150,152,0,0),16 },
+       { IPv4(150,155,0,0),16 },
+       { IPv4(150,156,0,0),16 },
+       { IPv4(150,167,0,0),16 },
+       { IPv4(150,177,0,0),16 },
+       { IPv4(150,180,0,0),16 },
+       { IPv4(150,183,0,0),16 },
+       { IPv4(150,183,10,0),24 },
+       { IPv4(150,183,92,0),24 },
+       { IPv4(150,184,0,0),16 },
+       { IPv4(150,185,128,0),18 },
+       { IPv4(150,190,0,0),16 },
+       { IPv4(150,192,0,0),15 },
+       { IPv4(150,195,0,0),16 },
+       { IPv4(150,197,0,0),16 },
+       { IPv4(150,199,0,0),16 },
+       { IPv4(150,200,0,0),16 },
+       { IPv4(150,201,0,0),16 },
+       { IPv4(150,202,0,0),16 },
+       { IPv4(150,202,8,0),24 },
+       { IPv4(150,209,0,0),16 },
+       { IPv4(150,210,0,0),16 },
+       { IPv4(150,220,10,0),24 },
+       { IPv4(150,225,0,0),16 },
+       { IPv4(150,226,0,0),16 },
+       { IPv4(150,228,0,0),16 },
+       { IPv4(150,231,0,0),16 },
+       { IPv4(150,232,0,0),16 },
+       { IPv4(150,243,0,0),16 },
+       { IPv4(150,250,0,0),16 },
+       { IPv4(150,253,0,0),16 },
+       { IPv4(151,87,0,0),16 },
+       { IPv4(151,96,0,0),16 },
+       { IPv4(151,110,206,0),24 },
+       { IPv4(151,111,0,0),16 },
+       { IPv4(151,113,0,0),16 },
+       { IPv4(151,118,0,0),16 },
+       { IPv4(151,120,0,0),16 },
+       { IPv4(151,124,0,0),16 },
+       { IPv4(151,125,0,0),16 },
+       { IPv4(151,126,0,0),16 },
+       { IPv4(151,140,0,0),16 },
+       { IPv4(151,142,218,0),24 },
+       { IPv4(151,148,0,0),16 },
+       { IPv4(151,153,0,0),16 },
+       { IPv4(151,155,0,0),16 },
+       { IPv4(151,163,0,0),16 },
+       { IPv4(151,163,2,0),24 },
+       { IPv4(151,163,56,0),24 },
+       { IPv4(151,163,57,0),24 },
+       { IPv4(151,164,88,0),24 },
+       { IPv4(151,164,169,0),24 },
+       { IPv4(151,164,170,0),23 },
+       { IPv4(151,164,172,0),23 },
+       { IPv4(151,164,174,0),24 },
+       { IPv4(151,164,230,0),24 },
+       { IPv4(151,164,231,0),24 },
+       { IPv4(151,166,0,0),16 },
+       { IPv4(151,186,0,0),16 },
+       { IPv4(151,190,0,0),16 },
+       { IPv4(151,193,0,0),16 },
+       { IPv4(151,195,0,0),16 },
+       { IPv4(151,210,0,0),16 },
+       { IPv4(151,212,0,0),16 },
+       { IPv4(152,61,0,0),16 },
+       { IPv4(152,61,1,0),24 },
+       { IPv4(152,67,0,0),16 },
+       { IPv4(152,67,13,0),24 },
+       { IPv4(152,67,109,0),24 },
+       { IPv4(152,67,220,0),22 },
+       { IPv4(152,67,224,0),23 },
+       { IPv4(152,67,226,0),24 },
+       { IPv4(152,76,0,0),16 },
+       { IPv4(152,79,0,0),16 },
+       { IPv4(152,80,0,0),16 },
+       { IPv4(152,85,0,0),16 },
+       { IPv4(152,85,2,0),24 },
+       { IPv4(152,85,3,0),24 },
+       { IPv4(152,86,0,0),16 },
+       { IPv4(152,87,0,0),16 },
+       { IPv4(152,99,0,0),16 },
+       { IPv4(152,99,0,0),17 },
+       { IPv4(152,99,128,0),18 },
+       { IPv4(152,99,192,0),18 },
+       { IPv4(152,104,224,0),19 },
+       { IPv4(152,107,0,0),16 },
+       { IPv4(152,110,0,0),16 },
+       { IPv4(152,111,0,0),16 },
+       { IPv4(152,112,0,0),16 },
+       { IPv4(152,114,0,0),16 },
+       { IPv4(152,131,100,0),22 },
+       { IPv4(152,131,104,0),24 },
+       { IPv4(152,131,110,0),23 },
+       { IPv4(152,131,112,0),23 },
+       { IPv4(152,131,114,0),24 },
+       { IPv4(152,137,0,0),16 },
+       { IPv4(152,149,0,0),16 },
+       { IPv4(152,158,0,0),16 },
+       { IPv4(152,158,160,0),19 },
+       { IPv4(152,158,192,0),18 },
+       { IPv4(152,160,0,0),16 },
+       { IPv4(152,163,0,0),20 },
+       { IPv4(152,163,0,0),16 },
+       { IPv4(152,229,0,0),16 },
+       { IPv4(153,2,0,0),16 },
+       { IPv4(153,2,228,0),24 },
+       { IPv4(153,2,229,0),24 },
+       { IPv4(153,2,230,0),24 },
+       { IPv4(153,2,231,0),24 },
+       { IPv4(153,2,234,0),24 },
+       { IPv4(153,2,244,0),24 },
+       { IPv4(153,2,247,0),24 },
+       { IPv4(153,4,0,0),16 },
+       { IPv4(153,9,0,0),16 },
+       { IPv4(153,10,0,0),16 },
+       { IPv4(153,11,0,0),16 },
+       { IPv4(153,18,0,0),16 },
+       { IPv4(153,20,0,0),16 },
+       { IPv4(153,24,0,0),14 },
+       { IPv4(153,33,0,0),16 },
+       { IPv4(153,45,0,0),16 },
+       { IPv4(153,46,0,0),16 },
+       { IPv4(153,69,0,0),24 },
+       { IPv4(153,69,128,0),24 },
+       { IPv4(153,91,0,0),16 },
+       { IPv4(153,102,0,0),16 },
+       { IPv4(153,103,0,0),16 },
+       { IPv4(153,105,0,0),16 },
+       { IPv4(154,2,0,0),16 },
+       { IPv4(155,5,0,0),16 },
+       { IPv4(155,6,0,0),16 },
+       { IPv4(155,8,0,0),15 },
+       { IPv4(155,14,0,0),16 },
+       { IPv4(155,36,0,0),16 },
+       { IPv4(155,41,0,0),17 },
+       { IPv4(155,48,0,0),16 },
+       { IPv4(155,53,0,0),16 },
+       { IPv4(155,59,2,0),24 },
+       { IPv4(155,60,0,0),16 },
+       { IPv4(155,68,0,0),16 },
+       { IPv4(155,72,0,0),16 },
+       { IPv4(155,72,145,0),24 },
+       { IPv4(155,72,147,0),24 },
+       { IPv4(155,72,148,0),22 },
+       { IPv4(155,91,0,0),16 },
+       { IPv4(155,91,2,0),24 },
+       { IPv4(155,91,4,0),24 },
+       { IPv4(155,91,6,0),24 },
+       { IPv4(155,91,8,0),24 },
+       { IPv4(155,91,16,0),24 },
+       { IPv4(155,91,17,0),24 },
+       { IPv4(155,94,0,0),16 },
+       { IPv4(155,94,104,0),21 },
+       { IPv4(155,95,0,0),16 },
+       { IPv4(155,99,0,0),16 },
+       { IPv4(155,100,0,0),16 },
+       { IPv4(155,101,0,0),16 },
+       { IPv4(155,106,0,0),16 },
+       { IPv4(155,131,0,0),16 },
+       { IPv4(155,131,0,0),19 },
+       { IPv4(155,131,96,0),19 },
+       { IPv4(155,135,0,0),16 },
+       { IPv4(155,136,0,0),16 },
+       { IPv4(155,141,0,0),16 },
+       { IPv4(155,147,0,0),16 },
+       { IPv4(155,147,25,0),24 },
+       { IPv4(155,148,0,0),16 },
+       { IPv4(155,149,0,0),16 },
+       { IPv4(155,150,0,0),15 },
+       { IPv4(155,152,0,0),14 },
+       { IPv4(155,152,120,0),21 },
+       { IPv4(155,159,0,0),16 },
+       { IPv4(155,161,0,0),16 },
+       { IPv4(155,162,0,0),15 },
+       { IPv4(155,164,0,0),14 },
+       { IPv4(155,168,0,0),15 },
+       { IPv4(155,170,0,0),16 },
+       { IPv4(155,170,0,0),17 },
+       { IPv4(155,173,0,0),16 },
+       { IPv4(155,174,0,0),16 },
+       { IPv4(155,176,0,0),16 },
+       { IPv4(155,177,174,0),24 },
+       { IPv4(155,182,104,0),24 },
+       { IPv4(155,192,0,0),17 },
+       { IPv4(155,192,0,0),16 },
+       { IPv4(155,192,160,0),19 },
+       { IPv4(155,201,0,0),16 },
+       { IPv4(155,201,35,0),24 },
+       { IPv4(155,201,36,0),24 },
+       { IPv4(155,201,63,0),24 },
+       { IPv4(155,201,64,0),18 },
+       { IPv4(155,201,128,0),18 },
+       { IPv4(155,201,139,0),24 },
+       { IPv4(155,201,240,0),24 },
+       { IPv4(155,201,241,0),24 },
+       { IPv4(155,201,242,0),24 },
+       { IPv4(155,201,243,0),24 },
+       { IPv4(155,201,244,0),24 },
+       { IPv4(155,201,245,0),24 },
+       { IPv4(155,201,246,0),24 },
+       { IPv4(155,202,0,0),16 },
+       { IPv4(155,202,254,0),24 },
+       { IPv4(155,208,0,0),16 },
+       { IPv4(155,211,0,0),16 },
+       { IPv4(155,211,112,0),24 },
+       { IPv4(155,211,128,0),24 },
+       { IPv4(155,211,251,0),24 },
+       { IPv4(155,212,0,0),16 },
+       { IPv4(155,213,0,0),16 },
+       { IPv4(155,214,0,0),15 },
+       { IPv4(155,216,0,0),14 },
+       { IPv4(155,218,0,0),16 },
+       { IPv4(155,223,0,0),16 },
+       { IPv4(155,225,0,0),16 },
+       { IPv4(155,226,0,0),16 },
+       { IPv4(155,230,0,0),16 },
+       { IPv4(155,232,0,0),16 },
+       { IPv4(155,234,0,0),16 },
+       { IPv4(155,235,0,0),16 },
+       { IPv4(155,236,150,0),23 },
+       { IPv4(155,236,152,0),24 },
+       { IPv4(155,237,0,0),16 },
+       { IPv4(155,238,0,0),16 },
+       { IPv4(155,239,0,0),16 },
+       { IPv4(155,240,0,0),16 },
+       { IPv4(155,244,0,0),16 },
+       { IPv4(155,250,0,0),16 },
+       { IPv4(155,252,0,0),24 },
+       { IPv4(155,252,0,0),16 },
+       { IPv4(155,252,1,0),24 },
+       { IPv4(155,252,2,0),24 },
+       { IPv4(155,252,4,0),22 },
+       { IPv4(155,252,16,0),20 },
+       { IPv4(155,252,64,0),21 },
+       { IPv4(155,252,72,0),21 },
+       { IPv4(155,252,80,0),21 },
+       { IPv4(155,252,88,0),21 },
+       { IPv4(155,252,96,0),21 },
+       { IPv4(155,252,104,0),22 },
+       { IPv4(155,252,112,0),22 },
+       { IPv4(155,252,116,0),22 },
+       { IPv4(155,252,128,0),21 },
+       { IPv4(155,252,140,0),22 },
+       { IPv4(155,252,144,0),21 },
+       { IPv4(155,252,152,0),22 },
+       { IPv4(155,252,158,0),24 },
+       { IPv4(155,252,160,0),22 },
+       { IPv4(155,252,164,0),23 },
+       { IPv4(155,252,192,0),21 },
+       { IPv4(155,252,204,0),22 },
+       { IPv4(155,252,224,0),20 },
+       { IPv4(155,252,240,0),21 },
+       { IPv4(155,252,248,0),22 },
+       { IPv4(155,252,252,0),24 },
+       { IPv4(156,6,0,0),16 },
+       { IPv4(156,7,0,0),16 },
+       { IPv4(156,8,0,0),16 },
+       { IPv4(156,19,0,0),16 },
+       { IPv4(156,29,0,0),16 },
+       { IPv4(156,42,0,0),16 },
+       { IPv4(156,46,25,0),24 },
+       { IPv4(156,46,140,0),22 },
+       { IPv4(156,55,0,0),16 },
+       { IPv4(156,55,126,0),23 },
+       { IPv4(156,55,128,0),22 },
+       { IPv4(156,55,132,0),22 },
+       { IPv4(156,56,0,0),16 },
+       { IPv4(156,59,0,0),16 },
+       { IPv4(156,61,0,0),16 },
+       { IPv4(156,62,0,0),16 },
+       { IPv4(156,63,0,0),16 },
+       { IPv4(156,68,0,0),16 },
+       { IPv4(156,77,0,0),16 },
+       { IPv4(156,77,64,0),19 },
+       { IPv4(156,79,0,0),16 },
+       { IPv4(156,98,0,0),15 },
+       { IPv4(156,107,168,0),23 },
+       { IPv4(156,111,0,0),16 },
+       { IPv4(156,114,200,0),24 },
+       { IPv4(156,140,0,0),16 },
+       { IPv4(156,145,0,0),16 },
+       { IPv4(156,147,0,0),16 },
+       { IPv4(156,152,0,0),15 },
+       { IPv4(156,152,224,0),24 },
+       { IPv4(156,153,37,0),24 },
+       { IPv4(157,2,0,0),16 },
+       { IPv4(157,22,0,0),16 },
+       { IPv4(157,22,112,0),20 },
+       { IPv4(157,22,208,0),23 },
+       { IPv4(157,22,237,0),24 },
+       { IPv4(157,28,0,0),15 },
+       { IPv4(157,64,0,0),16 },
+       { IPv4(157,66,0,0),16 },
+       { IPv4(157,71,0,0),16 },
+       { IPv4(157,72,0,0),16 },
+       { IPv4(157,73,0,0),16 },
+       { IPv4(157,74,0,0),16 },
+       { IPv4(157,75,0,0),16 },
+       { IPv4(157,77,0,0),16 },
+       { IPv4(157,79,0,0),16 },
+       { IPv4(157,84,0,0),16 },
+       { IPv4(157,89,0,0),16 },
+       { IPv4(157,92,0,0),16 },
+       { IPv4(157,100,1,0),24 },
+       { IPv4(157,100,2,0),24 },
+       { IPv4(157,100,8,0),24 },
+       { IPv4(157,100,16,0),24 },
+       { IPv4(157,100,21,0),24 },
+       { IPv4(157,100,24,0),24 },
+       { IPv4(157,100,25,0),24 },
+       { IPv4(157,100,27,0),24 },
+       { IPv4(157,100,28,0),24 },
+       { IPv4(157,100,29,0),24 },
+       { IPv4(157,100,33,0),24 },
+       { IPv4(157,100,37,0),24 },
+       { IPv4(157,100,45,0),24 },
+       { IPv4(157,100,46,0),24 },
+       { IPv4(157,100,50,0),24 },
+       { IPv4(157,100,58,0),24 },
+       { IPv4(157,100,59,0),24 },
+       { IPv4(157,100,61,0),24 },
+       { IPv4(157,100,71,0),24 },
+       { IPv4(157,100,72,0),24 },
+       { IPv4(157,100,97,0),24 },
+       { IPv4(157,100,98,0),24 },
+       { IPv4(157,100,100,0),24 },
+       { IPv4(157,100,103,0),24 },
+       { IPv4(157,100,104,0),24 },
+       { IPv4(157,100,111,0),24 },
+       { IPv4(157,100,112,0),24 },
+       { IPv4(157,100,113,0),24 },
+       { IPv4(157,100,125,0),24 },
+       { IPv4(157,100,136,0),24 },
+       { IPv4(157,100,141,0),24 },
+       { IPv4(157,100,144,0),24 },
+       { IPv4(157,100,147,0),24 },
+       { IPv4(157,100,158,0),24 },
+       { IPv4(157,100,165,0),24 },
+       { IPv4(157,100,183,0),24 },
+       { IPv4(157,100,217,0),24 },
+       { IPv4(157,100,251,0),24 },
+       { IPv4(157,109,0,0),16 },
+       { IPv4(157,111,0,0),16 },
+       { IPv4(157,120,0,0),16 },
+       { IPv4(157,125,0,0),16 },
+       { IPv4(157,127,0,0),16 },
+       { IPv4(157,128,0,0),16 },
+       { IPv4(157,132,0,0),16 },
+       { IPv4(157,139,0,0),16 },
+       { IPv4(157,141,0,0),16 },
+       { IPv4(157,151,0,0),16 },
+       { IPv4(157,154,0,0),16 },
+       { IPv4(157,162,0,0),16 },
+       { IPv4(157,165,0,0),16 },
+       { IPv4(157,176,0,0),16 },
+       { IPv4(157,178,0,0),16 },
+       { IPv4(157,179,0,0),20 },
+       { IPv4(157,179,16,0),24 },
+       { IPv4(157,187,0,0),16 },
+       { IPv4(157,187,16,0),20 },
+       { IPv4(157,187,32,0),20 },
+       { IPv4(157,187,48,0),20 },
+       { IPv4(157,198,0,0),16 },
+       { IPv4(157,199,0,0),16 },
+       { IPv4(157,201,0,0),16 },
+       { IPv4(157,205,0,0),16 },
+       { IPv4(157,205,128,0),17 },
+       { IPv4(157,209,0,0),16 },
+       { IPv4(157,226,0,0),16 },
+       { IPv4(157,229,0,0),16 },
+       { IPv4(157,231,16,0),24 },
+       { IPv4(157,238,0,0),16 },
+       { IPv4(157,252,0,0),16 },
+       { IPv4(158,0,0,0),13 },
+       { IPv4(158,2,0,0),16 },
+       { IPv4(158,8,0,0),14 },
+       { IPv4(158,12,0,0),16 },
+       { IPv4(158,14,0,0),15 },
+       { IPv4(158,16,0,0),14 },
+       { IPv4(158,20,0,0),16 },
+       { IPv4(158,44,20,0),22 },
+       { IPv4(158,44,24,0),23 },
+       { IPv4(158,44,26,0),24 },
+       { IPv4(158,52,0,0),16 },
+       { IPv4(158,54,0,0),16 },
+       { IPv4(158,57,0,0),16 },
+       { IPv4(158,73,0,0),16 },
+       { IPv4(158,81,0,0),17 },
+       { IPv4(158,81,128,0),17 },
+       { IPv4(158,83,0,0),16 },
+       { IPv4(158,91,0,0),16 },
+       { IPv4(158,93,0,0),16 },
+       { IPv4(158,97,0,0),16 },
+       { IPv4(158,100,0,0),16 },
+       { IPv4(158,102,0,0),16 },
+       { IPv4(158,107,0,0),16 },
+       { IPv4(158,107,48,0),22 },
+       { IPv4(158,113,0,0),16 },
+       { IPv4(158,114,0,0),16 },
+       { IPv4(158,116,131,0),24 },
+       { IPv4(158,118,10,0),24 },
+       { IPv4(158,118,11,0),24 },
+       { IPv4(158,122,0,0),16 },
+       { IPv4(158,130,0,0),16 },
+       { IPv4(158,132,0,0),16 },
+       { IPv4(158,135,0,0),16 },
+       { IPv4(158,142,0,0),16 },
+       { IPv4(158,151,0,0),16 },
+       { IPv4(158,152,0,0),16 },
+       { IPv4(158,153,0,0),16 },
+       { IPv4(158,154,0,0),16 },
+       { IPv4(158,157,0,0),16 },
+       { IPv4(158,158,0,0),16 },
+       { IPv4(158,161,0,0),16 },
+       { IPv4(158,171,192,0),24 },
+       { IPv4(158,171,193,0),24 },
+       { IPv4(158,171,194,0),24 },
+       { IPv4(158,171,195,0),24 },
+       { IPv4(158,171,210,0),24 },
+       { IPv4(158,171,211,0),24 },
+       { IPv4(158,201,0,0),16 },
+       { IPv4(158,203,0,0),16 },
+       { IPv4(158,210,0,0),16 },
+       { IPv4(158,222,224,0),20 },
+       { IPv4(158,236,0,0),14 },
+       { IPv4(158,239,0,0),16 },
+       { IPv4(158,240,0,0),14 },
+       { IPv4(158,240,0,0),16 },
+       { IPv4(158,244,0,0),16 },
+       { IPv4(158,245,0,0),16 },
+       { IPv4(158,247,0,0),16 },
+       { IPv4(158,251,0,0),16 },
+       { IPv4(159,7,135,0),24 },
+       { IPv4(159,12,0,0),16 },
+       { IPv4(159,16,0,0),16 },
+       { IPv4(159,21,0,0),16 },
+       { IPv4(159,33,0,0),16 },
+       { IPv4(159,49,0,0),16 },
+       { IPv4(159,53,0,0),16 },
+       { IPv4(159,62,0,0),16 },
+       { IPv4(159,71,0,0),16 },
+       { IPv4(159,75,0,0),16 },
+       { IPv4(159,77,0,0),16 },
+       { IPv4(159,82,0,0),16 },
+       { IPv4(159,83,0,0),16 },
+       { IPv4(159,99,0,0),16 },
+       { IPv4(159,104,6,0),24 },
+       { IPv4(159,104,7,0),24 },
+       { IPv4(159,108,0,0),16 },
+       { IPv4(159,113,0,0),16 },
+       { IPv4(159,115,0,0),16 },
+       { IPv4(159,115,14,0),24 },
+       { IPv4(159,119,0,0),16 },
+       { IPv4(159,120,0,0),16 },
+       { IPv4(159,124,0,0),16 },
+       { IPv4(159,133,0,0),16 },
+       { IPv4(159,137,0,0),16 },
+       { IPv4(159,140,0,0),16 },
+       { IPv4(159,140,174,0),24 },
+       { IPv4(159,140,213,0),24 },
+       { IPv4(159,140,214,0),24 },
+       { IPv4(159,140,218,0),24 },
+       { IPv4(159,140,219,0),24 },
+       { IPv4(159,140,244,0),24 },
+       { IPv4(159,140,254,0),24 },
+       { IPv4(159,143,0,0),16 },
+       { IPv4(159,153,0,0),17 },
+       { IPv4(159,153,0,0),16 },
+       { IPv4(159,153,128,0),19 },
+       { IPv4(159,153,160,0),21 },
+       { IPv4(159,153,192,0),19 },
+       { IPv4(159,153,224,0),19 },
+       { IPv4(159,157,16,0),24 },
+       { IPv4(159,157,254,0),24 },
+       { IPv4(159,182,0,0),16 },
+       { IPv4(159,189,0,0),16 },
+       { IPv4(159,199,0,0),16 },
+       { IPv4(159,204,0,0),16 },
+       { IPv4(159,212,0,0),16 },
+       { IPv4(159,221,0,0),16 },
+       { IPv4(159,223,0,0),16 },
+       { IPv4(159,226,0,0),16 },
+       { IPv4(159,240,0,0),16 },
+       { IPv4(159,247,0,0),16 },
+       { IPv4(159,251,0,0),16 },
+       { IPv4(160,7,0,0),16 },
+       { IPv4(160,10,0,0),16 },
+       { IPv4(160,23,0,0),16 },
+       { IPv4(160,33,0,0),19 },
+       { IPv4(160,33,0,0),16 },
+       { IPv4(160,39,0,0),16 },
+       { IPv4(160,41,0,0),16 },
+       { IPv4(160,42,0,0),16 },
+       { IPv4(160,43,0,0),16 },
+       { IPv4(160,54,0,0),15 },
+       { IPv4(160,56,0,0),15 },
+       { IPv4(160,58,0,0),16 },
+       { IPv4(160,69,0,0),23 },
+       { IPv4(160,79,0,0),16 },
+       { IPv4(160,79,80,0),24 },
+       { IPv4(160,79,190,0),23 },
+       { IPv4(160,79,198,0),23 },
+       { IPv4(160,79,214,0),23 },
+       { IPv4(160,79,216,0),23 },
+       { IPv4(160,79,224,0),22 },
+       { IPv4(160,79,240,0),22 },
+       { IPv4(160,79,248,0),22 },
+       { IPv4(160,83,32,0),19 },
+       { IPv4(160,87,0,0),16 },
+       { IPv4(160,91,0,0),16 },
+       { IPv4(160,93,0,0),16 },
+       { IPv4(160,94,0,0),16 },
+       { IPv4(160,96,0,0),16 },
+       { IPv4(160,115,0,0),16 },
+       { IPv4(160,118,0,0),16 },
+       { IPv4(160,123,0,0),16 },
+       { IPv4(160,125,0,0),16 },
+       { IPv4(160,126,0,0),15 },
+       { IPv4(160,128,0,0),18 },
+       { IPv4(160,129,0,0),16 },
+       { IPv4(160,132,0,0),15 },
+       { IPv4(160,134,0,0),16 },
+       { IPv4(160,135,0,0),16 },
+       { IPv4(160,136,0,0),13 },
+       { IPv4(160,144,0,0),13 },
+       { IPv4(160,147,0,0),16 },
+       { IPv4(160,189,0,0),16 },
+       { IPv4(160,190,0,0),16 },
+       { IPv4(160,192,0,0),16 },
+       { IPv4(160,194,0,0),16 },
+       { IPv4(160,199,0,0),16 },
+       { IPv4(160,201,0,0),16 },
+       { IPv4(160,202,0,0),16 },
+       { IPv4(160,205,0,0),16 },
+       { IPv4(160,206,0,0),16 },
+       { IPv4(160,207,0,0),16 },
+       { IPv4(160,211,0,0),16 },
+       { IPv4(160,212,0,0),16 },
+       { IPv4(160,219,0,0),16 },
+       { IPv4(160,221,0,0),16 },
+       { IPv4(160,227,0,0),16 },
+       { IPv4(160,231,0,0),16 },
+       { IPv4(160,231,1,0),24 },
+       { IPv4(160,239,0,0),16 },
+       { IPv4(160,239,1,0),24 },
+       { IPv4(160,240,0,0),16 },
+       { IPv4(160,243,0,0),16 },
+       { IPv4(160,248,0,0),16 },
+       { IPv4(160,254,0,0),16 },
+       { IPv4(160,254,107,0),24 },
+       { IPv4(160,254,115,0),24 },
+       { IPv4(160,254,123,0),24 },
+       { IPv4(161,1,0,0),17 },
+       { IPv4(161,1,0,0),16 },
+       { IPv4(161,2,0,0),16 },
+       { IPv4(161,13,0,0),16 },
+       { IPv4(161,16,0,0),16 },
+       { IPv4(161,21,0,0),18 },
+       { IPv4(161,21,20,0),23 },
+       { IPv4(161,21,22,0),23 },
+       { IPv4(161,21,24,0),23 },
+       { IPv4(161,21,26,0),23 },
+       { IPv4(161,21,28,0),23 },
+       { IPv4(161,21,30,0),23 },
+       { IPv4(161,21,32,0),23 },
+       { IPv4(161,21,34,0),23 },
+       { IPv4(161,21,36,0),23 },
+       { IPv4(161,21,38,0),23 },
+       { IPv4(161,21,40,0),23 },
+       { IPv4(161,21,42,0),23 },
+       { IPv4(161,21,44,0),23 },
+       { IPv4(161,21,46,0),23 },
+       { IPv4(161,21,48,0),23 },
+       { IPv4(161,21,50,0),23 },
+       { IPv4(161,21,52,0),23 },
+       { IPv4(161,21,54,0),23 },
+       { IPv4(161,21,56,0),23 },
+       { IPv4(161,21,58,0),23 },
+       { IPv4(161,21,60,0),23 },
+       { IPv4(161,21,62,0),23 },
+       { IPv4(161,21,64,0),19 },
+       { IPv4(161,21,64,0),23 },
+       { IPv4(161,21,66,0),23 },
+       { IPv4(161,21,68,0),23 },
+       { IPv4(161,21,70,0),23 },
+       { IPv4(161,21,72,0),23 },
+       { IPv4(161,21,74,0),23 },
+       { IPv4(161,21,76,0),23 },
+       { IPv4(161,21,78,0),23 },
+       { IPv4(161,21,80,0),23 },
+       { IPv4(161,21,82,0),23 },
+       { IPv4(161,21,84,0),23 },
+       { IPv4(161,21,86,0),23 },
+       { IPv4(161,21,88,0),23 },
+       { IPv4(161,28,0,0),16 },
+       { IPv4(161,33,0,0),16 },
+       { IPv4(161,33,3,0),24 },
+       { IPv4(161,38,0,0),16 },
+       { IPv4(161,40,0,0),16 },
+       { IPv4(161,44,0,0),16 },
+       { IPv4(161,46,0,0),16 },
+       { IPv4(161,51,224,0),20 },
+       { IPv4(161,58,0,0),16 },
+       { IPv4(161,65,0,0),16 },
+       { IPv4(161,69,0,0),16 },
+       { IPv4(161,69,211,0),24 },
+       { IPv4(161,69,212,0),24 },
+       { IPv4(161,69,213,0),24 },
+       { IPv4(161,71,171,0),24 },
+       { IPv4(161,81,0,0),16 },
+       { IPv4(161,97,0,0),16 },
+       { IPv4(161,98,0,0),16 },
+       { IPv4(161,98,128,0),17 },
+       { IPv4(161,114,180,0),24 },
+       { IPv4(161,114,188,0),24 },
+       { IPv4(161,114,189,0),24 },
+       { IPv4(161,114,192,0),20 },
+       { IPv4(161,119,0,0),16 },
+       { IPv4(161,122,0,0),16 },
+       { IPv4(161,124,0,0),16 },
+       { IPv4(161,130,0,0),16 },
+       { IPv4(161,132,232,0),21 },
+       { IPv4(161,132,240,0),21 },
+       { IPv4(161,134,0,0),16 },
+       { IPv4(161,135,0,0),16 },
+       { IPv4(161,136,0,0),16 },
+       { IPv4(161,137,0,0),16 },
+       { IPv4(161,139,0,0),16 },
+       { IPv4(161,142,0,0),17 },
+       { IPv4(161,142,0,0),16 },
+       { IPv4(161,142,128,0),17 },
+       { IPv4(161,149,0,0),16 },
+       { IPv4(161,150,0,0),17 },
+       { IPv4(161,150,128,0),18 },
+       { IPv4(161,150,192,0),18 },
+       { IPv4(161,155,0,0),16 },
+       { IPv4(161,159,0,0),16 },
+       { IPv4(161,160,0,0),16 },
+       { IPv4(161,161,0,0),16 },
+       { IPv4(161,165,0,0),16 },
+       { IPv4(161,173,0,0),16 },
+       { IPv4(161,173,11,0),24 },
+       { IPv4(161,180,0,0),16 },
+       { IPv4(161,181,246,0),24 },
+       { IPv4(161,185,0,0),16 },
+       { IPv4(161,186,0,0),16 },
+       { IPv4(161,195,0,0),16 },
+       { IPv4(161,207,0,0),16 },
+       { IPv4(161,210,0,0),16 },
+       { IPv4(161,213,0,0),16 },
+       { IPv4(161,217,0,0),16 },
+       { IPv4(161,222,0,0),16 },
+       { IPv4(161,223,0,0),16 },
+       { IPv4(161,223,0,0),21 },
+       { IPv4(161,223,8,0),21 },
+       { IPv4(161,223,16,0),20 },
+       { IPv4(161,223,32,0),19 },
+       { IPv4(161,223,64,0),18 },
+       { IPv4(161,223,128,0),18 },
+       { IPv4(161,223,192,0),20 },
+       { IPv4(161,223,208,0),21 },
+       { IPv4(161,223,216,0),22 },
+       { IPv4(161,223,225,0),24 },
+       { IPv4(161,223,226,0),23 },
+       { IPv4(161,223,228,0),22 },
+       { IPv4(161,223,232,0),21 },
+       { IPv4(161,223,240,0),20 },
+       { IPv4(161,224,0,0),16 },
+       { IPv4(161,225,0,0),16 },
+       { IPv4(161,229,0,0),16 },
+       { IPv4(161,230,0,0),16 },
+       { IPv4(161,232,0,0),16 },
+       { IPv4(161,233,0,0),16 },
+       { IPv4(161,242,0,0),16 },
+       { IPv4(161,242,208,0),20 },
+       { IPv4(161,243,0,0),16 },
+       { IPv4(161,246,0,0),16 },
+       { IPv4(161,253,0,0),16 },
+       { IPv4(162,5,0,0),16 },
+       { IPv4(162,5,128,0),17 },
+       { IPv4(162,8,0,0),16 },
+       { IPv4(162,8,230,0),24 },
+       { IPv4(162,8,231,0),24 },
+       { IPv4(162,8,232,0),24 },
+       { IPv4(162,8,233,0),24 },
+       { IPv4(162,10,0,0),16 },
+       { IPv4(162,13,0,0),16 },
+       { IPv4(162,13,32,0),20 },
+       { IPv4(162,24,0,0),16 },
+       { IPv4(162,27,0,0),16 },
+       { IPv4(162,33,1,0),24 },
+       { IPv4(162,33,96,0),21 },
+       { IPv4(162,33,96,0),19 },
+       { IPv4(162,33,104,0),21 },
+       { IPv4(162,33,112,0),21 },
+       { IPv4(162,33,120,0),21 },
+       { IPv4(162,33,163,0),24 },
+       { IPv4(162,36,0,0),16 },
+       { IPv4(162,40,107,0),24 },
+       { IPv4(162,48,0,0),16 },
+       { IPv4(162,51,0,0),16 },
+       { IPv4(162,57,0,0),16 },
+       { IPv4(162,69,0,0),16 },
+       { IPv4(162,88,0,0),16 },
+       { IPv4(162,93,0,0),16 },
+       { IPv4(162,93,64,0),19 },
+       { IPv4(162,93,160,0),19 },
+       { IPv4(162,93,192,0),19 },
+       { IPv4(162,93,224,0),19 },
+       { IPv4(162,94,0,0),16 },
+       { IPv4(162,96,0,0),16 },
+       { IPv4(162,116,0,0),16 },
+       { IPv4(162,123,0,0),16 },
+       { IPv4(162,126,205,0),24 },
+       { IPv4(162,126,206,0),24 },
+       { IPv4(162,126,207,0),24 },
+       { IPv4(162,126,208,0),24 },
+       { IPv4(162,129,0,0),16 },
+       { IPv4(162,130,0,0),16 },
+       { IPv4(162,136,0,0),16 },
+       { IPv4(162,136,40,0),22 },
+       { IPv4(163,2,0,0),16 },
+       { IPv4(163,6,0,0),16 },
+       { IPv4(163,7,0,0),16 },
+       { IPv4(163,10,0,0),16 },
+       { IPv4(163,12,0,0),16 },
+       { IPv4(163,13,0,0),16 },
+       { IPv4(163,14,0,0),15 },
+       { IPv4(163,16,0,0),15 },
+       { IPv4(163,17,32,0),22 },
+       { IPv4(163,17,36,0),23 },
+       { IPv4(163,17,40,0),21 },
+       { IPv4(163,17,48,0),20 },
+       { IPv4(163,17,88,0),21 },
+       { IPv4(163,17,96,0),22 },
+       { IPv4(163,17,108,0),22 },
+       { IPv4(163,17,112,0),20 },
+       { IPv4(163,17,128,0),23 },
+       { IPv4(163,17,130,0),24 },
+       { IPv4(163,17,146,0),23 },
+       { IPv4(163,17,148,0),22 },
+       { IPv4(163,17,152,0),23 },
+       { IPv4(163,17,154,0),24 },
+       { IPv4(163,17,156,0),22 },
+       { IPv4(163,17,160,0),21 },
+       { IPv4(163,17,169,0),24 },
+       { IPv4(163,17,170,0),23 },
+       { IPv4(163,17,172,0),22 },
+       { IPv4(163,17,176,0),22 },
+       { IPv4(163,17,180,0),23 },
+       { IPv4(163,17,182,0),24 },
+       { IPv4(163,17,184,0),21 },
+       { IPv4(163,17,192,0),20 },
+       { IPv4(163,17,208,0),20 },
+       { IPv4(163,17,224,0),20 },
+       { IPv4(163,17,240,0),24 },
+       { IPv4(163,18,0,0),16 },
+       { IPv4(163,19,0,0),16 },
+       { IPv4(163,20,0,0),14 },
+       { IPv4(163,24,0,0),14 },
+       { IPv4(163,28,0,0),16 },
+       { IPv4(163,28,8,0),21 },
+       { IPv4(163,29,0,0),16 },
+       { IPv4(163,30,0,0),16 },
+       { IPv4(163,31,0,0),16 },
+       { IPv4(163,42,0,0),16 },
+       { IPv4(163,49,144,0),22 },
+       { IPv4(163,126,0,0),16 },
+       { IPv4(163,129,0,0),16 },
+       { IPv4(163,139,0,0),16 },
+       { IPv4(163,142,0,0),16 },
+       { IPv4(163,149,0,0),16 },
+       { IPv4(163,152,0,0),16 },
+       { IPv4(163,152,151,0),24 },
+       { IPv4(163,152,152,0),24 },
+       { IPv4(163,152,153,0),24 },
+       { IPv4(163,152,154,0),24 },
+       { IPv4(163,152,161,0),24 },
+       { IPv4(163,152,162,0),24 },
+       { IPv4(163,152,163,0),24 },
+       { IPv4(163,152,164,0),24 },
+       { IPv4(163,152,171,0),24 },
+       { IPv4(163,152,172,0),24 },
+       { IPv4(163,152,173,0),24 },
+       { IPv4(163,152,174,0),24 },
+       { IPv4(163,153,0,0),16 },
+       { IPv4(163,153,238,0),24 },
+       { IPv4(163,156,0,0),16 },
+       { IPv4(163,157,0,0),16 },
+       { IPv4(163,164,0,0),16 },
+       { IPv4(163,166,0,0),16 },
+       { IPv4(163,175,0,0),16 },
+       { IPv4(163,179,0,0),16 },
+       { IPv4(163,179,38,0),24 },
+       { IPv4(163,179,107,0),24 },
+       { IPv4(163,179,161,0),24 },
+       { IPv4(163,180,0,0),18 },
+       { IPv4(163,180,0,0),17 },
+       { IPv4(163,180,64,0),19 },
+       { IPv4(163,180,96,0),19 },
+       { IPv4(163,180,128,0),18 },
+       { IPv4(163,180,128,0),19 },
+       { IPv4(163,180,160,0),21 },
+       { IPv4(163,180,168,0),23 },
+       { IPv4(163,180,170,0),24 },
+       { IPv4(163,191,0,0),16 },
+       { IPv4(163,191,0,0),19 },
+       { IPv4(163,191,96,0),19 },
+       { IPv4(163,191,128,0),19 },
+       { IPv4(163,191,192,0),19 },
+       { IPv4(163,191,224,0),19 },
+       { IPv4(163,196,0,0),16 },
+       { IPv4(163,197,0,0),16 },
+       { IPv4(163,198,0,0),15 },
+       { IPv4(163,199,132,0),24 },
+       { IPv4(163,200,0,0),16 },
+       { IPv4(163,201,0,0),16 },
+       { IPv4(163,202,0,0),15 },
+       { IPv4(163,205,0,0),16 },
+       { IPv4(163,206,0,0),16 },
+       { IPv4(163,207,0,0),16 },
+       { IPv4(163,215,0,0),16 },
+       { IPv4(163,220,0,0),16 },
+       { IPv4(163,221,0,0),16 },
+       { IPv4(163,224,0,0),16 },
+       { IPv4(163,228,0,0),16 },
+       { IPv4(163,230,0,0),16 },
+       { IPv4(163,231,0,0),16 },
+       { IPv4(163,234,0,0),16 },
+       { IPv4(163,238,0,0),16 },
+       { IPv4(163,239,0,0),17 },
+       { IPv4(163,239,128,0),18 },
+       { IPv4(163,239,251,0),24 },
+       { IPv4(163,244,0,0),16 },
+       { IPv4(163,247,0,0),16 },
+       { IPv4(163,247,40,0),24 },
+       { IPv4(163,247,41,0),24 },
+       { IPv4(163,247,42,0),24 },
+       { IPv4(163,247,43,0),24 },
+       { IPv4(163,247,44,0),24 },
+       { IPv4(163,247,46,0),24 },
+       { IPv4(163,247,47,0),24 },
+       { IPv4(163,247,48,0),24 },
+       { IPv4(163,247,49,0),24 },
+       { IPv4(163,247,50,0),24 },
+       { IPv4(163,247,51,0),24 },
+       { IPv4(163,247,52,0),24 },
+       { IPv4(163,247,53,0),24 },
+       { IPv4(163,247,54,0),24 },
+       { IPv4(163,247,55,0),24 },
+       { IPv4(163,247,56,0),24 },
+       { IPv4(163,247,57,0),24 },
+       { IPv4(163,247,58,0),24 },
+       { IPv4(163,247,59,0),24 },
+       { IPv4(163,247,60,0),24 },
+       { IPv4(163,247,61,0),24 },
+       { IPv4(163,247,62,0),24 },
+       { IPv4(163,247,63,0),24 },
+       { IPv4(163,247,64,0),24 },
+       { IPv4(163,247,65,0),24 },
+       { IPv4(163,247,69,0),24 },
+       { IPv4(163,247,70,0),24 },
+       { IPv4(163,247,71,0),24 },
+       { IPv4(163,247,72,0),24 },
+       { IPv4(163,248,0,0),16 },
+       { IPv4(163,249,0,0),16 },
+       { IPv4(163,249,43,0),24 },
+       { IPv4(163,249,53,0),24 },
+       { IPv4(163,249,54,0),24 },
+       { IPv4(163,249,57,0),24 },
+       { IPv4(163,249,140,0),22 },
+       { IPv4(163,249,160,0),21 },
+       { IPv4(163,249,168,0),23 },
+       { IPv4(163,249,170,0),24 },
+       { IPv4(163,251,0,0),19 },
+       { IPv4(163,251,32,0),22 },
+       { IPv4(163,251,36,0),22 },
+       { IPv4(163,251,40,0),22 },
+       { IPv4(163,251,44,0),22 },
+       { IPv4(163,251,48,0),22 },
+       { IPv4(163,251,52,0),22 },
+       { IPv4(163,251,64,0),19 },
+       { IPv4(163,251,96,0),22 },
+       { IPv4(163,251,224,0),19 },
+       { IPv4(163,251,226,0),24 },
+       { IPv4(163,251,228,0),24 },
+       { IPv4(163,251,229,0),24 },
+       { IPv4(163,251,240,0),21 },
+       { IPv4(163,251,250,0),24 },
+       { IPv4(163,251,251,0),24 },
+       { IPv4(163,251,252,0),24 },
+       { IPv4(163,251,254,0),24 },
+       { IPv4(164,38,0,0),16 },
+       { IPv4(164,39,185,0),24 },
+       { IPv4(164,43,0,0),16 },
+       { IPv4(164,47,0,0),16 },
+       { IPv4(164,48,199,0),24 },
+       { IPv4(164,49,0,0),16 },
+       { IPv4(164,50,0,0),16 },
+       { IPv4(164,54,0,0),16 },
+       { IPv4(164,55,0,0),16 },
+       { IPv4(164,57,0,0),16 },
+       { IPv4(164,65,0,0),16 },
+       { IPv4(164,66,0,0),16 },
+       { IPv4(164,67,0,0),16 },
+       { IPv4(164,68,0,0),16 },
+       { IPv4(164,78,0,0),16 },
+       { IPv4(164,79,0,0),16 },
+       { IPv4(164,83,0,0),16 },
+       { IPv4(164,87,0,0),16 },
+       { IPv4(164,88,0,0),16 },
+       { IPv4(164,92,0,0),16 },
+       { IPv4(164,92,24,0),24 },
+       { IPv4(164,92,144,0),24 },
+       { IPv4(164,92,146,0),24 },
+       { IPv4(164,92,155,0),24 },
+       { IPv4(164,94,0,0),16 },
+       { IPv4(164,99,0,0),16 },
+       { IPv4(164,103,0,0),16 },
+       { IPv4(164,104,0,0),16 },
+       { IPv4(164,105,168,0),24 },
+       { IPv4(164,113,0,0),19 },
+       { IPv4(164,113,32,0),19 },
+       { IPv4(164,113,240,0),21 },
+       { IPv4(164,117,0,0),16 },
+       { IPv4(164,121,0,0),16 },
+       { IPv4(164,122,0,0),16 },
+       { IPv4(164,124,0,0),16 },
+       { IPv4(164,125,0,0),16 },
+       { IPv4(164,140,0,0),16 },
+       { IPv4(164,143,248,0),24 },
+       { IPv4(164,143,249,0),24 },
+       { IPv4(164,143,250,0),24 },
+       { IPv4(164,143,251,0),24 },
+       { IPv4(164,145,0,0),16 },
+       { IPv4(164,155,0,0),16 },
+       { IPv4(164,158,0,0),16 },
+       { IPv4(164,164,0,0),16 },
+       { IPv4(164,164,42,0),24 },
+       { IPv4(164,164,45,0),24 },
+       { IPv4(164,164,46,0),24 },
+       { IPv4(164,164,48,0),24 },
+       { IPv4(164,164,97,0),24 },
+       { IPv4(164,167,0,0),16 },
+       { IPv4(164,171,0,0),16 },
+       { IPv4(164,178,0,0),16 },
+       { IPv4(164,190,0,0),16 },
+       { IPv4(164,191,0,0),16 },
+       { IPv4(164,216,0,0),16 },
+       { IPv4(164,218,0,0),16 },
+       { IPv4(164,220,0,0),16 },
+       { IPv4(164,221,0,0),18 },
+       { IPv4(164,221,0,0),16 },
+       { IPv4(164,221,64,0),19 },
+       { IPv4(164,221,184,0),21 },
+       { IPv4(164,221,192,0),21 },
+       { IPv4(164,221,200,0),21 },
+       { IPv4(164,221,208,0),20 },
+       { IPv4(164,221,224,0),19 },
+       { IPv4(164,223,0,0),16 },
+       { IPv4(164,224,0,0),16 },
+       { IPv4(164,226,0,0),16 },
+       { IPv4(164,227,0,0),16 },
+       { IPv4(164,230,0,0),15 },
+       { IPv4(164,230,0,0),16 },
+       { IPv4(164,231,72,0),24 },
+       { IPv4(165,1,0,0),16 },
+       { IPv4(165,4,0,0),16 },
+       { IPv4(165,8,0,0),16 },
+       { IPv4(165,10,0,0),16 },
+       { IPv4(165,11,0,0),16 },
+       { IPv4(165,21,0,0),16 },
+       { IPv4(165,21,24,0),21 },
+       { IPv4(165,21,112,0),21 },
+       { IPv4(165,21,124,0),22 },
+       { IPv4(165,21,128,0),22 },
+       { IPv4(165,21,132,0),24 },
+       { IPv4(165,21,134,0),24 },
+       { IPv4(165,24,0,0),16 },
+       { IPv4(165,25,0,0),16 },
+       { IPv4(165,26,0,0),16 },
+       { IPv4(165,28,0,0),16 },
+       { IPv4(165,30,0,0),16 },
+       { IPv4(165,64,0,0),16 },
+       { IPv4(165,65,0,0),16 },
+       { IPv4(165,72,32,0),19 },
+       { IPv4(165,76,0,0),16 },
+       { IPv4(165,83,0,0),16 },
+       { IPv4(165,87,0,0),16 },
+       { IPv4(165,87,15,0),24 },
+       { IPv4(165,87,17,0),24 },
+       { IPv4(165,87,44,0),23 },
+       { IPv4(165,87,49,0),24 },
+       { IPv4(165,87,56,0),24 },
+       { IPv4(165,87,108,0),22 },
+       { IPv4(165,87,112,0),21 },
+       { IPv4(165,87,113,0),24 },
+       { IPv4(165,87,114,0),24 },
+       { IPv4(165,87,173,0),24 },
+       { IPv4(165,87,177,0),24 },
+       { IPv4(165,89,0,0),16 },
+       { IPv4(165,97,0,0),16 },
+       { IPv4(165,98,4,0),24 },
+       { IPv4(165,98,8,0),24 },
+       { IPv4(165,98,11,0),24 },
+       { IPv4(165,98,12,0),24 },
+       { IPv4(165,98,101,0),24 },
+       { IPv4(165,98,102,0),24 },
+       { IPv4(165,98,103,0),24 },
+       { IPv4(165,98,104,0),22 },
+       { IPv4(165,113,0,0),16 },
+       { IPv4(165,113,127,0),24 },
+       { IPv4(165,113,128,0),24 },
+       { IPv4(165,113,129,0),24 },
+       { IPv4(165,113,156,0),24 },
+       { IPv4(165,113,161,0),24 },
+       { IPv4(165,113,176,0),24 },
+       { IPv4(165,113,187,0),24 },
+       { IPv4(165,113,189,0),24 },
+       { IPv4(165,113,190,0),24 },
+       { IPv4(165,113,191,0),24 },
+       { IPv4(165,113,193,0),24 },
+       { IPv4(165,113,208,0),24 },
+       { IPv4(165,113,239,0),24 },
+       { IPv4(165,121,0,0),16 },
+       { IPv4(165,121,96,0),20 },
+       { IPv4(165,123,0,0),16 },
+       { IPv4(165,124,0,0),16 },
+       { IPv4(165,125,32,0),20 },
+       { IPv4(165,127,0,0),16 },
+       { IPv4(165,130,0,0),16 },
+       { IPv4(165,132,0,0),16 },
+       { IPv4(165,132,224,0),22 },
+       { IPv4(165,132,228,0),22 },
+       { IPv4(165,132,232,0),21 },
+       { IPv4(165,132,240,0),22 },
+       { IPv4(165,132,244,0),23 },
+       { IPv4(165,132,246,0),23 },
+       { IPv4(165,133,0,0),17 },
+       { IPv4(165,133,40,0),24 },
+       { IPv4(165,133,128,0),17 },
+       { IPv4(165,137,0,0),16 },
+       { IPv4(165,141,0,0),16 },
+       { IPv4(165,141,0,0),22 },
+       { IPv4(165,141,4,0),23 },
+       { IPv4(165,141,6,0),24 },
+       { IPv4(165,141,8,0),21 },
+       { IPv4(165,141,16,0),20 },
+       { IPv4(165,141,96,0),20 },
+       { IPv4(165,141,112,0),20 },
+       { IPv4(165,141,128,0),19 },
+       { IPv4(165,141,160,0),20 },
+       { IPv4(165,141,184,0),22 },
+       { IPv4(165,141,200,0),22 },
+       { IPv4(165,141,208,0),21 },
+       { IPv4(165,141,216,0),22 },
+       { IPv4(165,141,220,0),22 },
+       { IPv4(165,141,222,0),23 },
+       { IPv4(165,141,224,0),22 },
+       { IPv4(165,141,228,0),23 },
+       { IPv4(165,141,238,0),23 },
+       { IPv4(165,141,240,0),23 },
+       { IPv4(165,141,244,0),24 },
+       { IPv4(165,141,249,0),24 },
+       { IPv4(165,150,0,0),16 },
+       { IPv4(165,152,0,0),16 },
+       { IPv4(165,155,0,0),16 },
+       { IPv4(165,166,0,0),16 },
+       { IPv4(165,170,24,0),21 },
+       { IPv4(165,170,64,0),24 },
+       { IPv4(165,170,128,0),24 },
+       { IPv4(165,170,176,0),20 },
+       { IPv4(165,170,208,0),24 },
+       { IPv4(165,173,0,0),16 },
+       { IPv4(165,186,0,0),16 },
+       { IPv4(165,190,122,0),23 },
+       { IPv4(165,190,124,0),22 },
+       { IPv4(165,190,128,0),21 },
+       { IPv4(165,193,0,0),16 },
+       { IPv4(165,194,0,0),16 },
+       { IPv4(165,194,128,0),17 },
+       { IPv4(165,196,0,0),16 },
+       { IPv4(165,201,0,0),16 },
+       { IPv4(165,206,0,0),16 },
+       { IPv4(165,206,238,0),24 },
+       { IPv4(165,212,0,0),18 },
+       { IPv4(165,212,0,0),16 },
+       { IPv4(165,212,63,0),24 },
+       { IPv4(165,217,0,0),16 },
+       { IPv4(165,225,194,0),24 },
+       { IPv4(165,229,0,0),16 },
+       { IPv4(165,230,0,0),16 },
+       { IPv4(165,232,0,0),16 },
+       { IPv4(165,233,0,0),16 },
+       { IPv4(165,236,0,0),16 },
+       { IPv4(165,238,0,0),16 },
+       { IPv4(165,243,0,0),16 },
+       { IPv4(165,244,0,0),16 },
+       { IPv4(165,246,0,0),16 },
+       { IPv4(165,247,0,0),16 },
+       { IPv4(165,247,120,0),21 },
+       { IPv4(165,247,196,0),22 },
+       { IPv4(165,247,200,0),21 },
+       { IPv4(165,247,208,0),20 },
+       { IPv4(165,247,224,0),22 },
+       { IPv4(165,247,248,0),21 },
+       { IPv4(165,251,0,0),16 },
+       { IPv4(165,251,24,0),22 },
+       { IPv4(165,251,28,0),22 },
+       { IPv4(165,251,32,0),22 },
+       { IPv4(165,251,36,0),22 },
+       { IPv4(165,251,252,0),22 },
+       { IPv4(165,252,93,0),24 },
+       { IPv4(165,254,0,0),16 },
+       { IPv4(165,254,85,0),24 },
+       { IPv4(166,16,0,0),16 },
+       { IPv4(166,19,0,0),16 },
+       { IPv4(166,20,0,0),16 },
+       { IPv4(166,21,0,0),16 },
+       { IPv4(166,30,0,0),16 },
+       { IPv4(166,49,128,0),17 },
+       { IPv4(166,49,128,0),23 },
+       { IPv4(166,49,130,0),24 },
+       { IPv4(166,49,131,0),24 },
+       { IPv4(166,49,132,0),24 },
+       { IPv4(166,49,133,0),24 },
+       { IPv4(166,49,134,0),24 },
+       { IPv4(166,49,137,0),24 },
+       { IPv4(166,49,138,0),24 },
+       { IPv4(166,49,139,0),24 },
+       { IPv4(166,49,144,0),24 },
+       { IPv4(166,49,149,0),24 },
+       { IPv4(166,49,156,0),24 },
+       { IPv4(166,49,172,0),22 },
+       { IPv4(166,49,180,0),23 },
+       { IPv4(166,49,182,0),23 },
+       { IPv4(166,49,184,0),23 },
+       { IPv4(166,49,186,0),23 },
+       { IPv4(166,49,190,0),23 },
+       { IPv4(166,49,192,0),22 },
+       { IPv4(166,49,224,0),19 },
+       { IPv4(166,70,0,0),16 },
+       { IPv4(166,72,0,0),16 },
+       { IPv4(166,72,88,0),24 },
+       { IPv4(166,72,96,0),24 },
+       { IPv4(166,72,121,0),24 },
+       { IPv4(166,72,122,0),24 },
+       { IPv4(166,72,126,0),24 },
+       { IPv4(166,72,149,0),24 },
+       { IPv4(166,72,151,0),24 },
+       { IPv4(166,72,158,0),24 },
+       { IPv4(166,72,159,0),24 },
+       { IPv4(166,72,162,0),24 },
+       { IPv4(166,72,169,0),24 },
+       { IPv4(166,72,173,0),24 },
+       { IPv4(166,72,181,0),24 },
+       { IPv4(166,72,202,0),24 },
+       { IPv4(166,72,203,0),24 },
+       { IPv4(166,72,208,0),24 },
+       { IPv4(166,72,214,0),24 },
+       { IPv4(166,72,220,0),24 },
+       { IPv4(166,72,233,0),24 },
+       { IPv4(166,72,249,0),24 },
+       { IPv4(166,73,0,0),16 },
+       { IPv4(166,73,20,0),24 },
+       { IPv4(166,77,0,0),16 },
+       { IPv4(166,80,8,0),24 },
+       { IPv4(166,80,9,0),24 },
+       { IPv4(166,80,10,0),24 },
+       { IPv4(166,80,16,0),24 },
+       { IPv4(166,80,27,0),24 },
+       { IPv4(166,80,28,0),23 },
+       { IPv4(166,80,30,0),24 },
+       { IPv4(166,80,46,0),24 },
+       { IPv4(166,80,50,0),24 },
+       { IPv4(166,80,54,0),24 },
+       { IPv4(166,80,62,0),24 },
+       { IPv4(166,80,74,0),24 },
+       { IPv4(166,80,78,0),24 },
+       { IPv4(166,80,79,0),24 },
+       { IPv4(166,80,82,0),24 },
+       { IPv4(166,80,83,0),24 },
+       { IPv4(166,80,84,0),24 },
+       { IPv4(166,80,90,0),24 },
+       { IPv4(166,80,106,0),24 },
+       { IPv4(166,80,114,0),24 },
+       { IPv4(166,80,120,0),24 },
+       { IPv4(166,80,126,0),24 },
+       { IPv4(166,80,127,0),24 },
+       { IPv4(166,80,242,0),24 },
+       { IPv4(166,84,0,0),16 },
+       { IPv4(166,84,56,0),21 },
+       { IPv4(166,84,56,0),22 },
+       { IPv4(166,84,60,0),22 },
+       { IPv4(166,84,140,0),23 },
+       { IPv4(166,84,143,0),24 },
+       { IPv4(166,84,144,0),20 },
+       { IPv4(166,84,150,0),24 },
+       { IPv4(166,84,157,0),24 },
+       { IPv4(166,84,168,0),22 },
+       { IPv4(166,84,172,0),24 },
+       { IPv4(166,84,174,0),24 },
+       { IPv4(166,84,185,0),24 },
+       { IPv4(166,84,191,0),24 },
+       { IPv4(166,88,0,0),16 },
+       { IPv4(166,88,88,0),24 },
+       { IPv4(166,89,0,0),16 },
+       { IPv4(166,90,14,0),24 },
+       { IPv4(166,104,0,0),17 },
+       { IPv4(166,104,128,0),19 },
+       { IPv4(166,104,160,0),20 },
+       { IPv4(166,104,176,0),21 },
+       { IPv4(166,104,184,0),21 },
+       { IPv4(166,104,192,0),18 },
+       { IPv4(166,113,0,0),16 },
+       { IPv4(166,114,128,0),19 },
+       { IPv4(166,114,248,0),21 },
+       { IPv4(166,119,0,0),16 },
+       { IPv4(166,121,0,0),16 },
+       { IPv4(166,124,0,0),16 },
+       { IPv4(166,126,0,0),16 },
+       { IPv4(166,127,0,0),16 },
+       { IPv4(166,128,0,0),16 },
+       { IPv4(166,128,0,0),13 },
+       { IPv4(166,129,0,0),16 },
+       { IPv4(166,130,0,0),16 },
+       { IPv4(166,131,0,0),16 },
+       { IPv4(166,132,0,0),16 },
+       { IPv4(166,133,0,0),16 },
+       { IPv4(166,134,0,0),16 },
+       { IPv4(166,137,0,0),16 },
+       { IPv4(166,147,64,0),18 },
+       { IPv4(166,150,0,0),18 },
+       { IPv4(166,150,128,0),18 },
+       { IPv4(166,164,0,0),16 },
+       { IPv4(166,177,0,0),16 },
+       { IPv4(166,177,111,0),24 },
+       { IPv4(166,183,0,0),16 },
+       { IPv4(166,184,0,0),16 },
+       { IPv4(166,185,0,0),16 },
+       { IPv4(166,186,0,0),16 },
+       { IPv4(166,187,0,0),16 },
+       { IPv4(166,188,0,0),16 },
+       { IPv4(166,189,0,0),16 },
+       { IPv4(166,190,0,0),16 },
+       { IPv4(166,191,0,0),16 },
+       { IPv4(166,192,0,0),16 },
+       { IPv4(166,193,0,0),16 },
+       { IPv4(166,194,0,0),16 },
+       { IPv4(166,195,0,0),16 },
+       { IPv4(166,196,0,0),16 },
+       { IPv4(166,197,0,0),16 },
+       { IPv4(166,198,0,0),16 },
+       { IPv4(166,199,0,0),16 },
+       { IPv4(166,200,0,0),16 },
+       { IPv4(166,201,0,0),16 },
+       { IPv4(166,202,0,0),16 },
+       { IPv4(166,203,0,0),16 },
+       { IPv4(166,204,0,0),16 },
+       { IPv4(166,213,0,0),16 },
+       { IPv4(167,1,0,0),16 },
+       { IPv4(167,1,100,0),24 },
+       { IPv4(167,1,101,0),24 },
+       { IPv4(167,1,102,0),24 },
+       { IPv4(167,1,103,0),24 },
+       { IPv4(167,1,104,0),24 },
+       { IPv4(167,1,105,0),24 },
+       { IPv4(167,1,106,0),24 },
+       { IPv4(167,1,107,0),24 },
+       { IPv4(167,1,108,0),24 },
+       { IPv4(167,1,109,0),24 },
+       { IPv4(167,1,110,0),24 },
+       { IPv4(167,1,112,0),24 },
+       { IPv4(167,1,113,0),24 },
+       { IPv4(167,1,118,0),24 },
+       { IPv4(167,1,120,0),24 },
+       { IPv4(167,1,122,0),24 },
+       { IPv4(167,1,123,0),24 },
+       { IPv4(167,1,124,0),24 },
+       { IPv4(167,1,125,0),24 },
+       { IPv4(167,1,127,0),24 },
+       { IPv4(167,1,128,0),24 },
+       { IPv4(167,1,129,0),24 },
+       { IPv4(167,1,130,0),24 },
+       { IPv4(167,1,131,0),24 },
+       { IPv4(167,1,132,0),24 },
+       { IPv4(167,1,133,0),24 },
+       { IPv4(167,1,134,0),24 },
+       { IPv4(167,1,135,0),24 },
+       { IPv4(167,1,136,0),24 },
+       { IPv4(167,1,141,0),24 },
+       { IPv4(167,6,0,0),16 },
+       { IPv4(167,7,0,0),16 },
+       { IPv4(167,14,48,0),21 },
+       { IPv4(167,23,0,0),16 },
+       { IPv4(167,24,0,0),16 },
+       { IPv4(167,24,101,0),24 },
+       { IPv4(167,24,102,0),24 },
+       { IPv4(167,24,103,0),24 },
+       { IPv4(167,24,104,0),24 },
+       { IPv4(167,24,105,0),24 },
+       { IPv4(167,24,241,0),24 },
+       { IPv4(167,24,242,0),24 },
+       { IPv4(167,24,243,0),24 },
+       { IPv4(167,24,244,0),24 },
+       { IPv4(167,24,245,0),24 },
+       { IPv4(167,25,0,0),16 },
+       { IPv4(167,28,0,0),16 },
+       { IPv4(167,28,10,0),24 },
+       { IPv4(167,28,11,0),24 },
+       { IPv4(167,28,15,0),24 },
+       { IPv4(167,28,27,0),24 },
+       { IPv4(167,28,28,0),24 },
+       { IPv4(167,28,29,0),24 },
+       { IPv4(167,28,32,0),24 },
+       { IPv4(167,28,33,0),24 },
+       { IPv4(167,28,37,0),24 },
+       { IPv4(167,28,39,0),24 },
+       { IPv4(167,28,49,0),24 },
+       { IPv4(167,28,51,0),24 },
+       { IPv4(167,28,52,0),24 },
+       { IPv4(167,28,54,0),24 },
+       { IPv4(167,28,73,0),24 },
+       { IPv4(167,28,74,0),24 },
+       { IPv4(167,28,92,0),24 },
+       { IPv4(167,28,141,0),24 },
+       { IPv4(167,28,203,0),24 },
+       { IPv4(167,33,0,0),16 },
+       { IPv4(167,33,21,0),24 },
+       { IPv4(167,33,61,0),24 },
+       { IPv4(167,33,63,0),24 },
+       { IPv4(167,64,0,0),16 },
+       { IPv4(167,64,43,0),24 },
+       { IPv4(167,64,48,0),24 },
+       { IPv4(167,64,57,0),24 },
+       { IPv4(167,64,85,0),24 },
+       { IPv4(167,66,0,0),16 },
+       { IPv4(167,68,0,0),16 },
+       { IPv4(167,77,36,0),24 },
+       { IPv4(167,79,0,0),16 },
+       { IPv4(167,80,246,0),24 },
+       { IPv4(167,82,0,0),16 },
+       { IPv4(167,83,0,0),16 },
+       { IPv4(167,83,96,0),24 },
+       { IPv4(167,83,98,0),24 },
+       { IPv4(167,83,101,0),24 },
+       { IPv4(167,86,0,0),16 },
+       { IPv4(167,86,20,0),24 },
+       { IPv4(167,86,34,0),24 },
+       { IPv4(167,86,48,0),24 },
+       { IPv4(167,86,60,0),24 },
+       { IPv4(167,86,76,0),24 },
+       { IPv4(167,86,98,0),24 },
+       { IPv4(167,86,100,0),24 },
+       { IPv4(167,89,0,0),16 },
+       { IPv4(167,94,0,0),16 },
+       { IPv4(167,98,0,0),16 },
+       { IPv4(167,107,0,0),16 },
+       { IPv4(167,115,0,0),16 },
+       { IPv4(167,115,0,0),17 },
+       { IPv4(167,120,0,0),16 },
+       { IPv4(167,121,0,0),16 },
+       { IPv4(167,127,0,0),16 },
+       { IPv4(167,127,160,0),21 },
+       { IPv4(167,132,0,0),16 },
+       { IPv4(167,136,0,0),16 },
+       { IPv4(167,136,25,0),24 },
+       { IPv4(167,136,35,0),24 },
+       { IPv4(167,136,225,0),24 },
+       { IPv4(167,136,235,0),24 },
+       { IPv4(167,140,0,0),16 },
+       { IPv4(167,142,0,0),16 },
+       { IPv4(167,147,0,0),16 },
+       { IPv4(167,150,0,0),16 },
+       { IPv4(167,151,0,0),16 },
+       { IPv4(167,153,0,0),16 },
+       { IPv4(167,154,0,0),16 },
+       { IPv4(167,157,0,0),16 },
+       { IPv4(167,160,188,0),23 },
+       { IPv4(167,160,212,0),24 },
+       { IPv4(167,160,246,0),24 },
+       { IPv4(167,160,247,0),24 },
+       { IPv4(167,165,0,0),16 },
+       { IPv4(167,166,0,0),16 },
+       { IPv4(167,167,0,0),16 },
+       { IPv4(167,177,0,0),16 },
+       { IPv4(167,185,0,0),16 },
+       { IPv4(167,186,249,0),24 },
+       { IPv4(167,187,0,0),16 },
+       { IPv4(167,192,0,0),13 },
+       { IPv4(167,200,0,0),16 },
+       { IPv4(167,211,0,0),16 },
+       { IPv4(167,216,0,0),17 },
+       { IPv4(167,216,0,0),16 },
+       { IPv4(167,216,128,0),17 },
+       { IPv4(167,230,42,0),24 },
+       { IPv4(167,232,0,0),16 },
+       { IPv4(167,234,0,0),16 },
+       { IPv4(167,236,0,0),16 },
+       { IPv4(167,239,0,0),16 },
+       { IPv4(167,239,176,0),24 },
+       { IPv4(167,239,192,0),20 },
+       { IPv4(167,239,208,0),20 },
+       { IPv4(167,242,0,0),16 },
+       { IPv4(167,248,0,0),16 },
+       { IPv4(167,252,0,0),16 },
+       { IPv4(167,253,0,0),16 },
+       { IPv4(168,16,0,0),15 },
+       { IPv4(168,18,0,0),15 },
+       { IPv4(168,20,0,0),15 },
+       { IPv4(168,22,0,0),15 },
+       { IPv4(168,24,0,0),15 },
+       { IPv4(168,26,0,0),15 },
+       { IPv4(168,28,0,0),15 },
+       { IPv4(168,30,0,0),15 },
+       { IPv4(168,32,0,0),12 },
+       { IPv4(168,48,0,0),13 },
+       { IPv4(168,56,0,0),14 },
+       { IPv4(168,60,0,0),16 },
+       { IPv4(168,69,0,0),16 },
+       { IPv4(168,73,128,0),17 },
+       { IPv4(168,75,0,0),24 },
+       { IPv4(168,75,0,0),18 },
+       { IPv4(168,75,0,0),16 },
+       { IPv4(168,78,0,0),16 },
+       { IPv4(168,80,0,0),15 },
+       { IPv4(168,84,0,0),16 },
+       { IPv4(168,88,224,0),24 },
+       { IPv4(168,89,0,0),16 },
+       { IPv4(168,95,0,0),16 },
+       { IPv4(168,97,0,0),16 },
+       { IPv4(168,97,0,0),17 },
+       { IPv4(168,97,128,0),17 },
+       { IPv4(168,100,0,0),16 },
+       { IPv4(168,111,0,0),16 },
+       { IPv4(168,115,0,0),16 },
+       { IPv4(168,120,0,0),16 },
+       { IPv4(168,126,0,0),16 },
+       { IPv4(168,126,27,0),24 },
+       { IPv4(168,126,60,0),24 },
+       { IPv4(168,126,61,0),24 },
+       { IPv4(168,126,120,0),24 },
+       { IPv4(168,126,120,0),22 },
+       { IPv4(168,126,121,0),24 },
+       { IPv4(168,126,122,0),24 },
+       { IPv4(168,126,123,0),24 },
+       { IPv4(168,126,167,0),24 },
+       { IPv4(168,126,212,0),24 },
+       { IPv4(168,131,0,0),16 },
+       { IPv4(168,135,0,0),16 },
+       { IPv4(168,142,0,0),16 },
+       { IPv4(168,143,0,0),16 },
+       { IPv4(168,149,0,0),16 },
+       { IPv4(168,151,0,0),16 },
+       { IPv4(168,154,0,0),16 },
+       { IPv4(168,158,0,0),16 },
+       { IPv4(168,160,0,0),16 },
+       { IPv4(168,164,0,0),16 },
+       { IPv4(168,165,0,0),16 },
+       { IPv4(168,166,0,0),16 },
+       { IPv4(168,167,0,0),16 },
+       { IPv4(168,168,0,0),16 },
+       { IPv4(168,170,0,0),16 },
+       { IPv4(168,171,0,0),16 },
+       { IPv4(168,172,0,0),16 },
+       { IPv4(168,173,0,0),16 },
+       { IPv4(168,174,0,0),16 },
+       { IPv4(168,177,0,0),16 },
+       { IPv4(168,178,0,0),16 },
+       { IPv4(168,179,0,0),16 },
+       { IPv4(168,180,0,0),16 },
+       { IPv4(168,183,0,0),16 },
+       { IPv4(168,186,0,0),16 },
+       { IPv4(168,188,0,0),16 },
+       { IPv4(168,200,0,0),16 },
+       { IPv4(168,200,2,0),24 },
+       { IPv4(168,203,0,0),16 },
+       { IPv4(168,205,0,0),16 },
+       { IPv4(168,208,0,0),16 },
+       { IPv4(168,209,0,0),16 },
+       { IPv4(168,210,0,0),16 },
+       { IPv4(168,210,1,0),24 },
+       { IPv4(168,210,40,0),23 },
+       { IPv4(168,210,50,0),24 },
+       { IPv4(168,210,68,0),22 },
+       { IPv4(168,210,100,0),24 },
+       { IPv4(168,210,128,0),17 },
+       { IPv4(168,215,81,0),24 },
+       { IPv4(168,215,104,0),24 },
+       { IPv4(168,215,106,0),23 },
+       { IPv4(168,215,108,0),23 },
+       { IPv4(168,215,113,0),24 },
+       { IPv4(168,215,167,0),24 },
+       { IPv4(168,215,224,0),20 },
+       { IPv4(168,215,234,0),23 },
+       { IPv4(168,215,236,0),23 },
+       { IPv4(168,220,0,0),16 },
+       { IPv4(168,224,1,0),24 },
+       { IPv4(168,226,0,0),16 },
+       { IPv4(168,230,0,0),16 },
+       { IPv4(168,230,128,0),19 },
+       { IPv4(168,231,0,0),16 },
+       { IPv4(168,234,52,0),24 },
+       { IPv4(168,234,53,0),24 },
+       { IPv4(168,234,54,0),24 },
+       { IPv4(168,234,55,0),24 },
+       { IPv4(168,234,56,0),24 },
+       { IPv4(168,234,57,0),24 },
+       { IPv4(168,234,58,0),24 },
+       { IPv4(168,234,59,0),24 },
+       { IPv4(168,234,60,0),24 },
+       { IPv4(168,234,61,0),24 },
+       { IPv4(168,234,62,0),24 },
+       { IPv4(168,234,92,0),24 },
+       { IPv4(168,241,0,0),16 },
+       { IPv4(168,243,80,0),24 },
+       { IPv4(168,243,81,0),24 },
+       { IPv4(168,243,176,0),21 },
+       { IPv4(168,243,184,0),21 },
+       { IPv4(168,243,224,0),20 },
+       { IPv4(168,243,231,0),24 },
+       { IPv4(168,244,0,0),16 },
+       { IPv4(168,248,0,0),15 },
+       { IPv4(169,4,0,0),14 },
+       { IPv4(169,71,20,0),24 },
+       { IPv4(169,71,80,0),24 },
+       { IPv4(169,71,97,0),24 },
+       { IPv4(169,100,0,0),19 },
+       { IPv4(169,130,0,0),16 },
+       { IPv4(169,131,0,0),16 },
+       { IPv4(169,132,0,0),16 },
+       { IPv4(169,133,0,0),16 },
+       { IPv4(169,138,0,0),16 },
+       { IPv4(169,140,0,0),18 },
+       { IPv4(169,140,64,0),18 },
+       { IPv4(169,140,128,0),18 },
+       { IPv4(169,140,192,0),18 },
+       { IPv4(169,142,0,0),16 },
+       { IPv4(169,144,0,0),16 },
+       { IPv4(169,146,0,0),16 },
+       { IPv4(169,149,0,0),16 },
+       { IPv4(169,150,0,0),16 },
+       { IPv4(169,152,0,0),16 },
+       { IPv4(169,153,128,0),24 },
+       { IPv4(169,153,130,0),24 },
+       { IPv4(169,153,134,0),24 },
+       { IPv4(169,154,0,0),16 },
+       { IPv4(169,155,0,0),16 },
+       { IPv4(169,156,0,0),16 },
+       { IPv4(169,157,0,0),16 },
+       { IPv4(169,197,0,0),18 },
+       { IPv4(169,205,0,0),16 },
+       { IPv4(169,206,0,0),16 },
+       { IPv4(169,226,0,0),16 },
+       { IPv4(169,228,64,0),19 },
+       { IPv4(169,228,112,0),20 },
+       { IPv4(169,228,128,0),19 },
+       { IPv4(169,228,160,0),20 },
+       { IPv4(169,229,0,0),16 },
+       { IPv4(169,232,0,0),16 },
+       { IPv4(169,233,0,0),16 },
+       { IPv4(169,237,0,0),16 },
+       { IPv4(170,2,0,0),16 },
+       { IPv4(170,3,0,0),16 },
+       { IPv4(170,5,0,0),16 },
+       { IPv4(170,9,64,0),18 },
+       { IPv4(170,9,192,0),18 },
+       { IPv4(170,11,0,0),16 },
+       { IPv4(170,16,0,0),17 },
+       { IPv4(170,16,0,0),16 },
+       { IPv4(170,16,0,0),23 },
+       { IPv4(170,16,8,0),22 },
+       { IPv4(170,16,14,0),23 },
+       { IPv4(170,16,128,0),17 },
+       { IPv4(170,17,0,0),16 },
+       { IPv4(170,20,0,0),18 },
+       { IPv4(170,20,64,0),19 },
+       { IPv4(170,20,144,0),20 },
+       { IPv4(170,20,160,0),19 },
+       { IPv4(170,20,192,0),18 },
+       { IPv4(170,22,0,0),16 },
+       { IPv4(170,24,0,0),16 },
+       { IPv4(170,25,0,0),16 },
+       { IPv4(170,27,132,0),22 },
+       { IPv4(170,27,201,0),24 },
+       { IPv4(170,28,128,0),20 },
+       { IPv4(170,31,0,0),16 },
+       { IPv4(170,32,0,0),16 },
+       { IPv4(170,35,0,0),16 },
+       { IPv4(170,35,224,0),22 },
+       { IPv4(170,35,228,0),22 },
+       { IPv4(170,35,240,0),22 },
+       { IPv4(170,35,244,0),22 },
+       { IPv4(170,35,248,0),22 },
+       { IPv4(170,35,252,0),22 },
+       { IPv4(170,37,237,0),24 },
+       { IPv4(170,37,238,0),24 },
+       { IPv4(170,37,239,0),24 },
+       { IPv4(170,38,0,0),16 },
+       { IPv4(170,39,0,0),16 },
+       { IPv4(170,46,0,0),16 },
+       { IPv4(170,51,255,0),24 },
+       { IPv4(170,54,0,0),16 },
+       { IPv4(170,54,59,0),24 },
+       { IPv4(170,54,240,0),24 },
+       { IPv4(170,54,241,0),24 },
+       { IPv4(170,55,0,0),16 },
+       { IPv4(170,65,122,0),24 },
+       { IPv4(170,65,123,0),24 },
+       { IPv4(170,65,124,0),24 },
+       { IPv4(170,65,128,0),21 },
+       { IPv4(170,68,0,0),16 },
+       { IPv4(170,70,0,0),16 },
+       { IPv4(170,72,0,0),16 },
+       { IPv4(170,85,0,0),16 },
+       { IPv4(170,91,128,0),18 },
+       { IPv4(170,92,0,0),16 },
+       { IPv4(170,107,0,0),16 },
+       { IPv4(170,108,0,0),16 },
+       { IPv4(170,119,0,0),16 },
+       { IPv4(170,121,0,0),16 },
+       { IPv4(170,128,0,0),16 },
+       { IPv4(170,128,170,0),24 },
+       { IPv4(170,128,175,0),24 },
+       { IPv4(170,131,0,0),19 },
+       { IPv4(170,135,0,0),16 },
+       { IPv4(170,138,0,0),16 },
+       { IPv4(170,138,64,0),21 },
+       { IPv4(170,140,0,0),16 },
+       { IPv4(170,147,0,0),16 },
+       { IPv4(170,152,0,0),16 },
+       { IPv4(170,153,0,0),16 },
+       { IPv4(170,153,0,0),17 },
+       { IPv4(170,153,138,0),23 },
+       { IPv4(170,153,140,0),22 },
+       { IPv4(170,153,144,0),20 },
+       { IPv4(170,153,160,0),19 },
+       { IPv4(170,153,192,0),18 },
+       { IPv4(170,159,0,0),16 },
+       { IPv4(170,160,0,0),16 },
+       { IPv4(170,161,0,0),16 },
+       { IPv4(170,163,0,0),16 },
+       { IPv4(170,165,0,0),16 },
+       { IPv4(170,167,0,0),16 },
+       { IPv4(170,169,46,0),24 },
+       { IPv4(170,169,122,0),24 },
+       { IPv4(170,178,0,0),16 },
+       { IPv4(170,201,0,0),16 },
+       { IPv4(170,202,0,0),16 },
+       { IPv4(170,202,1,0),24 },
+       { IPv4(170,202,3,0),24 },
+       { IPv4(170,202,224,0),19 },
+       { IPv4(170,202,224,0),24 },
+       { IPv4(170,202,224,0),20 },
+       { IPv4(170,202,231,0),24 },
+       { IPv4(170,202,232,0),24 },
+       { IPv4(170,202,233,0),24 },
+       { IPv4(170,202,234,0),24 },
+       { IPv4(170,202,240,0),20 },
+       { IPv4(170,202,241,0),24 },
+       { IPv4(170,202,242,0),24 },
+       { IPv4(170,202,243,0),24 },
+       { IPv4(170,202,254,0),24 },
+       { IPv4(170,206,0,0),16 },
+       { IPv4(170,206,0,0),19 },
+       { IPv4(170,206,32,0),19 },
+       { IPv4(170,206,64,0),19 },
+       { IPv4(170,206,96,0),19 },
+       { IPv4(170,206,128,0),19 },
+       { IPv4(170,206,160,0),19 },
+       { IPv4(170,206,192,0),19 },
+       { IPv4(170,206,224,0),23 },
+       { IPv4(170,206,226,0),23 },
+       { IPv4(170,209,0,0),16 },
+       { IPv4(170,210,16,0),21 },
+       { IPv4(170,215,0,0),16 },
+       { IPv4(170,215,0,0),18 },
+       { IPv4(170,215,15,0),24 },
+       { IPv4(170,215,16,0),20 },
+       { IPv4(170,215,96,0),19 },
+       { IPv4(170,215,128,0),20 },
+       { IPv4(170,215,134,0),24 },
+       { IPv4(170,215,144,0),24 },
+       { IPv4(170,215,145,0),24 },
+       { IPv4(170,215,147,0),24 },
+       { IPv4(170,215,159,0),24 },
+       { IPv4(170,215,160,0),24 },
+       { IPv4(170,215,161,0),24 },
+       { IPv4(170,215,162,0),24 },
+       { IPv4(170,215,163,0),24 },
+       { IPv4(170,215,164,0),24 },
+       { IPv4(170,215,171,0),24 },
+       { IPv4(170,215,175,0),24 },
+       { IPv4(170,215,177,0),24 },
+       { IPv4(170,215,179,0),24 },
+       { IPv4(170,215,184,0),24 },
+       { IPv4(170,215,185,0),24 },
+       { IPv4(170,215,186,0),24 },
+       { IPv4(170,215,187,0),24 },
+       { IPv4(170,215,188,0),24 },
+       { IPv4(170,215,192,0),18 },
+       { IPv4(170,224,0,0),16 },
+       { IPv4(170,224,0,0),20 },
+       { IPv4(170,224,16,0),20 },
+       { IPv4(170,224,240,0),20 },
+       { IPv4(170,235,0,0),16 },
+       { IPv4(170,236,14,0),24 },
+       { IPv4(170,248,95,0),24 },
+       { IPv4(170,248,97,0),24 },
+       { IPv4(170,250,0,0),16 },
+       { IPv4(170,252,123,0),24 },
+       { IPv4(170,252,127,0),24 },
+       { IPv4(170,252,188,0),24 },
+       { IPv4(170,252,191,0),24 },
+       { IPv4(171,27,0,0),16 },
+       { IPv4(171,30,128,0),17 },
+       { IPv4(171,68,0,0),14 },
+       { IPv4(171,72,0,0),16 },
+       { IPv4(172,128,0,0),13 },
+       { IPv4(172,136,0,0),13 },
+       { IPv4(172,144,0,0),13 },
+       { IPv4(172,152,0,0),13 },
+       { IPv4(172,160,0,0),13 },
+       { IPv4(172,168,0,0),13 },
+       { IPv4(172,176,0,0),14 },
+       { IPv4(172,176,0,0),13 },
+       { IPv4(172,180,0,0),16 },
+       { IPv4(172,180,0,0),14 },
+       { IPv4(172,184,0,0),13 },
+       { IPv4(172,187,128,0),17 },
+       { IPv4(172,188,0,0),14 },
+       { IPv4(192,0,32,0),20 },
+       { IPv4(192,0,34,0),24 },
+       { IPv4(192,0,36,0),24 },
+       { IPv4(192,5,4,0),23 },
+       { IPv4(192,5,6,0),24 },
+       { IPv4(192,5,7,0),24 },
+       { IPv4(192,5,14,0),24 },
+       { IPv4(192,5,21,0),24 },
+       { IPv4(192,5,22,0),24 },
+       { IPv4(192,5,23,0),24 },
+       { IPv4(192,5,24,0),24 },
+       { IPv4(192,5,25,0),24 },
+       { IPv4(192,5,27,0),24 },
+       { IPv4(192,5,38,0),24 },
+       { IPv4(192,5,41,0),24 },
+       { IPv4(192,5,47,0),24 },
+       { IPv4(192,5,53,0),24 },
+       { IPv4(192,5,54,0),23 },
+       { IPv4(192,5,55,0),24 },
+       { IPv4(192,5,63,0),24 },
+       { IPv4(192,5,73,0),24 },
+       { IPv4(192,5,100,0),24 },
+       { IPv4(192,5,106,0),24 },
+       { IPv4(192,5,147,0),24 },
+       { IPv4(192,5,148,0),24 },
+       { IPv4(192,5,156,0),24 },
+       { IPv4(192,5,157,0),24 },
+       { IPv4(192,5,162,0),24 },
+       { IPv4(192,5,166,0),24 },
+       { IPv4(192,5,170,0),23 },
+       { IPv4(192,5,172,0),22 },
+       { IPv4(192,5,176,0),20 },
+       { IPv4(192,5,192,0),21 },
+       { IPv4(192,5,200,0),23 },
+       { IPv4(192,5,220,0),24 },
+       { IPv4(192,5,240,0),24 },
+       { IPv4(192,6,2,0),24 },
+       { IPv4(192,6,3,0),24 },
+       { IPv4(192,6,6,0),24 },
+       { IPv4(192,6,7,0),24 },
+       { IPv4(192,6,19,0),24 },
+       { IPv4(192,6,21,0),24 },
+       { IPv4(192,6,23,0),24 },
+       { IPv4(192,6,37,0),24 },
+       { IPv4(192,6,38,0),24 },
+       { IPv4(192,6,39,0),24 },
+       { IPv4(192,6,41,0),24 },
+       { IPv4(192,6,59,0),24 },
+       { IPv4(192,6,71,0),24 },
+       { IPv4(192,6,77,0),24 },
+       { IPv4(192,6,86,0),24 },
+       { IPv4(192,6,89,0),24 },
+       { IPv4(192,6,118,0),24 },
+       { IPv4(192,6,120,0),24 },
+       { IPv4(192,6,121,0),24 },
+       { IPv4(192,6,143,0),24 },
+       { IPv4(192,6,151,0),24 },
+       { IPv4(192,6,202,0),24 },
+       { IPv4(192,6,223,0),24 },
+       { IPv4(192,8,0,0),21 },
+       { IPv4(192,11,236,0),24 },
+       { IPv4(192,12,3,0),24 },
+       { IPv4(192,12,5,0),24 },
+       { IPv4(192,12,7,0),24 },
+       { IPv4(192,12,10,0),24 },
+       { IPv4(192,12,15,0),24 },
+       { IPv4(192,12,29,0),24 },
+       { IPv4(192,12,32,0),24 },
+       { IPv4(192,12,33,0),24 },
+       { IPv4(192,12,65,0),24 },
+       { IPv4(192,12,66,0),24 },
+       { IPv4(192,12,67,0),24 },
+       { IPv4(192,12,68,0),24 },
+       { IPv4(192,12,69,0),24 },
+       { IPv4(192,12,73,0),24 },
+       { IPv4(192,12,82,0),24 },
+       { IPv4(192,12,88,0),24 },
+       { IPv4(192,12,89,0),24 },
+       { IPv4(192,12,90,0),24 },
+       { IPv4(192,12,95,0),24 },
+       { IPv4(192,12,100,0),24 },
+       { IPv4(192,12,123,0),24 },
+       { IPv4(192,12,124,0),24 },
+       { IPv4(192,12,133,0),24 },
+       { IPv4(192,12,134,0),24 },
+       { IPv4(192,12,135,0),24 },
+       { IPv4(192,12,207,0),24 },
+       { IPv4(192,12,210,0),24 },
+       { IPv4(192,12,211,0),24 },
+       { IPv4(192,12,237,0),24 },
+       { IPv4(192,12,240,0),24 },
+       { IPv4(192,16,0,0),19 },
+       { IPv4(192,16,13,0),24 },
+       { IPv4(192,16,167,0),24 },
+       { IPv4(192,16,168,0),24 },
+       { IPv4(192,16,204,0),24 },
+       { IPv4(192,17,0,0),16 },
+       { IPv4(192,18,16,0),22 },
+       { IPv4(192,19,192,0),22 },
+       { IPv4(192,19,196,0),24 },
+       { IPv4(192,19,197,0),24 },
+       { IPv4(192,20,2,0),24 },
+       { IPv4(192,20,3,0),24 },
+       { IPv4(192,20,4,0),24 },
+       { IPv4(192,20,8,0),24 },
+       { IPv4(192,20,11,0),24 },
+       { IPv4(192,20,16,0),24 },
+       { IPv4(192,20,239,0),24 },
+       { IPv4(192,20,245,0),24 },
+       { IPv4(192,20,246,0),24 },
+       { IPv4(192,20,250,0),24 },
+       { IPv4(192,20,251,0),24 },
+       { IPv4(192,20,252,0),24 },
+       { IPv4(192,23,144,0),24 },
+       { IPv4(192,23,168,0),24 },
+       { IPv4(192,24,0,0),16 },
+       { IPv4(192,25,42,0),24 },
+       { IPv4(192,25,46,0),24 },
+       { IPv4(192,25,48,0),24 },
+       { IPv4(192,25,52,0),24 },
+       { IPv4(192,25,91,0),24 },
+       { IPv4(192,25,96,0),23 },
+       { IPv4(192,25,106,0),24 },
+       { IPv4(192,25,114,0),24 },
+       { IPv4(192,25,133,0),24 },
+       { IPv4(192,25,139,0),24 },
+       { IPv4(192,25,140,0),23 },
+       { IPv4(192,25,142,0),24 },
+       { IPv4(192,25,151,0),24 },
+       { IPv4(192,25,155,0),24 },
+       { IPv4(192,25,191,0),24 },
+       { IPv4(192,25,199,0),24 },
+       { IPv4(192,25,204,0),24 },
+       { IPv4(192,25,206,0),24 },
+       { IPv4(192,25,214,0),24 },
+       { IPv4(192,25,216,0),24 },
+       { IPv4(192,25,218,0),24 },
+       { IPv4(192,25,240,0),24 },
+       { IPv4(192,26,10,0),24 },
+       { IPv4(192,26,15,0),24 },
+       { IPv4(192,26,85,0),24 },
+       { IPv4(192,26,89,0),24 },
+       { IPv4(192,26,91,0),24 },
+       { IPv4(192,26,92,0),24 },
+       { IPv4(192,26,147,0),24 },
+       { IPv4(192,26,200,0),24 },
+       { IPv4(192,26,212,0),24 },
+       { IPv4(192,26,214,0),24 },
+       { IPv4(192,26,244,0),23 },
+       { IPv4(192,26,245,0),24 },
+       { IPv4(192,26,251,0),24 },
+       { IPv4(192,27,0,0),16 },
+       { IPv4(192,27,56,0),24 },
+       { IPv4(192,28,0,0),18 },
+       { IPv4(192,28,64,0),19 },
+       { IPv4(192,28,96,0),22 },
+       { IPv4(192,28,254,0),24 },
+       { IPv4(192,30,115,0),24 },
+       { IPv4(192,31,3,0),24 },
+       { IPv4(192,31,7,0),24 },
+       { IPv4(192,31,16,0),24 },
+       { IPv4(192,31,17,0),24 },
+       { IPv4(192,31,18,0),24 },
+       { IPv4(192,31,19,0),24 },
+       { IPv4(192,31,20,0),24 },
+       { IPv4(192,31,21,0),24 },
+       { IPv4(192,31,31,0),24 },
+       { IPv4(192,31,74,0),24 },
+       { IPv4(192,31,80,0),24 },
+       { IPv4(192,31,90,0),24 },
+       { IPv4(192,31,96,0),24 },
+       { IPv4(192,31,106,0),24 },
+       { IPv4(192,31,112,0),24 },
+       { IPv4(192,31,146,0),24 },
+       { IPv4(192,31,153,0),24 },
+       { IPv4(192,31,161,0),24 },
+       { IPv4(192,31,174,0),24 },
+       { IPv4(192,31,177,0),24 },
+       { IPv4(192,31,178,0),24 },
+       { IPv4(192,31,179,0),24 },
+       { IPv4(192,31,238,0),23 },
+       { IPv4(192,31,239,0),24 },
+       { IPv4(192,31,246,0),24 },
+       { IPv4(192,33,5,0),24 },
+       { IPv4(192,33,6,0),23 },
+       { IPv4(192,33,8,0),23 },
+       { IPv4(192,33,10,0),24 },
+       { IPv4(192,33,13,0),24 },
+       { IPv4(192,33,14,0),24 },
+       { IPv4(192,33,19,0),24 },
+       { IPv4(192,33,140,0),23 },
+       { IPv4(192,33,186,0),24 },
+       { IPv4(192,33,240,0),24 },
+       { IPv4(192,34,239,0),24 },
+       { IPv4(192,35,20,0),24 },
+       { IPv4(192,35,29,0),24 },
+       { IPv4(192,35,44,0),24 },
+       { IPv4(192,35,51,0),24 },
+       { IPv4(192,35,75,0),24 },
+       { IPv4(192,35,76,0),24 },
+       { IPv4(192,35,82,0),24 },
+       { IPv4(192,35,83,0),24 },
+       { IPv4(192,35,84,0),24 },
+       { IPv4(192,35,99,0),24 },
+       { IPv4(192,35,105,0),24 },
+       { IPv4(192,35,133,0),24 },
+       { IPv4(192,35,142,0),24 },
+       { IPv4(192,35,154,0),24 },
+       { IPv4(192,35,156,0),24 },
+       { IPv4(192,35,171,0),24 },
+       { IPv4(192,35,174,0),24 },
+       { IPv4(192,35,193,0),24 },
+       { IPv4(192,35,208,0),24 },
+       { IPv4(192,35,209,0),24 },
+       { IPv4(192,35,210,0),24 },
+       { IPv4(192,35,217,0),24 },
+       { IPv4(192,35,218,0),24 },
+       { IPv4(192,35,221,0),24 },
+       { IPv4(192,35,222,0),24 },
+       { IPv4(192,35,224,0),24 },
+       { IPv4(192,35,225,0),24 },
+       { IPv4(192,35,226,0),24 },
+       { IPv4(192,35,227,0),24 },
+       { IPv4(192,35,228,0),24 },
+       { IPv4(192,36,95,0),24 },
+       { IPv4(192,39,0,0),16 },
+       { IPv4(192,39,122,0),24 },
+       { IPv4(192,39,124,0),24 },
+       { IPv4(192,40,16,0),22 },
+       { IPv4(192,40,29,0),24 },
+       { IPv4(192,40,65,0),24 },
+       { IPv4(192,40,72,0),21 },
+       { IPv4(192,40,80,0),24 },
+       { IPv4(192,40,254,0),24 },
+       { IPv4(192,41,0,0),18 },
+       { IPv4(192,41,64,0),24 },
+       { IPv4(192,41,70,0),24 },
+       { IPv4(192,41,80,0),24 },
+       { IPv4(192,41,162,0),24 },
+       { IPv4(192,41,170,0),24 },
+       { IPv4(192,41,197,0),24 },
+       { IPv4(192,41,204,0),24 },
+       { IPv4(192,41,206,0),24 },
+       { IPv4(192,41,213,0),24 },
+       { IPv4(192,41,214,0),24 },
+       { IPv4(192,41,249,0),24 },
+       { IPv4(192,42,41,0),24 },
+       { IPv4(192,42,55,0),24 },
+       { IPv4(192,42,70,0),24 },
+       { IPv4(192,42,75,0),24 },
+       { IPv4(192,42,76,0),24 },
+       { IPv4(192,42,77,0),24 },
+       { IPv4(192,42,78,0),24 },
+       { IPv4(192,42,79,0),24 },
+       { IPv4(192,42,80,0),23 },
+       { IPv4(192,42,82,0),24 },
+       { IPv4(192,42,93,0),24 },
+       { IPv4(192,42,98,0),24 },
+       { IPv4(192,42,99,0),24 },
+       { IPv4(192,42,141,0),24 },
+       { IPv4(192,42,142,0),24 },
+       { IPv4(192,42,179,0),24 },
+       { IPv4(192,42,181,0),24 },
+       { IPv4(192,42,182,0),24 },
+       { IPv4(192,42,238,0),24 },
+       { IPv4(192,42,248,0),24 },
+       { IPv4(192,43,64,0),18 },
+       { IPv4(192,43,185,0),24 },
+       { IPv4(192,43,197,0),24 },
+       { IPv4(192,43,217,0),24 },
+       { IPv4(192,43,219,0),24 },
+       { IPv4(192,43,235,0),24 },
+       { IPv4(192,43,240,0),24 },
+       { IPv4(192,43,244,0),24 },
+       { IPv4(192,43,253,0),24 },
+       { IPv4(192,44,253,0),24 },
+       { IPv4(192,45,155,0),24 },
+       { IPv4(192,46,2,0),24 },
+       { IPv4(192,46,4,0),24 },
+       { IPv4(192,46,6,0),24 },
+       { IPv4(192,46,47,0),24 },
+       { IPv4(192,46,54,0),24 },
+       { IPv4(192,46,108,0),24 },
+       { IPv4(192,47,42,0),24 },
+       { IPv4(192,47,44,0),24 },
+       { IPv4(192,47,117,0),24 },
+       { IPv4(192,47,241,0),24 },
+       { IPv4(192,47,243,0),24 },
+       { IPv4(192,48,33,0),24 },
+       { IPv4(192,48,80,0),24 },
+       { IPv4(192,48,97,0),24 },
+       { IPv4(192,48,106,0),24 },
+       { IPv4(192,48,125,0),24 },
+       { IPv4(192,48,212,0),22 },
+       { IPv4(192,48,222,0),24 },
+       { IPv4(192,48,242,0),24 },
+       { IPv4(192,48,245,0),24 },
+       { IPv4(192,50,17,0),24 },
+       { IPv4(192,50,65,0),24 },
+       { IPv4(192,50,74,0),23 },
+       { IPv4(192,50,76,0),23 },
+       { IPv4(192,50,105,0),24 },
+       { IPv4(192,50,110,0),24 },
+       { IPv4(192,50,240,0),24 },
+       { IPv4(192,51,41,0),24 },
+       { IPv4(192,51,144,0),21 },
+       { IPv4(192,51,180,0),22 },
+       { IPv4(192,52,59,0),24 },
+       { IPv4(192,52,83,0),24 },
+       { IPv4(192,52,85,0),24 },
+       { IPv4(192,52,86,0),24 },
+       { IPv4(192,52,88,0),24 },
+       { IPv4(192,52,89,0),24 },
+       { IPv4(192,52,90,0),24 },
+       { IPv4(192,52,91,0),24 },
+       { IPv4(192,52,106,0),24 },
+       { IPv4(192,52,117,0),24 },
+       { IPv4(192,52,183,0),24 },
+       { IPv4(192,52,184,0),24 },
+       { IPv4(192,52,220,0),24 },
+       { IPv4(192,53,35,0),24 },
+       { IPv4(192,54,36,0),24 },
+       { IPv4(192,54,43,0),24 },
+       { IPv4(192,54,45,0),24 },
+       { IPv4(192,54,129,0),24 },
+       { IPv4(192,54,250,0),24 },
+       { IPv4(192,54,253,0),24 },
+       { IPv4(192,55,1,0),24 },
+       { IPv4(192,55,87,0),24 },
+       { IPv4(192,55,90,0),23 },
+       { IPv4(192,55,95,0),24 },
+       { IPv4(192,55,106,0),24 },
+       { IPv4(192,55,120,0),24 },
+       { IPv4(192,55,122,0),24 },
+       { IPv4(192,55,123,0),24 },
+       { IPv4(192,55,124,0),24 },
+       { IPv4(192,55,133,0),24 },
+       { IPv4(192,55,137,0),24 },
+       { IPv4(192,55,138,0),23 },
+       { IPv4(192,55,140,0),22 },
+       { IPv4(192,55,144,0),20 },
+       { IPv4(192,55,160,0),20 },
+       { IPv4(192,55,176,0),21 },
+       { IPv4(192,55,184,0),23 },
+       { IPv4(192,55,186,0),24 },
+       { IPv4(192,55,199,0),24 },
+       { IPv4(192,55,208,0),24 },
+       { IPv4(192,55,210,0),24 },
+       { IPv4(192,55,214,0),24 },
+       { IPv4(192,55,229,0),24 },
+       { IPv4(192,55,240,0),24 },
+       { IPv4(192,56,52,0),24 },
+       { IPv4(192,56,191,0),24 },
+       { IPv4(192,56,231,0),24 },
+       { IPv4(192,58,19,0),24 },
+       { IPv4(192,58,24,0),23 },
+       { IPv4(192,58,36,0),24 },
+       { IPv4(192,58,107,0),24 },
+       { IPv4(192,58,159,0),24 },
+       { IPv4(192,58,172,0),24 },
+       { IPv4(192,58,181,0),24 },
+       { IPv4(192,58,183,0),24 },
+       { IPv4(192,58,184,0),21 },
+       { IPv4(192,58,199,0),24 },
+       { IPv4(192,58,212,0),24 },
+       { IPv4(192,58,220,0),24 },
+       { IPv4(192,58,221,0),24 },
+       { IPv4(192,58,222,0),24 },
+       { IPv4(192,58,223,0),24 },
+       { IPv4(192,58,244,0),24 },
+       { IPv4(192,63,0,0),16 },
+       { IPv4(192,64,157,0),24 },
+       { IPv4(192,65,97,0),24 },
+       { IPv4(192,65,141,0),24 },
+       { IPv4(192,65,144,0),24 },
+       { IPv4(192,65,146,0),24 },
+       { IPv4(192,65,153,0),24 },
+       { IPv4(192,65,171,0),24 },
+       { IPv4(192,65,176,0),24 },
+       { IPv4(192,65,201,0),24 },
+       { IPv4(192,65,202,0),24 },
+       { IPv4(192,65,224,0),24 },
+       { IPv4(192,65,226,0),24 },
+       { IPv4(192,65,228,0),24 },
+       { IPv4(192,67,13,0),24 },
+       { IPv4(192,67,14,0),24 },
+       { IPv4(192,67,21,0),24 },
+       { IPv4(192,67,45,0),24 },
+       { IPv4(192,67,48,0),24 },
+       { IPv4(192,67,53,0),24 },
+       { IPv4(192,67,80,0),24 },
+       { IPv4(192,67,81,0),24 },
+       { IPv4(192,67,82,0),24 },
+       { IPv4(192,67,83,0),24 },
+       { IPv4(192,67,93,0),24 },
+       { IPv4(192,67,96,0),24 },
+       { IPv4(192,67,107,0),24 },
+       { IPv4(192,67,108,0),24 },
+       { IPv4(192,67,109,0),24 },
+       { IPv4(192,67,112,0),24 },
+       { IPv4(192,67,113,0),24 },
+       { IPv4(192,67,157,0),24 },
+       { IPv4(192,67,166,0),24 },
+       { IPv4(192,67,173,0),24 },
+       { IPv4(192,67,209,0),24 },
+       { IPv4(192,67,236,0),22 },
+       { IPv4(192,67,240,0),21 },
+       { IPv4(192,67,251,0),24 },
+       { IPv4(192,68,22,0),24 },
+       { IPv4(192,68,52,0),24 },
+       { IPv4(192,68,108,0),24 },
+       { IPv4(192,68,148,0),24 },
+       { IPv4(192,68,162,0),24 },
+       { IPv4(192,68,171,0),24 },
+       { IPv4(192,68,172,0),24 },
+       { IPv4(192,68,183,0),24 },
+       { IPv4(192,68,189,0),24 },
+       { IPv4(192,68,202,0),24 },
+       { IPv4(192,68,227,0),24 },
+       { IPv4(192,69,46,0),24 },
+       { IPv4(192,69,66,0),24 },
+       { IPv4(192,69,190,0),24 },
+       { IPv4(192,70,125,0),24 },
+       { IPv4(192,70,160,0),24 },
+       { IPv4(192,70,162,0),24 },
+       { IPv4(192,70,175,0),24 },
+       { IPv4(192,70,186,0),24 },
+       { IPv4(192,70,204,0),24 },
+       { IPv4(192,70,211,0),24 },
+       { IPv4(192,70,231,0),24 },
+       { IPv4(192,70,236,0),24 },
+       { IPv4(192,70,237,0),24 },
+       { IPv4(192,70,239,0),24 },
+       { IPv4(192,70,244,0),24 },
+       { IPv4(192,70,245,0),24 },
+       { IPv4(192,70,249,0),24 },
+       { IPv4(192,71,115,0),24 },
+       { IPv4(192,71,129,0),24 },
+       { IPv4(192,71,130,0),24 },
+       { IPv4(192,71,199,0),24 },
+       { IPv4(192,71,213,0),24 },
+       { IPv4(192,72,0,0),16 },
+       { IPv4(192,72,80,0),23 },
+       { IPv4(192,73,3,0),24 },
+       { IPv4(192,73,7,0),24 },
+       { IPv4(192,73,25,0),24 },
+       { IPv4(192,73,26,0),23 },
+       { IPv4(192,73,28,0),23 },
+       { IPv4(192,73,57,0),24 },
+       { IPv4(192,73,60,0),24 },
+       { IPv4(192,73,62,0),24 },
+       { IPv4(192,73,64,0),24 },
+       { IPv4(192,73,207,0),24 },
+       { IPv4(192,73,208,0),22 },
+       { IPv4(192,73,212,0),24 },
+       { IPv4(192,73,213,0),24 },
+       { IPv4(192,73,216,0),24 },
+       { IPv4(192,73,220,0),24 },
+       { IPv4(192,73,228,0),24 },
+       { IPv4(192,74,216,0),24 },
+       { IPv4(192,75,17,0),24 },
+       { IPv4(192,75,48,0),24 },
+       { IPv4(192,75,49,0),24 },
+       { IPv4(192,75,99,0),24 },
+       { IPv4(192,75,104,0),24 },
+       { IPv4(192,75,120,0),22 },
+       { IPv4(192,75,131,0),24 },
+       { IPv4(192,75,134,0),24 },
+       { IPv4(192,75,137,0),24 },
+       { IPv4(192,75,178,0),24 },
+       { IPv4(192,75,238,0),24 },
+       { IPv4(192,76,121,0),24 },
+       { IPv4(192,76,133,0),24 },
+       { IPv4(192,76,151,0),24 },
+       { IPv4(192,76,175,0),24 },
+       { IPv4(192,76,177,0),24 },
+       { IPv4(192,76,178,0),24 },
+       { IPv4(192,76,184,0),24 },
+       { IPv4(192,76,237,0),24 },
+       { IPv4(192,76,238,0),24 },
+       { IPv4(192,76,239,0),24 },
+       { IPv4(192,76,249,0),24 },
+       { IPv4(192,77,9,0),24 },
+       { IPv4(192,77,14,0),24 },
+       { IPv4(192,77,27,0),24 },
+       { IPv4(192,77,30,0),24 },
+       { IPv4(192,77,31,0),24 },
+       { IPv4(192,77,32,0),24 },
+       { IPv4(192,77,33,0),24 },
+       { IPv4(192,77,36,0),24 },
+       { IPv4(192,77,40,0),24 },
+       { IPv4(192,77,43,0),24 },
+       { IPv4(192,77,44,0),24 },
+       { IPv4(192,77,45,0),24 },
+       { IPv4(192,77,77,0),24 },
+       { IPv4(192,77,84,0),24 },
+       { IPv4(192,77,86,0),24 },
+       { IPv4(192,77,87,0),24 },
+       { IPv4(192,77,88,0),24 },
+       { IPv4(192,77,95,0),24 },
+       { IPv4(192,77,147,0),24 },
+       { IPv4(192,77,161,0),24 },
+       { IPv4(192,77,173,0),24 },
+       { IPv4(192,77,175,0),24 },
+       { IPv4(192,77,198,0),24 },
+       { IPv4(192,77,205,0),24 },
+       { IPv4(192,77,209,0),24 },
+       { IPv4(192,77,210,0),24 },
+       { IPv4(192,78,99,0),24 },
+       { IPv4(192,79,238,0),24 },
+       { IPv4(192,80,12,0),22 },
+       { IPv4(192,80,16,0),24 },
+       { IPv4(192,80,17,0),24 },
+       { IPv4(192,80,29,0),24 },
+       { IPv4(192,80,30,0),24 },
+       { IPv4(192,80,43,0),24 },
+       { IPv4(192,80,64,0),24 },
+       { IPv4(192,80,68,0),24 },
+       { IPv4(192,80,211,0),24 },
+       { IPv4(192,81,48,0),24 },
+       { IPv4(192,81,67,0),24 },
+       { IPv4(192,81,68,0),24 },
+       { IPv4(192,81,69,0),24 },
+       { IPv4(192,82,0,0),19 },
+       { IPv4(192,82,104,0),24 },
+       { IPv4(192,82,113,0),24 },
+       { IPv4(192,82,115,0),24 },
+       { IPv4(192,82,118,0),24 },
+       { IPv4(192,82,122,0),24 },
+       { IPv4(192,82,142,0),24 },
+       { IPv4(192,83,111,0),24 },
+       { IPv4(192,83,119,0),24 },
+       { IPv4(192,83,159,0),24 },
+       { IPv4(192,83,166,0),23 },
+       { IPv4(192,83,168,0),21 },
+       { IPv4(192,83,171,0),24 },
+       { IPv4(192,83,176,0),24 },
+       { IPv4(192,83,176,0),20 },
+       { IPv4(192,83,180,0),24 },
+       { IPv4(192,83,192,0),24 },
+       { IPv4(192,83,192,0),22 },
+       { IPv4(192,83,196,0),24 },
+       { IPv4(192,83,203,0),24 },
+       { IPv4(192,83,224,0),24 },
+       { IPv4(192,83,228,0),24 },
+       { IPv4(192,83,232,0),24 },
+       { IPv4(192,83,242,0),24 },
+       { IPv4(192,83,246,0),24 },
+       { IPv4(192,83,249,0),24 },
+       { IPv4(192,83,253,0),24 },
+       { IPv4(192,84,8,0),24 },
+       { IPv4(192,84,20,0),24 },
+       { IPv4(192,84,22,0),24 },
+       { IPv4(192,84,88,0),24 },
+       { IPv4(192,84,119,0),24 },
+       { IPv4(192,84,122,0),23 },
+       { IPv4(192,84,171,0),24 },
+       { IPv4(192,84,218,0),24 },
+       { IPv4(192,84,221,0),24 },
+       { IPv4(192,84,243,0),24 },
+       { IPv4(192,84,252,0),24 },
+       { IPv4(192,85,16,0),23 },
+       { IPv4(192,85,241,0),24 },
+       { IPv4(192,85,242,0),24 },
+       { IPv4(192,86,6,0),24 },
+       { IPv4(192,86,7,0),24 },
+       { IPv4(192,86,8,0),24 },
+       { IPv4(192,86,9,0),24 },
+       { IPv4(192,86,19,0),24 },
+       { IPv4(192,86,20,0),24 },
+       { IPv4(192,86,21,0),24 },
+       { IPv4(192,86,22,0),24 },
+       { IPv4(192,86,66,0),24 },
+       { IPv4(192,86,70,0),24 },
+       { IPv4(192,86,71,0),24 },
+       { IPv4(192,86,72,0),22 },
+       { IPv4(192,86,77,0),24 },
+       { IPv4(192,86,78,0),24 },
+       { IPv4(192,86,80,0),24 },
+       { IPv4(192,86,93,0),24 },
+       { IPv4(192,86,96,0),24 },
+       { IPv4(192,86,110,0),24 },
+       { IPv4(192,86,112,0),21 },
+       { IPv4(192,86,126,0),24 },
+       { IPv4(192,86,139,0),24 },
+       { IPv4(192,86,226,0),24 },
+       { IPv4(192,86,228,0),24 },
+       { IPv4(192,86,230,0),24 },
+       { IPv4(192,86,232,0),21 },
+       { IPv4(192,86,253,0),24 },
+       { IPv4(192,87,176,0),24 },
+       { IPv4(192,88,11,0),24 },
+       { IPv4(192,88,26,0),24 },
+       { IPv4(192,88,42,0),24 },
+       { IPv4(192,88,87,0),24 },
+       { IPv4(192,88,99,0),24 },
+       { IPv4(192,88,110,0),24 },
+       { IPv4(192,88,111,0),24 },
+       { IPv4(192,88,112,0),24 },
+       { IPv4(192,88,114,0),24 },
+       { IPv4(192,88,115,0),24 },
+       { IPv4(192,88,201,0),24 },
+       { IPv4(192,88,205,0),24 },
+       { IPv4(192,88,209,0),24 },
+       { IPv4(192,88,210,0),24 },
+       { IPv4(192,88,212,0),24 },
+       { IPv4(192,88,248,0),24 },
+       { IPv4(192,91,73,0),24 },
+       { IPv4(192,91,75,0),24 },
+       { IPv4(192,91,137,0),24 },
+       { IPv4(192,91,138,0),24 },
+       { IPv4(192,91,152,0),24 },
+       { IPv4(192,91,154,0),24 },
+       { IPv4(192,91,159,0),24 },
+       { IPv4(192,91,171,0),24 },
+       { IPv4(192,91,198,0),24 },
+       { IPv4(192,91,201,0),24 },
+       { IPv4(192,91,205,0),24 },
+       { IPv4(192,92,22,0),24 },
+       { IPv4(192,92,30,0),24 },
+       { IPv4(192,92,56,0),24 },
+       { IPv4(192,92,62,0),24 },
+       { IPv4(192,92,63,0),24 },
+       { IPv4(192,92,78,0),24 },
+       { IPv4(192,92,83,0),24 },
+       { IPv4(192,92,90,0),24 },
+       { IPv4(192,92,92,0),24 },
+       { IPv4(192,92,112,0),24 },
+       { IPv4(192,92,115,0),24 },
+       { IPv4(192,92,159,0),24 },
+       { IPv4(192,92,167,0),24 },
+       { IPv4(192,92,168,0),24 },
+       { IPv4(192,92,199,0),24 },
+       { IPv4(192,94,9,0),24 },
+       { IPv4(192,94,38,0),23 },
+       { IPv4(192,94,40,0),24 },
+       { IPv4(192,94,41,0),24 },
+       { IPv4(192,94,47,0),24 },
+       { IPv4(192,94,52,0),24 },
+       { IPv4(192,94,54,0),24 },
+       { IPv4(192,94,59,0),24 },
+       { IPv4(192,94,60,0),24 },
+       { IPv4(192,94,61,0),24 },
+       { IPv4(192,94,65,0),24 },
+       { IPv4(192,94,67,0),24 },
+       { IPv4(192,94,75,0),24 },
+       { IPv4(192,94,94,0),24 },
+       { IPv4(192,94,118,0),24 },
+       { IPv4(192,94,202,0),24 },
+       { IPv4(192,94,210,0),24 },
+       { IPv4(192,94,233,0),24 },
+       { IPv4(192,94,241,0),24 },
+       { IPv4(192,94,242,0),24 },
+       { IPv4(192,94,249,0),24 },
+       { IPv4(192,96,1,0),24 },
+       { IPv4(192,96,2,0),24 },
+       { IPv4(192,96,3,0),24 },
+       { IPv4(192,96,5,0),24 },
+       { IPv4(192,96,6,0),23 },
+       { IPv4(192,96,7,0),24 },
+       { IPv4(192,96,8,0),23 },
+       { IPv4(192,96,8,0),24 },
+       { IPv4(192,96,10,0),24 },
+       { IPv4(192,96,11,0),24 },
+       { IPv4(192,96,12,0),24 },
+       { IPv4(192,96,13,0),24 },
+       { IPv4(192,96,14,0),24 },
+       { IPv4(192,96,15,0),24 },
+       { IPv4(192,96,20,0),23 },
+       { IPv4(192,96,34,0),24 },
+       { IPv4(192,96,36,0),23 },
+       { IPv4(192,96,38,0),24 },
+       { IPv4(192,96,46,0),24 },
+       { IPv4(192,96,57,0),24 },
+       { IPv4(192,96,74,0),24 },
+       { IPv4(192,96,79,0),24 },
+       { IPv4(192,96,80,0),22 },
+       { IPv4(192,96,84,0),23 },
+       { IPv4(192,96,89,0),24 },
+       { IPv4(192,96,90,0),23 },
+       { IPv4(192,96,92,0),24 },
+       { IPv4(192,96,94,0),23 },
+       { IPv4(192,96,106,0),24 },
+       { IPv4(192,96,109,0),24 },
+       { IPv4(192,96,120,0),21 },
+       { IPv4(192,96,128,0),22 },
+       { IPv4(192,96,133,0),24 },
+       { IPv4(192,96,134,0),24 },
+       { IPv4(192,96,135,0),24 },
+       { IPv4(192,96,136,0),23 },
+       { IPv4(192,96,139,0),24 },
+       { IPv4(192,96,140,0),24 },
+       { IPv4(192,96,142,0),24 },
+       { IPv4(192,96,143,0),24 },
+       { IPv4(192,96,145,0),24 },
+       { IPv4(192,96,150,0),24 },
+       { IPv4(192,96,193,0),24 },
+       { IPv4(192,96,194,0),24 },
+       { IPv4(192,96,246,0),24 },
+       { IPv4(192,96,247,0),24 },
+       { IPv4(192,96,248,0),23 },
+       { IPv4(192,96,251,0),24 },
+       { IPv4(192,96,252,0),24 },
+       { IPv4(192,97,38,0),24 },
+       { IPv4(192,100,1,0),24 },
+       { IPv4(192,100,2,0),24 },
+       { IPv4(192,100,4,0),24 },
+       { IPv4(192,100,5,0),24 },
+       { IPv4(192,100,9,0),24 },
+       { IPv4(192,100,12,0),24 },
+       { IPv4(192,100,16,0),24 },
+       { IPv4(192,100,53,0),24 },
+       { IPv4(192,100,55,0),24 },
+       { IPv4(192,100,59,0),24 },
+       { IPv4(192,100,65,0),24 },
+       { IPv4(192,100,69,0),24 },
+       { IPv4(192,100,70,0),24 },
+       { IPv4(192,100,91,0),24 },
+       { IPv4(192,100,92,0),24 },
+       { IPv4(192,100,158,0),24 },
+       { IPv4(192,100,161,0),24 },
+       { IPv4(192,100,162,0),24 },
+       { IPv4(192,100,163,0),24 },
+       { IPv4(192,100,164,0),24 },
+       { IPv4(192,100,165,0),24 },
+       { IPv4(192,100,170,0),24 },
+       { IPv4(192,100,172,0),24 },
+       { IPv4(192,100,174,0),24 },
+       { IPv4(192,100,176,0),24 },
+       { IPv4(192,100,179,0),24 },
+       { IPv4(192,100,180,0),24 },
+       { IPv4(192,100,181,0),24 },
+       { IPv4(192,100,183,0),24 },
+       { IPv4(192,100,189,0),24 },
+       { IPv4(192,100,190,0),24 },
+       { IPv4(192,100,193,0),24 },
+       { IPv4(192,100,194,0),24 },
+       { IPv4(192,100,195,0),24 },
+       { IPv4(192,100,196,0),24 },
+       { IPv4(192,100,199,0),24 },
+       { IPv4(192,100,200,0),24 },
+       { IPv4(192,100,201,0),24 },
+       { IPv4(192,100,204,0),24 },
+       { IPv4(192,100,208,0),24 },
+       { IPv4(192,100,212,0),24 },
+       { IPv4(192,100,213,0),24 },
+       { IPv4(192,100,218,0),24 },
+       { IPv4(192,100,220,0),24 },
+       { IPv4(192,100,221,0),24 },
+       { IPv4(192,100,230,0),24 },
+       { IPv4(192,100,234,0),24 },
+       { IPv4(192,101,17,0),24 },
+       { IPv4(192,101,31,0),24 },
+       { IPv4(192,101,34,0),24 },
+       { IPv4(192,101,42,0),24 },
+       { IPv4(192,101,44,0),24 },
+       { IPv4(192,101,77,0),24 },
+       { IPv4(192,101,98,0),24 },
+       { IPv4(192,101,100,0),22 },
+       { IPv4(192,101,104,0),22 },
+       { IPv4(192,101,108,0),23 },
+       { IPv4(192,101,120,0),21 },
+       { IPv4(192,101,128,0),22 },
+       { IPv4(192,101,132,0),23 },
+       { IPv4(192,101,135,0),24 },
+       { IPv4(192,101,136,0),24 },
+       { IPv4(192,101,138,0),24 },
+       { IPv4(192,101,141,0),24 },
+       { IPv4(192,101,144,0),24 },
+       { IPv4(192,101,148,0),24 },
+       { IPv4(192,101,150,0),23 },
+       { IPv4(192,101,190,0),24 },
+       { IPv4(192,101,191,0),24 },
+       { IPv4(192,102,9,0),24 },
+       { IPv4(192,102,10,0),24 },
+       { IPv4(192,102,12,0),24 },
+       { IPv4(192,102,15,0),24 },
+       { IPv4(192,102,44,0),24 },
+       { IPv4(192,102,90,0),24 },
+       { IPv4(192,102,190,0),23 },
+       { IPv4(192,102,196,0),24 },
+       { IPv4(192,102,197,0),24 },
+       { IPv4(192,102,198,0),24 },
+       { IPv4(192,102,199,0),24 },
+       { IPv4(192,102,200,0),24 },
+       { IPv4(192,102,201,0),24 },
+       { IPv4(192,102,202,0),24 },
+       { IPv4(192,102,216,0),24 },
+       { IPv4(192,102,219,0),24 },
+       { IPv4(192,102,226,0),24 },
+       { IPv4(192,102,230,0),24 },
+       { IPv4(192,102,231,0),24 },
+       { IPv4(192,102,233,0),24 },
+       { IPv4(192,102,234,0),24 },
+       { IPv4(192,102,236,0),24 },
+       { IPv4(192,102,243,0),24 },
+       { IPv4(192,102,244,0),22 },
+       { IPv4(192,102,249,0),24 },
+       { IPv4(192,102,253,0),24 },
+       { IPv4(192,103,8,0),24 },
+       { IPv4(192,103,11,0),24 },
+       { IPv4(192,103,13,0),24 },
+       { IPv4(192,103,41,0),24 },
+       { IPv4(192,103,148,0),23 },
+       { IPv4(192,103,149,0),24 },
+       { IPv4(192,103,151,0),24 },
+       { IPv4(192,103,152,0),24 },
+       { IPv4(192,103,154,0),24 },
+       { IPv4(192,103,155,0),24 },
+       { IPv4(192,103,156,0),22 },
+       { IPv4(192,103,158,0),23 },
+       { IPv4(192,103,160,0),23 },
+       { IPv4(192,103,161,0),24 },
+       { IPv4(192,103,162,0),24 },
+       { IPv4(192,103,175,0),24 },
+       { IPv4(192,103,176,0),24 },
+       { IPv4(192,103,179,0),24 },
+       { IPv4(192,103,180,0),22 },
+       { IPv4(192,103,182,0),23 },
+       { IPv4(192,103,184,0),22 },
+       { IPv4(192,103,186,0),23 },
+       { IPv4(192,103,188,0),24 },
+       { IPv4(192,103,190,0),23 },
+       { IPv4(192,103,191,0),24 },
+       { IPv4(192,103,192,0),24 },
+       { IPv4(192,103,194,0),23 },
+       { IPv4(192,103,196,0),22 },
+       { IPv4(192,103,198,0),23 },
+       { IPv4(192,103,200,0),22 },
+       { IPv4(192,103,202,0),23 },
+       { IPv4(192,103,204,0),22 },
+       { IPv4(192,103,208,0),23 },
+       { IPv4(192,103,210,0),24 },
+       { IPv4(192,103,229,0),24 },
+       { IPv4(192,103,230,0),23 },
+       { IPv4(192,103,232,0),22 },
+       { IPv4(192,103,236,0),23 },
+       { IPv4(192,103,237,0),24 },
+       { IPv4(192,104,1,0),24 },
+       { IPv4(192,104,15,0),24 },
+       { IPv4(192,104,26,0),24 },
+       { IPv4(192,104,65,0),24 },
+       { IPv4(192,104,79,0),24 },
+       { IPv4(192,104,107,0),24 },
+       { IPv4(192,104,108,0),24 },
+       { IPv4(192,104,109,0),24 },
+       { IPv4(192,104,110,0),24 },
+       { IPv4(192,104,153,0),24 },
+       { IPv4(192,104,156,0),24 },
+       { IPv4(192,104,166,0),24 },
+       { IPv4(192,104,171,0),24 },
+       { IPv4(192,104,179,0),24 },
+       { IPv4(192,104,182,0),23 },
+       { IPv4(192,104,186,0),24 },
+       { IPv4(192,104,187,0),24 },
+       { IPv4(192,104,191,0),24 },
+       { IPv4(192,104,214,0),24 },
+       { IPv4(192,104,244,0),24 },
+       { IPv4(192,105,49,0),24 },
+       { IPv4(192,105,254,0),24 },
+       { IPv4(192,106,192,0),24 },
+       { IPv4(192,107,3,0),24 },
+       { IPv4(192,107,28,0),24 },
+       { IPv4(192,107,41,0),24 },
+       { IPv4(192,107,43,0),24 },
+       { IPv4(192,107,44,0),24 },
+       { IPv4(192,107,45,0),24 },
+       { IPv4(192,107,46,0),24 },
+       { IPv4(192,107,103,0),24 },
+       { IPv4(192,107,108,0),24 },
+       { IPv4(192,107,111,0),24 },
+       { IPv4(192,107,123,0),24 },
+       { IPv4(192,107,134,0),24 },
+       { IPv4(192,107,165,0),24 },
+       { IPv4(192,107,166,0),24 },
+       { IPv4(192,107,167,0),24 },
+       { IPv4(192,107,173,0),24 },
+       { IPv4(192,107,175,0),24 },
+       { IPv4(192,107,189,0),24 },
+       { IPv4(192,107,190,0),24 },
+       { IPv4(192,107,191,0),24 },
+       { IPv4(192,107,193,0),24 },
+       { IPv4(192,107,195,0),24 },
+       { IPv4(192,107,196,0),24 },
+       { IPv4(192,108,2,0),23 },
+       { IPv4(192,108,4,0),22 },
+       { IPv4(192,108,8,0),21 },
+       { IPv4(192,108,19,0),24 },
+       { IPv4(192,108,20,0),24 },
+       { IPv4(192,108,21,0),24 },
+       { IPv4(192,108,98,0),24 },
+       { IPv4(192,108,104,0),24 },
+       { IPv4(192,108,105,0),24 },
+       { IPv4(192,108,106,0),24 },
+       { IPv4(192,108,124,0),24 },
+       { IPv4(192,108,176,0),21 },
+       { IPv4(192,108,179,0),24 },
+       { IPv4(192,108,184,0),24 },
+       { IPv4(192,108,186,0),24 },
+       { IPv4(192,108,192,0),24 },
+       { IPv4(192,108,222,0),23 },
+       { IPv4(192,108,225,0),24 },
+       { IPv4(192,108,235,0),24 },
+       { IPv4(192,108,243,0),24 },
+       { IPv4(192,109,81,0),24 },
+       { IPv4(192,109,142,0),24 },
+       { IPv4(192,109,199,0),24 },
+       { IPv4(192,109,213,0),24 },
+       { IPv4(192,109,216,0),24 },
+       { IPv4(192,110,64,0),20 },
+       { IPv4(192,111,36,0),24 },
+       { IPv4(192,111,47,0),24 },
+       { IPv4(192,111,52,0),24 },
+       { IPv4(192,111,53,0),24 },
+       { IPv4(192,111,89,0),24 },
+       { IPv4(192,111,104,0),24 },
+       { IPv4(192,111,110,0),24 },
+       { IPv4(192,111,116,0),23 },
+       { IPv4(192,111,116,0),24 },
+       { IPv4(192,111,121,0),24 },
+       { IPv4(192,111,213,0),24 },
+       { IPv4(192,111,219,0),24 },
+       { IPv4(192,111,221,0),24 },
+       { IPv4(192,111,225,0),24 },
+       { IPv4(192,111,226,0),24 },
+       { IPv4(192,111,227,0),24 },
+       { IPv4(192,112,3,0),24 },
+       { IPv4(192,112,4,0),24 },
+       { IPv4(192,112,6,0),24 },
+       { IPv4(192,112,10,0),24 },
+       { IPv4(192,112,12,0),24 },
+       { IPv4(192,112,15,0),24 },
+       { IPv4(192,112,22,0),24 },
+       { IPv4(192,112,36,0),24 },
+       { IPv4(192,112,38,0),24 },
+       { IPv4(192,112,39,0),24 },
+       { IPv4(192,112,40,0),22 },
+       { IPv4(192,112,49,0),24 },
+       { IPv4(192,112,50,0),24 },
+       { IPv4(192,112,63,0),24 },
+       { IPv4(192,112,68,0),24 },
+       { IPv4(192,112,84,0),24 },
+       { IPv4(192,112,138,0),24 },
+       { IPv4(192,112,139,0),24 },
+       { IPv4(192,112,223,0),24 },
+       { IPv4(192,112,224,0),24 },
+       { IPv4(192,112,225,0),24 },
+       { IPv4(192,112,230,0),24 },
+       { IPv4(192,112,238,0),24 },
+       { IPv4(192,112,239,0),24 },
+       { IPv4(192,114,10,0),24 },
+       { IPv4(192,114,40,0),21 },
+       { IPv4(192,114,80,0),22 },
+       { IPv4(192,115,4,0),22 },
+       { IPv4(192,115,16,0),20 },
+       { IPv4(192,115,56,0),21 },
+       { IPv4(192,115,72,0),21 },
+       { IPv4(192,115,128,0),21 },
+       { IPv4(192,115,176,0),22 },
+       { IPv4(192,115,216,0),21 },
+       { IPv4(192,115,224,0),20 },
+       { IPv4(192,116,64,0),18 },
+       { IPv4(192,116,128,0),18 },
+       { IPv4(192,117,0,0),18 },
+       { IPv4(192,117,96,0),19 },
+       { IPv4(192,118,20,0),22 },
+       { IPv4(192,118,28,0),22 },
+       { IPv4(192,118,48,0),22 },
+       { IPv4(192,118,64,0),22 },
+       { IPv4(192,118,128,0),22 },
+       { IPv4(192,119,135,0),24 },
+       { IPv4(192,120,9,0),24 },
+       { IPv4(192,120,10,0),23 },
+       { IPv4(192,120,12,0),22 },
+       { IPv4(192,120,55,0),24 },
+       { IPv4(192,120,89,0),24 },
+       { IPv4(192,120,90,0),24 },
+       { IPv4(192,120,91,0),24 },
+       { IPv4(192,120,107,0),24 },
+       { IPv4(192,120,193,0),24 },
+       { IPv4(192,121,165,0),24 },
+       { IPv4(192,122,171,0),24 },
+       { IPv4(192,122,173,0),24 },
+       { IPv4(192,122,174,0),24 },
+       { IPv4(192,122,181,0),24 },
+       { IPv4(192,122,212,0),24 },
+       { IPv4(192,122,213,0),24 },
+       { IPv4(192,122,237,0),24 },
+       { IPv4(192,122,244,0),24 },
+       { IPv4(192,122,250,0),24 },
+       { IPv4(192,124,20,0),24 },
+       { IPv4(192,124,42,0),24 },
+       { IPv4(192,124,118,0),24 },
+       { IPv4(192,124,153,0),24 },
+       { IPv4(192,124,154,0),24 },
+       { IPv4(192,124,157,0),24 },
+       { IPv4(192,124,159,0),24 },
+       { IPv4(192,128,3,0),24 },
+       { IPv4(192,128,52,0),24 },
+       { IPv4(192,128,125,0),24 },
+       { IPv4(192,128,126,0),24 },
+       { IPv4(192,128,133,0),24 },
+       { IPv4(192,128,134,0),24 },
+       { IPv4(192,128,166,0),24 },
+       { IPv4(192,128,167,0),24 },
+       { IPv4(192,128,252,0),24 },
+       { IPv4(192,128,254,0),24 },
+       { IPv4(192,129,50,0),24 },
+       { IPv4(192,129,53,0),24 },
+       { IPv4(192,129,55,0),24 },
+       { IPv4(192,129,64,0),24 },
+       { IPv4(192,129,64,0),22 },
+       { IPv4(192,129,68,0),23 },
+       { IPv4(192,129,85,0),24 },
+       { IPv4(192,131,86,0),24 },
+       { IPv4(192,131,99,0),24 },
+       { IPv4(192,131,102,0),24 },
+       { IPv4(192,131,121,0),24 },
+       { IPv4(192,131,129,0),24 },
+       { IPv4(192,131,143,0),24 },
+       { IPv4(192,131,145,0),24 },
+       { IPv4(192,131,155,0),24 },
+       { IPv4(192,131,181,0),24 },
+       { IPv4(192,131,225,0),24 },
+       { IPv4(192,131,226,0),24 },
+       { IPv4(192,132,16,0),22 },
+       { IPv4(192,132,39,0),24 },
+       { IPv4(192,132,51,0),24 },
+       { IPv4(192,132,84,0),23 },
+       { IPv4(192,132,100,0),24 },
+       { IPv4(192,132,206,0),24 },
+       { IPv4(192,132,217,0),24 },
+       { IPv4(192,132,218,0),24 },
+       { IPv4(192,132,222,0),24 },
+       { IPv4(192,132,223,0),24 },
+       { IPv4(192,132,225,0),24 },
+       { IPv4(192,132,228,0),24 },
+       { IPv4(192,132,245,0),24 },
+       { IPv4(192,132,247,0),24 },
+       { IPv4(192,133,2,0),24 },
+       { IPv4(192,133,34,0),24 },
+       { IPv4(192,133,43,0),24 },
+       { IPv4(192,133,51,0),24 },
+       { IPv4(192,133,60,0),24 },
+       { IPv4(192,133,63,0),24 },
+       { IPv4(192,133,84,0),24 },
+       { IPv4(192,133,100,0),24 },
+       { IPv4(192,133,104,0),24 },
+       { IPv4(192,133,105,0),24 },
+       { IPv4(192,133,124,0),24 },
+       { IPv4(192,133,144,0),20 },
+       { IPv4(192,133,160,0),19 },
+       { IPv4(192,133,191,0),24 },
+       { IPv4(192,133,192,0),19 },
+       { IPv4(192,133,224,0),20 },
+       { IPv4(192,133,240,0),22 },
+       { IPv4(192,133,254,0),24 },
+       { IPv4(192,135,43,0),24 },
+       { IPv4(192,135,50,0),24 },
+       { IPv4(192,135,76,0),24 },
+       { IPv4(192,135,80,0),24 },
+       { IPv4(192,135,112,0),24 },
+       { IPv4(192,135,113,0),24 },
+       { IPv4(192,135,114,0),24 },
+       { IPv4(192,135,115,0),24 },
+       { IPv4(192,135,116,0),24 },
+       { IPv4(192,135,118,0),24 },
+       { IPv4(192,135,119,0),24 },
+       { IPv4(192,135,120,0),24 },
+       { IPv4(192,135,121,0),24 },
+       { IPv4(192,135,122,0),24 },
+       { IPv4(192,135,144,0),24 },
+       { IPv4(192,135,174,0),24 },
+       { IPv4(192,135,176,0),24 },
+       { IPv4(192,135,181,0),24 },
+       { IPv4(192,135,183,0),24 },
+       { IPv4(192,135,184,0),24 },
+       { IPv4(192,135,188,0),24 },
+       { IPv4(192,135,189,0),24 },
+       { IPv4(192,135,193,0),24 },
+       { IPv4(192,135,227,0),24 },
+       { IPv4(192,135,237,0),24 },
+       { IPv4(192,135,238,0),24 },
+       { IPv4(192,135,239,0),24 },
+       { IPv4(192,135,240,0),21 },
+       { IPv4(192,135,248,0),23 },
+       { IPv4(192,135,250,0),24 },
+       { IPv4(192,136,8,0),24 },
+       { IPv4(192,136,16,0),24 },
+       { IPv4(192,136,22,0),24 },
+       { IPv4(192,136,32,0),23 },
+       { IPv4(192,136,50,0),24 },
+       { IPv4(192,136,64,0),24 },
+       { IPv4(192,136,70,0),24 },
+       { IPv4(192,136,112,0),24 },
+       { IPv4(192,136,120,0),21 },
+       { IPv4(192,136,128,0),23 },
+       { IPv4(192,136,130,0),24 },
+       { IPv4(192,136,133,0),24 },
+       { IPv4(192,136,154,0),23 },
+       { IPv4(192,137,21,0),24 },
+       { IPv4(192,137,225,0),24 },
+       { IPv4(192,137,252,0),24 },
+       { IPv4(192,138,24,0),21 },
+       { IPv4(192,138,29,0),24 },
+       { IPv4(192,138,32,0),19 },
+       { IPv4(192,138,35,0),24 },
+       { IPv4(192,138,64,0),20 },
+       { IPv4(192,138,78,0),24 },
+       { IPv4(192,138,80,0),22 },
+       { IPv4(192,138,85,0),24 },
+       { IPv4(192,138,87,0),24 },
+       { IPv4(192,138,101,0),24 },
+       { IPv4(192,138,131,0),24 },
+       { IPv4(192,138,170,0),24 },
+       { IPv4(192,138,172,0),24 },
+       { IPv4(192,138,173,0),24 },
+       { IPv4(192,138,174,0),24 },
+       { IPv4(192,138,176,0),23 },
+       { IPv4(192,138,178,0),24 },
+       { IPv4(192,138,184,0),24 },
+       { IPv4(192,138,189,0),24 },
+       { IPv4(192,138,191,0),24 },
+       { IPv4(192,138,253,0),24 },
+       { IPv4(192,139,6,0),24 },
+       { IPv4(192,139,7,0),24 },
+       { IPv4(192,139,23,0),24 },
+       { IPv4(192,139,37,0),24 },
+       { IPv4(192,139,46,0),24 },
+       { IPv4(192,139,80,0),24 },
+       { IPv4(192,139,81,0),24 },
+       { IPv4(192,139,82,0),24 },
+       { IPv4(192,139,133,0),24 },
+       { IPv4(192,139,134,0),24 },
+       { IPv4(192,139,135,0),24 },
+       { IPv4(192,139,136,0),24 },
+       { IPv4(192,139,141,0),24 },
+       { IPv4(192,139,194,0),24 },
+       { IPv4(192,139,195,0),24 },
+       { IPv4(192,139,219,0),24 },
+       { IPv4(192,139,220,0),24 },
+       { IPv4(192,139,233,0),24 },
+       { IPv4(192,139,234,0),24 },
+       { IPv4(192,139,235,0),24 },
+       { IPv4(192,139,238,0),24 },
+       { IPv4(192,146,1,0),24 },
+       { IPv4(192,146,2,0),24 },
+       { IPv4(192,146,3,0),24 },
+       { IPv4(192,146,4,0),24 },
+       { IPv4(192,146,5,0),24 },
+       { IPv4(192,146,25,0),24 },
+       { IPv4(192,146,26,0),24 },
+       { IPv4(192,146,27,0),24 },
+       { IPv4(192,146,28,0),24 },
+       { IPv4(192,146,29,0),24 },
+       { IPv4(192,146,30,0),24 },
+       { IPv4(192,146,31,0),24 },
+       { IPv4(192,146,32,0),19 },
+       { IPv4(192,146,64,0),19 },
+       { IPv4(192,146,96,0),22 },
+       { IPv4(192,146,100,0),24 },
+       { IPv4(192,146,112,0),24 },
+       { IPv4(192,146,150,0),24 },
+       { IPv4(192,146,159,0),24 },
+       { IPv4(192,146,161,0),24 },
+       { IPv4(192,146,162,0),24 },
+       { IPv4(192,146,183,0),24 },
+       { IPv4(192,146,201,0),24 },
+       { IPv4(192,146,214,0),24 },
+       { IPv4(192,146,226,0),24 },
+       { IPv4(192,146,254,0),24 },
+       { IPv4(192,147,7,0),24 },
+       { IPv4(192,147,12,0),24 },
+       { IPv4(192,147,13,0),24 },
+       { IPv4(192,147,35,0),24 },
+       { IPv4(192,147,40,0),24 },
+       { IPv4(192,147,51,0),24 },
+       { IPv4(192,147,73,0),24 },
+       { IPv4(192,147,160,0),20 },
+       { IPv4(192,147,171,0),24 },
+       { IPv4(192,147,176,0),22 },
+       { IPv4(192,147,223,0),24 },
+       { IPv4(192,147,233,0),24 },
+       { IPv4(192,147,236,0),24 },
+       { IPv4(192,147,239,0),24 },
+       { IPv4(192,147,240,0),24 },
+       { IPv4(192,147,242,0),24 },
+       { IPv4(192,147,243,0),24 },
+       { IPv4(192,147,244,0),24 },
+       { IPv4(192,147,249,0),24 },
+       { IPv4(192,148,93,0),24 },
+       { IPv4(192,148,94,0),23 },
+       { IPv4(192,148,96,0),23 },
+       { IPv4(192,148,174,0),24 },
+       { IPv4(192,148,195,0),24 },
+       { IPv4(192,148,252,0),24 },
+       { IPv4(192,148,253,0),24 },
+       { IPv4(192,149,2,0),24 },
+       { IPv4(192,149,18,0),24 },
+       { IPv4(192,149,20,0),24 },
+       { IPv4(192,149,55,0),24 },
+       { IPv4(192,149,81,0),24 },
+       { IPv4(192,149,89,0),24 },
+       { IPv4(192,149,92,0),24 },
+       { IPv4(192,149,104,0),24 },
+       { IPv4(192,149,107,0),24 },
+       { IPv4(192,149,108,0),24 },
+       { IPv4(192,149,138,0),24 },
+       { IPv4(192,149,140,0),24 },
+       { IPv4(192,149,141,0),24 },
+       { IPv4(192,149,142,0),24 },
+       { IPv4(192,149,146,0),24 },
+       { IPv4(192,149,147,0),24 },
+       { IPv4(192,149,148,0),24 },
+       { IPv4(192,149,151,0),24 },
+       { IPv4(192,149,214,0),24 },
+       { IPv4(192,149,216,0),24 },
+       { IPv4(192,149,217,0),24 },
+       { IPv4(192,149,231,0),24 },
+       { IPv4(192,149,235,0),24 },
+       { IPv4(192,149,237,0),24 },
+       { IPv4(192,149,240,0),24 },
+       { IPv4(192,150,14,0),24 },
+       { IPv4(192,150,15,0),24 },
+       { IPv4(192,150,21,0),24 },
+       { IPv4(192,150,27,0),24 },
+       { IPv4(192,150,28,0),24 },
+       { IPv4(192,150,31,0),24 },
+       { IPv4(192,150,32,0),21 },
+       { IPv4(192,150,87,0),24 },
+       { IPv4(192,150,103,0),24 },
+       { IPv4(192,150,113,0),24 },
+       { IPv4(192,150,123,0),24 },
+       { IPv4(192,150,175,0),24 },
+       { IPv4(192,150,176,0),24 },
+       { IPv4(192,150,186,0),23 },
+       { IPv4(192,150,199,0),24 },
+       { IPv4(192,150,210,0),24 },
+       { IPv4(192,150,216,0),24 },
+       { IPv4(192,150,221,0),24 },
+       { IPv4(192,150,224,0),24 },
+       { IPv4(192,150,242,0),24 },
+       { IPv4(192,150,245,0),24 },
+       { IPv4(192,150,249,0),24 },
+       { IPv4(192,150,250,0),23 },
+       { IPv4(192,150,253,0),24 },
+       { IPv4(192,151,7,0),24 },
+       { IPv4(192,151,10,0),23 },
+       { IPv4(192,151,30,0),24 },
+       { IPv4(192,151,34,0),24 },
+       { IPv4(192,151,39,0),24 },
+       { IPv4(192,151,46,0),24 },
+       { IPv4(192,151,110,0),24 },
+       { IPv4(192,151,112,0),24 },
+       { IPv4(192,152,4,0),24 },
+       { IPv4(192,152,16,0),21 },
+       { IPv4(192,152,43,0),24 },
+       { IPv4(192,152,54,0),24 },
+       { IPv4(192,152,95,0),24 },
+       { IPv4(192,152,99,0),24 },
+       { IPv4(192,152,102,0),24 },
+       { IPv4(192,152,106,0),24 },
+       { IPv4(192,152,137,0),24 },
+       { IPv4(192,152,138,0),24 },
+       { IPv4(192,152,183,0),24 },
+       { IPv4(192,152,212,0),24 },
+       { IPv4(192,152,243,0),24 },
+       { IPv4(192,152,245,0),24 },
+       { IPv4(192,153,10,0),24 },
+       { IPv4(192,153,11,0),24 },
+       { IPv4(192,153,20,0),24 },
+       { IPv4(192,153,22,0),24 },
+       { IPv4(192,153,23,0),24 },
+       { IPv4(192,153,24,0),24 },
+       { IPv4(192,153,25,0),24 },
+       { IPv4(192,153,48,0),21 },
+       { IPv4(192,153,51,0),24 },
+       { IPv4(192,153,92,0),24 },
+       { IPv4(192,153,93,0),24 },
+       { IPv4(192,153,124,0),24 },
+       { IPv4(192,153,132,0),22 },
+       { IPv4(192,153,136,0),21 },
+       { IPv4(192,153,144,0),21 },
+       { IPv4(192,153,156,0),24 },
+       { IPv4(192,153,157,0),24 },
+       { IPv4(192,153,159,0),24 },
+       { IPv4(192,153,191,0),24 },
+       { IPv4(192,153,219,0),24 },
+       { IPv4(192,153,244,0),23 },
+       { IPv4(192,153,245,0),24 },
+       { IPv4(192,153,247,0),24 },
+       { IPv4(192,154,57,0),24 },
+       { IPv4(192,156,0,0),19 },
+       { IPv4(192,156,13,0),24 },
+       { IPv4(192,156,26,0),24 },
+       { IPv4(192,156,27,0),24 },
+       { IPv4(192,156,32,0),19 },
+       { IPv4(192,156,33,0),24 },
+       { IPv4(192,156,61,0),24 },
+       { IPv4(192,156,63,0),24 },
+       { IPv4(192,156,64,0),24 },
+       { IPv4(192,156,64,0),20 },
+       { IPv4(192,156,65,0),24 },
+       { IPv4(192,156,66,0),24 },
+       { IPv4(192,156,67,0),24 },
+       { IPv4(192,156,80,0),23 },
+       { IPv4(192,156,81,0),24 },
+       { IPv4(192,156,84,0),24 },
+       { IPv4(192,156,86,0),23 },
+       { IPv4(192,156,86,0),24 },
+       { IPv4(192,156,87,0),24 },
+       { IPv4(192,156,88,0),24 },
+       { IPv4(192,156,88,0),21 },
+       { IPv4(192,156,89,0),24 },
+       { IPv4(192,156,90,0),24 },
+       { IPv4(192,156,91,0),24 },
+       { IPv4(192,156,93,0),24 },
+       { IPv4(192,156,95,0),24 },
+       { IPv4(192,156,98,0),24 },
+       { IPv4(192,156,101,0),24 },
+       { IPv4(192,156,133,0),24 },
+       { IPv4(192,156,134,0),24 },
+       { IPv4(192,156,135,0),24 },
+       { IPv4(192,156,136,0),24 },
+       { IPv4(192,156,166,0),24 },
+       { IPv4(192,156,191,0),24 },
+       { IPv4(192,156,202,0),24 },
+       { IPv4(192,156,212,0),24 },
+       { IPv4(192,156,214,0),24 },
+       { IPv4(192,156,220,0),24 },
+       { IPv4(192,156,226,0),24 },
+       { IPv4(192,156,234,0),24 },
+       { IPv4(192,156,243,0),24 },
+       { IPv4(192,157,130,0),24 },
+       { IPv4(192,158,48,0),24 },
+       { IPv4(192,158,61,0),24 },
+       { IPv4(192,159,13,0),24 },
+       { IPv4(192,159,32,0),22 },
+       { IPv4(192,159,104,0),24 },
+       { IPv4(192,159,111,0),24 },
+       { IPv4(192,159,130,0),24 },
+       { IPv4(192,160,15,0),24 },
+       { IPv4(192,160,35,0),24 },
+       { IPv4(192,160,49,0),24 },
+       { IPv4(192,160,53,0),24 },
+       { IPv4(192,160,55,0),24 },
+       { IPv4(192,160,61,0),24 },
+       { IPv4(192,160,62,0),24 },
+       { IPv4(192,160,69,0),24 },
+       { IPv4(192,160,73,0),24 },
+       { IPv4(192,160,74,0),24 },
+       { IPv4(192,160,97,0),24 },
+       { IPv4(192,160,98,0),23 },
+       { IPv4(192,160,100,0),24 },
+       { IPv4(192,160,122,0),24 },
+       { IPv4(192,160,125,0),24 },
+       { IPv4(192,160,129,0),24 },
+       { IPv4(192,160,130,0),24 },
+       { IPv4(192,160,158,0),24 },
+       { IPv4(192,160,159,0),24 },
+       { IPv4(192,160,165,0),24 },
+       { IPv4(192,160,186,0),24 },
+       { IPv4(192,160,187,0),24 },
+       { IPv4(192,160,242,0),24 },
+       { IPv4(192,160,243,0),24 },
+       { IPv4(192,160,244,0),24 },
+       { IPv4(192,161,36,0),24 },
+       { IPv4(192,162,16,0),24 },
+       { IPv4(192,164,72,0),21 },
+       { IPv4(192,164,128,0),19 },
+       { IPv4(192,164,176,0),20 },
+       { IPv4(192,164,192,0),20 },
+       { IPv4(192,165,188,0),24 },
+       { IPv4(192,165,207,0),24 },
+       { IPv4(192,169,4,0),24 },
+       { IPv4(192,169,5,0),24 },
+       { IPv4(192,169,39,0),24 },
+       { IPv4(192,169,40,0),23 },
+       { IPv4(192,169,64,0),23 },
+       { IPv4(192,170,0,0),18 },
+       { IPv4(192,170,64,0),19 },
+       { IPv4(192,170,66,0),24 },
+       { IPv4(192,170,73,0),24 },
+       { IPv4(192,170,79,0),24 },
+       { IPv4(192,170,96,0),19 },
+       { IPv4(192,171,8,0),22 },
+       { IPv4(192,171,12,0),24 },
+       { IPv4(192,171,16,0),23 },
+       { IPv4(192,171,80,0),20 },
+       { IPv4(192,171,101,0),24 },
+       { IPv4(192,171,108,0),24 },
+       { IPv4(192,171,111,0),24 },
+       { IPv4(192,171,113,0),24 },
+       { IPv4(192,172,0,0),19 },
+       { IPv4(192,172,222,0),24 },
+       { IPv4(192,172,226,0),24 },
+       { IPv4(192,172,241,0),24 },
+       { IPv4(192,174,32,0),19 },
+       { IPv4(192,175,165,0),24 },
+       { IPv4(192,175,173,0),24 },
+       { IPv4(192,175,182,0),23 },
+       { IPv4(192,175,185,0),24 },
+       { IPv4(192,175,198,0),24 },
+       { IPv4(192,175,209,0),24 },
+       { IPv4(192,175,253,0),24 },
+       { IPv4(192,176,253,0),24 },
+       { IPv4(192,187,4,0),24 },
+       { IPv4(192,187,4,0),22 },
+       { IPv4(192,187,128,0),17 },
+       { IPv4(192,187,156,0),24 },
+       { IPv4(192,187,206,0),24 },
+       { IPv4(192,188,3,0),24 },
+       { IPv4(192,188,4,0),24 },
+       { IPv4(192,188,16,0),24 },
+       { IPv4(192,188,17,0),24 },
+       { IPv4(192,188,34,0),24 },
+       { IPv4(192,188,35,0),24 },
+       { IPv4(192,188,53,0),24 },
+       { IPv4(192,188,57,0),24 },
+       { IPv4(192,188,60,0),24 },
+       { IPv4(192,188,70,0),24 },
+       { IPv4(192,188,72,0),24 },
+       { IPv4(192,188,89,0),24 },
+       { IPv4(192,188,90,0),24 },
+       { IPv4(192,188,96,0),24 },
+       { IPv4(192,188,106,0),24 },
+       { IPv4(192,188,107,0),24 },
+       { IPv4(192,188,114,0),24 },
+       { IPv4(192,188,136,0),24 },
+       { IPv4(192,188,148,0),24 },
+       { IPv4(192,188,149,0),24 },
+       { IPv4(192,188,159,0),24 },
+       { IPv4(192,188,193,0),24 },
+       { IPv4(192,188,199,0),24 },
+       { IPv4(192,188,202,0),24 },
+       { IPv4(192,188,204,0),22 },
+       { IPv4(192,188,208,0),20 },
+       { IPv4(192,188,230,0),24 },
+       { IPv4(192,188,231,0),24 },
+       { IPv4(192,188,232,0),24 },
+       { IPv4(192,188,238,0),23 },
+       { IPv4(192,188,240,0),24 },
+       { IPv4(192,188,253,0),24 },
+       { IPv4(192,189,32,0),24 },
+       { IPv4(192,189,44,0),24 },
+       { IPv4(192,189,45,0),24 },
+       { IPv4(192,189,46,0),24 },
+       { IPv4(192,189,47,0),24 },
+       { IPv4(192,189,48,0),24 },
+       { IPv4(192,189,54,0),24 },
+       { IPv4(192,189,62,0),24 },
+       { IPv4(192,189,65,0),24 },
+       { IPv4(192,189,74,0),24 },
+       { IPv4(192,189,172,0),24 },
+       { IPv4(192,189,174,0),24 },
+       { IPv4(192,189,177,0),24 },
+       { IPv4(192,189,184,0),22 },
+       { IPv4(192,189,197,0),24 },
+       { IPv4(192,189,199,0),24 },
+       { IPv4(192,189,218,0),24 },
+       { IPv4(192,189,226,0),24 },
+       { IPv4(192,189,227,0),24 },
+       { IPv4(192,189,247,0),24 },
+       { IPv4(192,189,249,0),24 },
+       { IPv4(192,190,12,0),24 },
+       { IPv4(192,190,37,0),24 },
+       { IPv4(192,190,38,0),24 },
+       { IPv4(192,190,45,0),24 },
+       { IPv4(192,190,60,0),24 },
+       { IPv4(192,190,66,0),24 },
+       { IPv4(192,190,68,0),24 },
+       { IPv4(192,190,106,0),24 },
+       { IPv4(192,190,109,0),24 },
+       { IPv4(192,190,111,0),24 },
+       { IPv4(192,190,224,0),24 },
+       { IPv4(192,192,0,0),24 },
+       { IPv4(192,192,0,0),16 },
+       { IPv4(192,192,1,0),24 },
+       { IPv4(192,192,2,0),24 },
+       { IPv4(192,192,15,0),24 },
+       { IPv4(192,193,44,0),24 },
+       { IPv4(192,193,45,0),24 },
+       { IPv4(192,193,48,0),24 },
+       { IPv4(192,193,70,0),24 },
+       { IPv4(192,193,74,0),24 },
+       { IPv4(192,193,75,0),24 },
+       { IPv4(192,193,76,0),24 },
+       { IPv4(192,193,78,0),24 },
+       { IPv4(192,193,79,0),24 },
+       { IPv4(192,193,85,0),24 },
+       { IPv4(192,193,126,0),24 },
+       { IPv4(192,193,127,0),24 },
+       { IPv4(192,193,192,0),24 },
+       { IPv4(192,193,193,0),24 },
+       { IPv4(192,193,195,0),24 },
+       { IPv4(192,193,196,0),24 },
+       { IPv4(192,193,208,0),24 },
+       { IPv4(192,193,210,0),24 },
+       { IPv4(192,193,211,0),24 },
+       { IPv4(192,195,26,0),24 },
+       { IPv4(192,195,30,0),24 },
+       { IPv4(192,195,38,0),24 },
+       { IPv4(192,195,41,0),24 },
+       { IPv4(192,195,44,0),24 },
+       { IPv4(192,195,49,0),24 },
+       { IPv4(192,195,50,0),24 },
+       { IPv4(192,195,68,0),23 },
+       { IPv4(192,195,70,0),24 },
+       { IPv4(192,195,85,0),24 },
+       { IPv4(192,195,153,0),24 },
+       { IPv4(192,195,154,0),23 },
+       { IPv4(192,195,176,0),24 },
+       { IPv4(192,195,177,0),24 },
+       { IPv4(192,195,190,0),24 },
+       { IPv4(192,195,192,0),22 },
+       { IPv4(192,195,196,0),24 },
+       { IPv4(192,195,243,0),24 },
+       { IPv4(192,195,245,0),24 },
+       { IPv4(192,197,0,0),19 },
+       { IPv4(192,197,48,0),23 },
+       { IPv4(192,197,50,0),24 },
+       { IPv4(192,197,67,0),24 },
+       { IPv4(192,197,69,0),24 },
+       { IPv4(192,197,72,0),24 },
+       { IPv4(192,197,76,0),24 },
+       { IPv4(192,197,77,0),24 },
+       { IPv4(192,197,78,0),24 },
+       { IPv4(192,197,79,0),24 },
+       { IPv4(192,197,82,0),24 },
+       { IPv4(192,197,83,0),24 },
+       { IPv4(192,197,111,0),24 },
+       { IPv4(192,197,114,0),24 },
+       { IPv4(192,197,115,0),24 },
+       { IPv4(192,197,166,0),24 },
+       { IPv4(192,197,178,0),24 },
+       { IPv4(192,197,180,0),24 },
+       { IPv4(192,197,181,0),24 },
+       { IPv4(192,197,182,0),24 },
+       { IPv4(192,197,183,0),24 },
+       { IPv4(192,197,184,0),24 },
+       { IPv4(192,197,186,0),24 },
+       { IPv4(192,197,191,0),24 },
+       { IPv4(192,197,212,0),23 },
+       { IPv4(192,197,214,0),24 },
+       { IPv4(192,197,243,0),24 },
+       { IPv4(192,197,244,0),24 },
+       { IPv4(192,197,253,0),24 },
+       { IPv4(192,198,148,0),24 },
+       { IPv4(192,200,2,0),24 },
+       { IPv4(192,200,3,0),24 },
+       { IPv4(192,200,4,0),24 },
+       { IPv4(192,200,5,0),24 },
+       { IPv4(192,200,6,0),24 },
+       { IPv4(192,200,7,0),24 },
+       { IPv4(192,203,40,0),24 },
+       { IPv4(192,203,41,0),24 },
+       { IPv4(192,203,43,0),24 },
+       { IPv4(192,203,48,0),24 },
+       { IPv4(192,203,48,0),22 },
+       { IPv4(192,203,49,0),24 },
+       { IPv4(192,203,50,0),24 },
+       { IPv4(192,203,51,0),24 },
+       { IPv4(192,203,106,0),24 },
+       { IPv4(192,203,130,0),23 },
+       { IPv4(192,203,132,0),24 },
+       { IPv4(192,203,136,0),23 },
+       { IPv4(192,203,138,0),24 },
+       { IPv4(192,203,139,0),24 },
+       { IPv4(192,203,140,0),22 },
+       { IPv4(192,203,144,0),24 },
+       { IPv4(192,203,167,0),24 },
+       { IPv4(192,203,174,0),24 },
+       { IPv4(192,203,178,0),24 },
+       { IPv4(192,203,180,0),24 },
+       { IPv4(192,203,188,0),24 },
+       { IPv4(192,203,190,0),24 },
+       { IPv4(192,203,191,0),24 },
+       { IPv4(192,203,196,0),24 },
+       { IPv4(192,203,201,0),24 },
+       { IPv4(192,203,204,0),24 },
+       { IPv4(192,203,206,0),24 },
+       { IPv4(192,203,212,0),24 },
+       { IPv4(192,203,214,0),23 },
+       { IPv4(192,203,230,0),24 },
+       { IPv4(192,203,247,0),24 },
+       { IPv4(192,203,249,0),24 },
+       { IPv4(192,204,0,0),16 },
+       { IPv4(192,204,160,0),21 },
+       { IPv4(192,205,31,0),24 },
+       { IPv4(192,205,32,0),22 },
+       { IPv4(192,205,36,0),23 },
+       { IPv4(192,206,21,0),24 },
+       { IPv4(192,206,50,0),24 },
+       { IPv4(192,206,101,0),24 },
+       { IPv4(192,206,177,0),24 },
+       { IPv4(192,206,180,0),24 },
+       { IPv4(192,206,185,0),24 },
+       { IPv4(192,206,217,0),24 },
+       { IPv4(192,206,218,0),24 },
+       { IPv4(192,206,235,0),24 },
+       { IPv4(192,207,13,0),24 },
+       { IPv4(192,207,20,0),24 },
+       { IPv4(192,207,36,0),24 },
+       { IPv4(192,207,63,0),24 },
+       { IPv4(192,207,69,0),24 },
+       { IPv4(192,207,72,0),24 },
+       { IPv4(192,207,74,0),24 },
+       { IPv4(192,207,119,0),24 },
+       { IPv4(192,207,133,0),24 },
+       { IPv4(192,207,159,0),24 },
+       { IPv4(192,207,163,0),24 },
+       { IPv4(192,207,169,0),24 },
+       { IPv4(192,207,179,0),24 },
+       { IPv4(192,207,181,0),24 },
+       { IPv4(192,207,184,0),24 },
+       { IPv4(192,207,187,0),24 },
+       { IPv4(192,207,207,0),24 },
+       { IPv4(192,207,208,0),22 },
+       { IPv4(192,207,209,0),24 },
+       { IPv4(192,207,210,0),23 },
+       { IPv4(192,207,212,0),24 },
+       { IPv4(192,207,223,0),24 },
+       { IPv4(192,207,225,0),24 },
+       { IPv4(192,207,228,0),22 },
+       { IPv4(192,207,233,0),24 },
+       { IPv4(192,207,235,0),24 },
+       { IPv4(192,208,16,0),24 },
+       { IPv4(192,208,17,0),24 },
+       { IPv4(192,208,18,0),24 },
+       { IPv4(192,208,19,0),24 },
+       { IPv4(192,208,20,0),24 },
+       { IPv4(192,208,21,0),24 },
+       { IPv4(192,208,22,0),24 },
+       { IPv4(192,208,23,0),24 },
+       { IPv4(192,208,24,0),24 },
+       { IPv4(192,208,25,0),24 },
+       { IPv4(192,208,26,0),24 },
+       { IPv4(192,208,27,0),24 },
+       { IPv4(192,208,28,0),24 },
+       { IPv4(192,208,29,0),24 },
+       { IPv4(192,208,30,0),23 },
+       { IPv4(192,208,35,0),24 },
+       { IPv4(192,208,38,0),24 },
+       { IPv4(192,208,40,0),24 },
+       { IPv4(192,209,117,0),24 },
+       { IPv4(192,210,98,0),24 },
+       { IPv4(192,211,64,0),24 },
+       { IPv4(192,211,64,0),19 },
+       { IPv4(192,211,66,0),24 },
+       { IPv4(192,211,67,0),24 },
+       { IPv4(192,211,71,0),24 },
+       { IPv4(192,211,72,0),24 },
+       { IPv4(192,211,75,0),24 },
+       { IPv4(192,211,76,0),22 },
+       { IPv4(192,211,80,0),22 },
+       { IPv4(192,211,84,0),23 },
+       { IPv4(192,211,88,0),24 },
+       { IPv4(192,211,94,0),23 },
+       { IPv4(192,211,96,0),23 },
+       { IPv4(192,211,96,0),20 },
+       { IPv4(192,211,102,0),23 },
+       { IPv4(192,211,103,0),24 },
+       { IPv4(192,211,105,0),24 },
+       { IPv4(192,211,107,0),24 },
+       { IPv4(192,211,110,0),23 },
+       { IPv4(192,211,112,0),21 },
+       { IPv4(192,211,112,0),23 },
+       { IPv4(192,211,114,0),24 },
+       { IPv4(192,211,116,0),24 },
+       { IPv4(192,211,117,0),24 },
+       { IPv4(192,211,118,0),24 },
+       { IPv4(192,211,120,0),24 },
+       { IPv4(192,211,120,0),22 },
+       { IPv4(192,211,121,0),24 },
+       { IPv4(192,211,122,0),24 },
+       { IPv4(192,215,0,0),16 },
+       { IPv4(192,215,1,0),24 },
+       { IPv4(192,215,3,0),24 },
+       { IPv4(192,215,4,0),24 },
+       { IPv4(192,215,8,0),23 },
+       { IPv4(192,215,11,0),24 },
+       { IPv4(192,215,14,0),24 },
+       { IPv4(192,215,16,0),23 },
+       { IPv4(192,215,21,0),24 },
+       { IPv4(192,215,22,0),23 },
+       { IPv4(192,215,26,0),24 },
+       { IPv4(192,215,32,0),24 },
+       { IPv4(192,215,36,0),24 },
+       { IPv4(192,215,48,0),24 },
+       { IPv4(192,215,50,0),23 },
+       { IPv4(192,215,58,0),24 },
+       { IPv4(192,215,64,0),23 },
+       { IPv4(192,215,70,0),23 },
+       { IPv4(192,215,72,0),22 },
+       { IPv4(192,215,78,0),23 },
+       { IPv4(192,215,81,0),24 },
+       { IPv4(192,215,101,0),24 },
+       { IPv4(192,215,102,0),24 },
+       { IPv4(192,215,103,0),24 },
+       { IPv4(192,215,107,0),24 },
+       { IPv4(192,215,120,0),24 },
+       { IPv4(192,215,122,0),24 },
+       { IPv4(192,215,123,0),24 },
+       { IPv4(192,215,124,0),24 },
+       { IPv4(192,215,140,0),22 },
+       { IPv4(192,215,145,0),24 },
+       { IPv4(192,215,146,0),23 },
+       { IPv4(192,215,150,0),24 },
+       { IPv4(192,215,160,0),23 },
+       { IPv4(192,215,162,0),24 },
+       { IPv4(192,215,164,0),24 },
+       { IPv4(192,215,168,0),24 },
+       { IPv4(192,215,169,0),24 },
+       { IPv4(192,215,170,0),24 },
+       { IPv4(192,215,171,0),24 },
+       { IPv4(192,215,175,0),24 },
+       { IPv4(192,215,176,0),24 },
+       { IPv4(192,215,180,0),23 },
+       { IPv4(192,215,184,0),24 },
+       { IPv4(192,215,185,0),24 },
+       { IPv4(192,215,191,0),24 },
+       { IPv4(192,215,194,0),24 },
+       { IPv4(192,215,198,0),24 },
+       { IPv4(192,215,200,0),21 },
+       { IPv4(192,215,212,0),24 },
+       { IPv4(192,215,213,0),24 },
+       { IPv4(192,215,214,0),24 },
+       { IPv4(192,215,215,0),24 },
+       { IPv4(192,215,216,0),23 },
+       { IPv4(192,215,220,0),24 },
+       { IPv4(192,215,234,0),23 },
+       { IPv4(192,215,241,0),24 },
+       { IPv4(192,215,248,0),24 },
+       { IPv4(192,215,249,0),24 },
+       { IPv4(192,215,254,0),24 },
+       { IPv4(192,216,8,0),24 },
+       { IPv4(192,216,44,0),24 },
+       { IPv4(192,216,45,0),24 },
+       { IPv4(192,216,56,0),24 },
+       { IPv4(192,216,57,0),24 },
+       { IPv4(192,216,61,0),24 },
+       { IPv4(192,216,72,0),24 },
+       { IPv4(192,216,73,0),24 },
+       { IPv4(192,216,74,0),24 },
+       { IPv4(192,216,79,0),24 },
+       { IPv4(192,216,89,0),24 },
+       { IPv4(192,216,93,0),24 },
+       { IPv4(192,216,95,0),24 },
+       { IPv4(192,216,139,0),24 },
+       { IPv4(192,216,144,0),21 },
+       { IPv4(192,216,186,0),24 },
+       { IPv4(192,216,242,0),24 },
+       { IPv4(192,217,0,0),16 },
+       { IPv4(192,218,8,0),23 },
+       { IPv4(192,218,10,0),23 },
+       { IPv4(192,218,12,0),23 },
+       { IPv4(192,218,14,0),24 },
+       { IPv4(192,218,15,0),24 },
+       { IPv4(192,218,128,0),23 },
+       { IPv4(192,218,140,0),24 },
+       { IPv4(192,218,151,0),24 },
+       { IPv4(192,219,150,0),24 },
+       { IPv4(192,220,0,0),16 },
+       { IPv4(192,222,1,0),24 },
+       { IPv4(192,222,2,0),23 },
+       { IPv4(192,222,4,0),22 },
+       { IPv4(192,222,8,0),24 },
+       { IPv4(192,222,64,0),23 },
+       { IPv4(192,222,64,0),19 },
+       { IPv4(192,222,66,0),24 },
+       { IPv4(192,222,67,0),24 },
+       { IPv4(192,222,69,0),24 },
+       { IPv4(192,222,70,0),24 },
+       { IPv4(192,222,71,0),24 },
+       { IPv4(192,222,72,0),22 },
+       { IPv4(192,222,78,0),23 },
+       { IPv4(192,222,80,0),24 },
+       { IPv4(192,222,82,0),23 },
+       { IPv4(192,222,90,0),24 },
+       { IPv4(192,222,93,0),24 },
+       { IPv4(192,222,94,0),24 },
+       { IPv4(192,222,96,0),22 },
+       { IPv4(192,222,99,0),24 },
+       { IPv4(192,223,4,0),24 },
+       { IPv4(192,223,6,0),24 },
+       { IPv4(192,223,7,0),24 },
+       { IPv4(192,223,35,0),24 },
+       { IPv4(192,223,36,0),24 },
+       { IPv4(192,223,37,0),24 },
+       { IPv4(192,223,57,0),24 },
+       { IPv4(192,223,154,0),24 },
+       { IPv4(192,223,160,0),24 },
+       { IPv4(192,223,161,0),24 },
+       { IPv4(192,223,163,0),24 },
+       { IPv4(192,223,169,0),24 },
+       { IPv4(192,223,172,0),24 },
+       { IPv4(192,223,174,0),24 },
+       { IPv4(192,223,176,0),21 },
+       { IPv4(192,223,184,0),21 },
+       { IPv4(192,223,192,0),21 },
+       { IPv4(192,223,200,0),24 },
+       { IPv4(192,223,203,0),24 },
+       { IPv4(192,223,204,0),24 },
+       { IPv4(192,223,206,0),24 },
+       { IPv4(192,223,207,0),24 },
+       { IPv4(192,223,208,0),21 },
+       { IPv4(192,223,219,0),24 },
+       { IPv4(192,223,221,0),24 },
+       { IPv4(192,223,222,0),24 },
+       { IPv4(192,223,223,0),24 },
+       { IPv4(192,223,225,0),24 },
+       { IPv4(192,223,226,0),24 },
+       { IPv4(192,223,227,0),24 },
+       { IPv4(192,223,228,0),24 },
+       { IPv4(192,223,235,0),24 },
+       { IPv4(192,223,237,0),24 },
+       { IPv4(192,223,241,0),24 },
+       { IPv4(192,223,242,0),24 },
+       { IPv4(192,223,243,0),24 },
+       { IPv4(192,223,246,0),24 },
+       { IPv4(192,223,248,0),21 },
+       { IPv4(192,224,11,0),24 },
+       { IPv4(192,225,32,0),20 },
+       { IPv4(192,225,48,0),21 },
+       { IPv4(192,225,56,0),24 },
+       { IPv4(192,225,64,0),19 },
+       { IPv4(192,227,1,0),24 },
+       { IPv4(192,227,2,0),23 },
+       { IPv4(192,227,4,0),22 },
+       { IPv4(192,227,8,0),21 },
+       { IPv4(192,228,128,0),17 },
+       { IPv4(192,228,128,0),18 },
+       { IPv4(192,228,192,0),19 },
+       { IPv4(192,228,224,0),19 },
+       { IPv4(192,229,42,0),24 },
+       { IPv4(192,231,6,0),24 },
+       { IPv4(192,231,31,0),24 },
+       { IPv4(192,231,43,0),24 },
+       { IPv4(192,231,63,0),24 },
+       { IPv4(192,231,86,0),24 },
+       { IPv4(192,231,90,0),24 },
+       { IPv4(192,231,110,0),24 },
+       { IPv4(192,231,128,0),24 },
+       { IPv4(192,231,135,0),24 },
+       { IPv4(192,231,139,0),24 },
+       { IPv4(192,231,156,0),22 },
+       { IPv4(192,231,160,0),24 },
+       { IPv4(192,231,162,0),23 },
+       { IPv4(192,231,164,0),24 },
+       { IPv4(192,231,172,0),24 },
+       { IPv4(192,231,193,0),24 },
+       { IPv4(192,231,202,0),24 },
+       { IPv4(192,231,214,0),24 },
+       { IPv4(192,231,221,0),24 },
+       { IPv4(192,231,231,0),24 },
+       { IPv4(192,232,95,0),24 },
+       { IPv4(192,232,117,0),24 },
+       { IPv4(192,232,118,0),24 },
+       { IPv4(192,232,119,0),24 },
+       { IPv4(192,232,120,0),21 },
+       { IPv4(192,232,120,0),24 },
+       { IPv4(192,232,121,0),24 },
+       { IPv4(192,233,80,0),24 },
+       { IPv4(192,233,81,0),24 },
+       { IPv4(192,234,14,0),23 },
+       { IPv4(192,234,16,0),24 },
+       { IPv4(192,234,17,0),24 },
+       { IPv4(192,234,18,0),23 },
+       { IPv4(192,234,20,0),24 },
+       { IPv4(192,234,65,0),24 },
+       { IPv4(192,234,72,0),24 },
+       { IPv4(192,234,96,0),24 },
+       { IPv4(192,234,101,0),24 },
+       { IPv4(192,234,135,0),24 },
+       { IPv4(192,234,136,0),24 },
+       { IPv4(192,234,137,0),24 },
+       { IPv4(192,234,140,0),24 },
+       { IPv4(192,234,153,0),24 },
+       { IPv4(192,234,167,0),24 },
+       { IPv4(192,234,173,0),24 },
+       { IPv4(192,234,175,0),24 },
+       { IPv4(192,234,176,0),24 },
+       { IPv4(192,234,177,0),24 },
+       { IPv4(192,234,223,0),24 },
+       { IPv4(192,234,235,0),24 },
+       { IPv4(192,234,237,0),24 },
+       { IPv4(192,234,247,0),24 },
+       { IPv4(192,234,253,0),24 },
+       { IPv4(192,235,0,0),20 },
+       { IPv4(192,235,16,0),20 },
+       { IPv4(192,237,0,0),19 },
+       { IPv4(192,237,29,0),24 },
+       { IPv4(192,237,32,0),19 },
+       { IPv4(192,237,114,0),24 },
+       { IPv4(192,237,115,0),24 },
+       { IPv4(192,237,125,0),24 },
+       { IPv4(192,239,13,0),24 },
+       { IPv4(192,239,39,0),24 },
+       { IPv4(192,239,48,0),24 },
+       { IPv4(192,240,128,0),20 },
+       { IPv4(192,240,135,0),24 },
+       { IPv4(192,241,47,0),24 },
+       { IPv4(192,243,0,0),20 },
+       { IPv4(192,243,16,0),21 },
+       { IPv4(192,243,173,0),24 },
+       { IPv4(192,244,4,0),24 },
+       { IPv4(192,244,8,0),21 },
+       { IPv4(192,244,24,0),23 },
+       { IPv4(192,244,75,0),24 },
+       { IPv4(192,244,231,0),24 },
+       { IPv4(192,244,247,0),24 },
+       { IPv4(192,244,253,0),24 },
+       { IPv4(192,245,19,0),24 },
+       { IPv4(192,245,20,0),22 },
+       { IPv4(192,245,25,0),24 },
+       { IPv4(192,245,26,0),24 },
+       { IPv4(192,245,27,0),24 },
+       { IPv4(192,245,28,0),24 },
+       { IPv4(192,245,29,0),24 },
+       { IPv4(192,245,33,0),24 },
+       { IPv4(192,245,36,0),24 },
+       { IPv4(192,245,42,0),23 },
+       { IPv4(192,245,58,0),23 },
+       { IPv4(192,245,61,0),24 },
+       { IPv4(192,245,81,0),24 },
+       { IPv4(192,245,82,0),23 },
+       { IPv4(192,245,84,0),23 },
+       { IPv4(192,245,86,0),24 },
+       { IPv4(192,245,88,0),24 },
+       { IPv4(192,245,89,0),24 },
+       { IPv4(192,245,90,0),24 },
+       { IPv4(192,245,92,0),24 },
+       { IPv4(192,245,95,0),24 },
+       { IPv4(192,245,98,0),24 },
+       { IPv4(192,245,119,0),24 },
+       { IPv4(192,245,142,0),24 },
+       { IPv4(192,245,153,0),24 },
+       { IPv4(192,245,163,0),24 },
+       { IPv4(192,245,171,0),24 },
+       { IPv4(192,245,176,0),24 },
+       { IPv4(192,245,179,0),24 },
+       { IPv4(192,245,187,0),24 },
+       { IPv4(192,245,197,0),24 },
+       { IPv4(192,245,198,0),23 },
+       { IPv4(192,245,200,0),21 },
+       { IPv4(192,245,218,0),24 },
+       { IPv4(192,245,232,0),24 },
+       { IPv4(192,245,249,0),24 },
+       { IPv4(192,245,250,0),24 },
+       { IPv4(192,246,9,0),24 },
+       { IPv4(192,246,17,0),24 },
+       { IPv4(192,246,34,0),24 },
+       { IPv4(192,246,69,0),24 },
+       { IPv4(192,246,76,0),24 },
+       { IPv4(192,246,84,0),24 },
+       { IPv4(192,246,85,0),24 },
+       { IPv4(192,246,88,0),24 },
+       { IPv4(192,246,103,0),24 },
+       { IPv4(192,246,117,0),24 },
+       { IPv4(192,246,123,0),24 },
+       { IPv4(192,246,150,0),24 },
+       { IPv4(192,246,155,0),24 },
+       { IPv4(192,246,171,0),24 },
+       { IPv4(192,246,172,0),24 },
+       { IPv4(192,246,218,0),24 },
+       { IPv4(192,246,224,0),22 },
+       { IPv4(192,246,228,0),23 },
+       { IPv4(192,246,230,0),24 },
+       { IPv4(192,246,231,0),24 },
+       { IPv4(192,246,232,0),22 },
+       { IPv4(192,247,16,0),20 },
+       { IPv4(192,248,0,0),17 },
+       { IPv4(192,249,24,0),24 },
+       { IPv4(192,249,46,0),24 },
+       { IPv4(192,249,47,0),24 },
+       { IPv4(192,249,48,0),24 },
+       { IPv4(192,249,49,0),24 },
+       { IPv4(192,250,0,0),20 },
+       { IPv4(192,250,112,0),24 },
+       { IPv4(192,251,6,0),24 },
+       { IPv4(192,251,7,0),24 },
+       { IPv4(192,251,14,0),24 },
+       { IPv4(192,251,26,0),24 },
+       { IPv4(192,251,27,0),24 },
+       { IPv4(192,251,28,0),24 },
+       { IPv4(192,251,29,0),24 },
+       { IPv4(192,251,30,0),24 },
+       { IPv4(192,251,46,0),23 },
+       { IPv4(192,251,66,0),23 },
+       { IPv4(192,251,68,0),23 },
+       { IPv4(192,251,94,0),24 },
+       { IPv4(192,251,147,0),24 },
+       { IPv4(192,251,193,0),24 },
+       { IPv4(192,251,195,0),24 },
+       { IPv4(192,251,213,0),24 },
+       { IPv4(192,251,219,0),24 },
+       { IPv4(192,251,220,0),22 },
+       { IPv4(192,251,224,0),24 },
+       { IPv4(192,252,0,0),21 },
+       { IPv4(192,252,64,0),18 },
+       { IPv4(192,252,76,0),24 },
+       { IPv4(193,0,14,0),24 },
+       { IPv4(193,0,224,0),22 },
+       { IPv4(193,3,128,0),23 },
+       { IPv4(193,5,2,0),24 },
+       { IPv4(193,5,24,0),24 },
+       { IPv4(193,5,25,0),24 },
+       { IPv4(193,5,41,0),24 },
+       { IPv4(193,5,68,0),23 },
+       { IPv4(193,5,160,0),21 },
+       { IPv4(193,5,240,0),21 },
+       { IPv4(193,5,248,0),23 },
+       { IPv4(193,5,255,0),24 },
+       { IPv4(193,8,35,0),24 },
+       { IPv4(193,8,40,0),23 },
+       { IPv4(193,8,109,0),24 },
+       { IPv4(193,8,197,0),24 },
+       { IPv4(193,9,120,0),24 },
+       { IPv4(193,9,124,0),22 },
+       { IPv4(193,9,254,0),24 },
+       { IPv4(193,16,48,0),20 },
+       { IPv4(193,18,249,0),24 },
+       { IPv4(193,22,100,0),23 },
+       { IPv4(193,22,120,0),21 },
+       { IPv4(193,23,134,0),24 },
+       { IPv4(193,23,148,0),22 },
+       { IPv4(193,23,164,0),24 },
+       { IPv4(193,23,167,0),24 },
+       { IPv4(193,24,16,0),21 },
+       { IPv4(193,24,48,0),20 },
+       { IPv4(193,24,64,0),23 },
+       { IPv4(193,24,65,0),24 },
+       { IPv4(193,24,66,0),24 },
+       { IPv4(193,28,5,0),24 },
+       { IPv4(193,28,62,0),24 },
+       { IPv4(193,28,212,0),24 },
+       { IPv4(193,29,230,0),24 },
+       { IPv4(193,30,20,0),24 },
+       { IPv4(193,30,28,0),22 },
+       { IPv4(193,30,202,0),24 },
+       { IPv4(193,32,17,0),24 },
+       { IPv4(193,32,23,0),24 },
+       { IPv4(193,32,98,0),23 },
+       { IPv4(193,32,114,0),24 },
+       { IPv4(193,32,208,0),23 },
+       { IPv4(193,32,254,0),24 },
+       { IPv4(193,34,230,0),23 },
+       { IPv4(193,34,230,0),24 },
+       { IPv4(193,35,182,0),23 },
+       { IPv4(193,35,184,0),21 },
+       { IPv4(193,35,192,0),22 },
+       { IPv4(193,35,196,0),23 },
+       { IPv4(193,35,255,0),24 },
+       { IPv4(193,36,232,0),24 },
+       { IPv4(193,37,32,0),24 },
+       { IPv4(193,37,36,0),24 },
+       { IPv4(193,37,69,0),24 },
+       { IPv4(193,37,160,0),24 },
+       { IPv4(193,38,52,0),24 },
+       { IPv4(193,38,64,0),18 },
+       { IPv4(193,38,168,0),24 },
+       { IPv4(193,38,169,0),24 },
+       { IPv4(193,39,16,0),20 },
+       { IPv4(193,39,32,0),19 },
+       { IPv4(193,39,64,0),23 },
+       { IPv4(193,39,122,0),24 },
+       { IPv4(193,39,133,0),24 },
+       { IPv4(193,39,144,0),24 },
+       { IPv4(193,39,246,0),24 },
+       { IPv4(193,41,2,0),23 },
+       { IPv4(193,41,10,0),23 },
+       { IPv4(193,41,36,0),24 },
+       { IPv4(193,41,56,0),22 },
+       { IPv4(193,41,90,0),24 },
+       { IPv4(193,41,93,0),24 },
+       { IPv4(193,41,118,0),23 },
+       { IPv4(193,41,128,0),22 },
+       { IPv4(193,41,148,0),23 },
+       { IPv4(193,41,164,0),23 },
+       { IPv4(193,41,184,0),22 },
+       { IPv4(193,42,128,0),22 },
+       { IPv4(193,43,15,0),24 },
+       { IPv4(193,46,135,0),24 },
+       { IPv4(193,47,104,0),21 },
+       { IPv4(193,47,112,0),20 },
+       { IPv4(193,47,128,0),21 },
+       { IPv4(193,53,23,0),24 },
+       { IPv4(193,53,80,0),24 },
+       { IPv4(193,56,127,0),24 },
+       { IPv4(193,57,105,0),24 },
+       { IPv4(193,57,106,0),24 },
+       { IPv4(193,57,107,0),24 },
+       { IPv4(193,57,109,0),24 },
+       { IPv4(193,57,110,0),24 },
+       { IPv4(193,58,70,0),24 },
+       { IPv4(193,58,70,0),23 },
+       { IPv4(193,58,71,0),24 },
+       { IPv4(193,58,204,0),22 },
+       { IPv4(193,58,208,0),24 },
+       { IPv4(193,58,209,0),24 },
+       { IPv4(193,61,112,0),22 },
+       { IPv4(193,73,62,0),24 },
+       { IPv4(193,73,73,0),24 },
+       { IPv4(193,73,74,0),24 },
+       { IPv4(193,73,75,0),24 },
+       { IPv4(193,73,76,0),24 },
+       { IPv4(193,73,78,0),24 },
+       { IPv4(193,73,79,0),24 },
+       { IPv4(193,73,80,0),24 },
+       { IPv4(193,73,81,0),24 },
+       { IPv4(193,73,82,0),24 },
+       { IPv4(193,73,83,0),24 },
+       { IPv4(193,73,84,0),24 },
+       { IPv4(193,73,85,0),24 },
+       { IPv4(193,73,86,0),24 },
+       { IPv4(193,73,87,0),24 },
+       { IPv4(193,73,88,0),24 },
+       { IPv4(193,73,89,0),24 },
+       { IPv4(193,73,90,0),24 },
+       { IPv4(193,73,91,0),24 },
+       { IPv4(193,73,92,0),24 },
+       { IPv4(193,73,93,0),24 },
+       { IPv4(193,73,94,0),24 },
+       { IPv4(193,73,95,0),24 },
+       { IPv4(193,73,96,0),24 },
+       { IPv4(193,73,97,0),24 },
+       { IPv4(193,73,98,0),24 },
+       { IPv4(193,73,99,0),24 },
+       { IPv4(193,73,100,0),24 },
+       { IPv4(193,73,101,0),24 },
+       { IPv4(193,73,102,0),24 },
+       { IPv4(193,73,103,0),24 },
+       { IPv4(193,82,158,0),24 },
+       { IPv4(193,83,209,0),24 },
+       { IPv4(193,83,212,0),24 },
+       { IPv4(193,92,46,0),24 },
+       { IPv4(193,96,28,0),24 },
+       { IPv4(193,96,112,0),21 },
+       { IPv4(193,96,173,0),24 },
+       { IPv4(193,96,230,0),24 },
+       { IPv4(193,97,96,0),20 },
+       { IPv4(193,97,120,0),22 },
+       { IPv4(193,97,124,0),23 },
+       { IPv4(193,97,129,0),24 },
+       { IPv4(193,97,184,0),24 },
+       { IPv4(193,98,1,0),24 },
+       { IPv4(193,98,110,0),24 },
+       { IPv4(193,99,144,0),24 },
+       { IPv4(193,99,145,0),24 },
+       { IPv4(193,100,32,0),19 },
+       { IPv4(193,100,232,0),24 },
+       { IPv4(193,101,58,0),24 },
+       { IPv4(193,101,67,0),24 },
+       { IPv4(193,102,208,0),24 },
+       { IPv4(193,102,227,0),24 },
+       { IPv4(193,103,1,0),24 },
+       { IPv4(193,103,2,0),23 },
+       { IPv4(193,103,4,0),22 },
+       { IPv4(193,103,8,0),21 },
+       { IPv4(193,103,16,0),20 },
+       { IPv4(193,103,32,0),19 },
+       { IPv4(193,103,64,0),18 },
+       { IPv4(193,108,42,0),23 },
+       { IPv4(193,108,64,0),21 },
+       { IPv4(193,108,91,0),24 },
+       { IPv4(193,108,92,0),24 },
+       { IPv4(193,108,100,0),24 },
+       { IPv4(193,108,132,0),23 },
+       { IPv4(193,108,148,0),22 },
+       { IPv4(193,108,165,0),24 },
+       { IPv4(193,108,210,0),24 },
+       { IPv4(193,108,214,0),24 },
+       { IPv4(193,108,232,0),23 },
+       { IPv4(193,108,238,0),23 },
+       { IPv4(193,108,252,0),24 },
+       { IPv4(193,108,253,0),24 },
+       { IPv4(193,108,254,0),24 },
+       { IPv4(193,108,255,0),24 },
+       { IPv4(193,109,81,0),24 },
+       { IPv4(193,109,108,0),22 },
+       { IPv4(193,109,116,0),24 },
+       { IPv4(193,109,122,0),24 },
+       { IPv4(193,109,138,0),23 },
+       { IPv4(193,109,142,0),23 },
+       { IPv4(193,109,215,0),24 },
+       { IPv4(193,113,0,0),16 },
+       { IPv4(193,113,22,0),23 },
+       { IPv4(193,114,118,0),24 },
+       { IPv4(193,114,233,0),24 },
+       { IPv4(193,114,248,0),24 },
+       { IPv4(193,117,72,0),21 },
+       { IPv4(193,117,190,0),24 },
+       { IPv4(193,118,16,0),20 },
+       { IPv4(193,119,176,0),20 },
+       { IPv4(193,122,136,4),30 },
+       { IPv4(193,123,112,0),20 },
+       { IPv4(193,125,78,0),23 },
+       { IPv4(193,128,184,0),22 },
+       { IPv4(193,131,100,0),22 },
+       { IPv4(193,131,114,0),23 },
+       { IPv4(193,131,127,0),24 },
+       { IPv4(193,132,4,0),22 },
+       { IPv4(193,132,203,0),24 },
+       { IPv4(193,134,254,0),24 },
+       { IPv4(193,135,104,0),23 },
+       { IPv4(193,135,106,0),24 },
+       { IPv4(193,135,254,0),24 },
+       { IPv4(193,138,32,0),19 },
+       { IPv4(193,140,192,0),20 },
+       { IPv4(193,140,208,0),21 },
+       { IPv4(193,141,64,0),24 },
+       { IPv4(193,141,176,0),24 },
+       { IPv4(193,141,182,0),24 },
+       { IPv4(193,141,183,0),24 },
+       { IPv4(193,141,188,0),24 },
+       { IPv4(193,148,24,0),21 },
+       { IPv4(193,148,32,0),22 },
+       { IPv4(193,148,36,0),23 },
+       { IPv4(193,148,246,0),24 },
+       { IPv4(193,149,32,0),19 },
+       { IPv4(193,149,192,0),18 },
+       { IPv4(193,149,217,0),24 },
+       { IPv4(193,150,152,0),21 },
+       { IPv4(193,150,160,0),22 },
+       { IPv4(193,150,164,0),24 },
+       { IPv4(193,162,104,0),23 },
+       { IPv4(193,164,96,0),19 },
+       { IPv4(193,164,192,0),24 },
+       { IPv4(193,164,194,0),23 },
+       { IPv4(193,164,242,0),23 },
+       { IPv4(193,168,2,0),24 },
+       { IPv4(193,171,114,0),24 },
+       { IPv4(193,172,0,0),15 },
+       { IPv4(193,176,64,0),24 },
+       { IPv4(193,176,93,0),24 },
+       { IPv4(193,176,94,0),23 },
+       { IPv4(193,176,136,0),21 },
+       { IPv4(193,177,224,0),21 },
+       { IPv4(193,178,53,0),24 },
+       { IPv4(193,178,131,0),24 },
+       { IPv4(193,178,132,0),24 },
+       { IPv4(193,178,148,0),23 },
+       { IPv4(193,178,173,0),24 },
+       { IPv4(193,178,208,0),24 },
+       { IPv4(193,178,219,0),24 },
+       { IPv4(193,180,62,0),24 },
+       { IPv4(193,181,0,0),24 },
+       { IPv4(193,183,18,0),23 },
+       { IPv4(193,186,93,0),24 },
+       { IPv4(193,186,94,0),24 },
+       { IPv4(193,186,161,0),24 },
+       { IPv4(193,186,188,0),22 },
+       { IPv4(193,188,32,0),20 },
+       { IPv4(193,188,32,0),24 },
+       { IPv4(193,188,34,0),23 },
+       { IPv4(193,188,36,0),23 },
+       { IPv4(193,188,40,0),21 },
+       { IPv4(193,188,135,0),24 },
+       { IPv4(193,188,160,0),19 },
+       { IPv4(193,192,32,0),19 },
+       { IPv4(193,192,64,0),19 },
+       { IPv4(193,192,224,0),19 },
+       { IPv4(193,192,230,0),24 },
+       { IPv4(193,192,246,0),24 },
+       { IPv4(193,192,249,0),24 },
+       { IPv4(193,193,97,0),24 },
+       { IPv4(193,193,99,0),24 },
+       { IPv4(193,193,104,0),24 },
+       { IPv4(193,193,106,0),24 },
+       { IPv4(193,193,108,0),24 },
+       { IPv4(193,193,112,0),24 },
+       { IPv4(193,193,113,0),24 },
+       { IPv4(193,193,121,0),24 },
+       { IPv4(193,193,122,0),24 },
+       { IPv4(193,193,123,0),24 },
+       { IPv4(193,193,124,0),22 },
+       { IPv4(193,193,161,0),24 },
+       { IPv4(193,193,171,0),24 },
+       { IPv4(193,193,184,0),23 },
+       { IPv4(193,194,64,0),19 },
+       { IPv4(193,194,64,0),24 },
+       { IPv4(193,194,68,0),24 },
+       { IPv4(193,194,76,0),24 },
+       { IPv4(193,194,130,0),24 },
+       { IPv4(193,194,136,0),24 },
+       { IPv4(193,194,158,0),24 },
+       { IPv4(193,195,0,0),16 },
+       { IPv4(193,195,63,0),24 },
+       { IPv4(193,195,234,0),24 },
+       { IPv4(193,203,30,0),23 },
+       { IPv4(193,203,96,0),19 },
+       { IPv4(193,203,225,0),24 },
+       { IPv4(193,203,226,0),24 },
+       { IPv4(193,203,240,0),20 },
+       { IPv4(193,218,80,0),23 },
+       { IPv4(193,218,84,0),22 },
+       { IPv4(193,218,88,0),22 },
+       { IPv4(193,218,92,0),23 },
+       { IPv4(193,218,99,0),24 },
+       { IPv4(193,218,104,0),24 },
+       { IPv4(193,218,121,0),24 },
+       { IPv4(193,222,60,0),24 },
+       { IPv4(193,226,30,0),24 },
+       { IPv4(193,226,31,0),24 },
+       { IPv4(193,226,32,0),24 },
+       { IPv4(193,226,33,0),24 },
+       { IPv4(193,226,35,0),24 },
+       { IPv4(193,226,44,0),24 },
+       { IPv4(193,226,54,0),24 },
+       { IPv4(193,226,57,0),24 },
+       { IPv4(193,226,64,0),24 },
+       { IPv4(193,226,82,0),23 },
+       { IPv4(193,226,83,0),24 },
+       { IPv4(193,226,84,0),24 },
+       { IPv4(193,226,88,0),23 },
+       { IPv4(193,226,95,0),24 },
+       { IPv4(193,226,98,0),24 },
+       { IPv4(193,226,99,0),24 },
+       { IPv4(193,226,100,0),24 },
+       { IPv4(193,226,101,0),24 },
+       { IPv4(193,226,103,0),24 },
+       { IPv4(193,226,111,0),24 },
+       { IPv4(193,227,97,0),24 },
+       { IPv4(193,227,105,0),24 },
+       { IPv4(193,227,106,0),24 },
+       { IPv4(193,227,107,0),24 },
+       { IPv4(193,228,61,0),24 },
+       { IPv4(193,228,62,0),24 },
+       { IPv4(193,230,0,0),17 },
+       { IPv4(193,230,134,0),23 },
+       { IPv4(193,230,135,0),24 },
+       { IPv4(193,230,142,0),24 },
+       { IPv4(193,230,145,0),24 },
+       { IPv4(193,230,146,0),24 },
+       { IPv4(193,230,160,0),24 },
+       { IPv4(193,230,163,0),24 },
+       { IPv4(193,230,166,0),24 },
+       { IPv4(193,230,167,0),24 },
+       { IPv4(193,230,169,0),24 },
+       { IPv4(193,230,213,0),24 },
+       { IPv4(193,230,232,0),24 },
+       { IPv4(193,230,234,0),23 },
+       { IPv4(193,230,237,0),24 },
+       { IPv4(193,230,239,0),24 },
+       { IPv4(193,230,242,0),24 },
+       { IPv4(193,230,243,0),24 },
+       { IPv4(193,230,245,0),24 },
+       { IPv4(193,230,248,0),24 },
+       { IPv4(193,230,253,0),24 },
+       { IPv4(193,231,79,0),24 },
+       { IPv4(193,231,98,0),24 },
+       { IPv4(193,231,99,0),24 },
+       { IPv4(193,231,109,0),24 },
+       { IPv4(193,231,116,0),22 },
+       { IPv4(193,231,119,0),24 },
+       { IPv4(193,231,120,0),21 },
+       { IPv4(193,231,122,0),24 },
+       { IPv4(193,231,123,0),24 },
+       { IPv4(193,231,164,0),22 },
+       { IPv4(193,231,180,0),22 },
+       { IPv4(193,231,204,0),24 },
+       { IPv4(193,231,206,0),23 },
+       { IPv4(193,231,246,0),24 },
+       { IPv4(193,231,250,0),24 },
+       { IPv4(193,234,220,0),23 },
+       { IPv4(193,235,130,0),23 },
+       { IPv4(193,235,206,0),24 },
+       { IPv4(193,237,0,0),16 },
+       { IPv4(193,238,0,0),16 },
+       { IPv4(193,242,96,0),24 },
+       { IPv4(193,242,113,0),24 },
+       { IPv4(193,242,115,0),24 },
+       { IPv4(193,242,116,0),24 },
+       { IPv4(193,243,162,0),23 },
+       { IPv4(193,243,164,0),23 },
+       { IPv4(193,243,176,0),22 },
+       { IPv4(193,243,180,0),23 },
+       { IPv4(193,243,192,0),19 },
+       { IPv4(193,243,224,0),19 },
+       { IPv4(193,246,96,0),24 },
+       { IPv4(193,246,101,0),24 },
+       { IPv4(193,246,108,0),23 },
+       { IPv4(193,246,120,0),24 },
+       { IPv4(193,246,123,0),24 },
+       { IPv4(193,247,48,0),23 },
+       { IPv4(193,247,51,0),24 },
+       { IPv4(193,247,54,0),23 },
+       { IPv4(193,247,56,0),22 },
+       { IPv4(193,247,68,0),22 },
+       { IPv4(193,247,74,0),23 },
+       { IPv4(193,247,76,0),24 },
+       { IPv4(193,247,81,0),24 },
+       { IPv4(193,247,87,0),24 },
+       { IPv4(193,247,88,0),24 },
+       { IPv4(193,247,94,0),24 },
+       { IPv4(193,247,101,0),24 },
+       { IPv4(193,247,133,0),24 },
+       { IPv4(193,247,134,0),23 },
+       { IPv4(193,247,147,0),24 },
+       { IPv4(193,247,180,0),24 },
+       { IPv4(193,247,183,0),24 },
+       { IPv4(193,247,189,0),24 },
+       { IPv4(193,247,202,0),24 },
+       { IPv4(193,247,218,0),24 },
+       { IPv4(193,247,219,0),24 },
+       { IPv4(193,247,220,0),22 },
+       { IPv4(193,247,238,0),24 },
+       { IPv4(193,254,28,0),24 },
+       { IPv4(193,255,106,0),24 },
+       { IPv4(194,8,64,0),19 },
+       { IPv4(194,8,96,0),19 },
+       { IPv4(194,8,128,0),19 },
+       { IPv4(194,8,228,0),22 },
+       { IPv4(194,8,231,0),24 },
+       { IPv4(194,8,232,0),24 },
+       { IPv4(194,8,233,0),24 },
+       { IPv4(194,8,234,0),24 },
+       { IPv4(194,8,235,0),24 },
+       { IPv4(194,8,236,0),24 },
+       { IPv4(194,9,124,0),23 },
+       { IPv4(194,9,126,0),24 },
+       { IPv4(194,10,201,0),24 },
+       { IPv4(194,13,240,0),20 },
+       { IPv4(194,14,6,0),23 },
+       { IPv4(194,14,80,0),24 },
+       { IPv4(194,14,81,0),24 },
+       { IPv4(194,14,86,0),24 },
+       { IPv4(194,15,64,0),21 },
+       { IPv4(194,15,72,0),22 },
+       { IPv4(194,15,175,0),24 },
+       { IPv4(194,15,230,0),24 },
+       { IPv4(194,15,237,0),24 },
+       { IPv4(194,15,243,0),24 },
+       { IPv4(194,20,8,0),21 },
+       { IPv4(194,20,40,0),23 },
+       { IPv4(194,20,42,0),24 },
+       { IPv4(194,20,44,0),22 },
+       { IPv4(194,20,49,0),24 },
+       { IPv4(194,20,50,0),24 },
+       { IPv4(194,20,52,0),22 },
+       { IPv4(194,20,56,0),23 },
+       { IPv4(194,20,60,0),22 },
+       { IPv4(194,20,108,0),22 },
+       { IPv4(194,20,155,0),24 },
+       { IPv4(194,20,199,0),24 },
+       { IPv4(194,20,200,0),21 },
+       { IPv4(194,20,208,0),21 },
+       { IPv4(194,20,216,0),22 },
+       { IPv4(194,20,222,0),24 },
+       { IPv4(194,20,226,0),24 },
+       { IPv4(194,20,229,0),24 },
+       { IPv4(194,20,248,0),24 },
+       { IPv4(194,21,4,0),22 },
+       { IPv4(194,21,8,0),22 },
+       { IPv4(194,21,19,0),24 },
+       { IPv4(194,21,20,0),22 },
+       { IPv4(194,21,25,0),24 },
+       { IPv4(194,21,28,0),22 },
+       { IPv4(194,29,0,0),19 },
+       { IPv4(194,29,64,0),24 },
+       { IPv4(194,29,65,0),24 },
+       { IPv4(194,29,68,0),23 },
+       { IPv4(194,29,71,0),24 },
+       { IPv4(194,29,72,0),21 },
+       { IPv4(194,29,97,0),24 },
+       { IPv4(194,29,98,0),24 },
+       { IPv4(194,29,99,0),24 },
+       { IPv4(194,29,100,0),23 },
+       { IPv4(194,29,102,0),23 },
+       { IPv4(194,29,216,0),21 },
+       { IPv4(194,30,128,0),19 },
+       { IPv4(194,30,192,0),18 },
+       { IPv4(194,31,16,0),20 },
+       { IPv4(194,31,77,0),24 },
+       { IPv4(194,31,205,0),24 },
+       { IPv4(194,31,220,0),24 },
+       { IPv4(194,31,227,0),24 },
+       { IPv4(194,31,240,0),23 },
+       { IPv4(194,31,242,0),24 },
+       { IPv4(194,32,125,0),24 },
+       { IPv4(194,32,126,0),23 },
+       { IPv4(194,32,174,0),23 },
+       { IPv4(194,32,203,0),24 },
+       { IPv4(194,32,221,0),24 },
+       { IPv4(194,33,47,0),24 },
+       { IPv4(194,33,48,0),23 },
+       { IPv4(194,34,112,0),20 },
+       { IPv4(194,35,15,0),24 },
+       { IPv4(194,35,252,0),24 },
+       { IPv4(194,36,120,0),24 },
+       { IPv4(194,36,172,0),22 },
+       { IPv4(194,36,208,0),24 },
+       { IPv4(194,36,219,0),24 },
+       { IPv4(194,36,220,0),24 },
+       { IPv4(194,36,223,0),24 },
+       { IPv4(194,38,74,0),23 },
+       { IPv4(194,38,76,0),22 },
+       { IPv4(194,38,80,0),21 },
+       { IPv4(194,38,88,0),23 },
+       { IPv4(194,38,90,0),24 },
+       { IPv4(194,39,121,0),24 },
+       { IPv4(194,39,148,0),24 },
+       { IPv4(194,39,237,0),24 },
+       { IPv4(194,40,0,0),17 },
+       { IPv4(194,41,1,0),24 },
+       { IPv4(194,41,60,0),23 },
+       { IPv4(194,41,62,0),24 },
+       { IPv4(194,42,56,0),21 },
+       { IPv4(194,42,128,0),19 },
+       { IPv4(194,42,160,0),19 },
+       { IPv4(194,42,176,0),20 },
+       { IPv4(194,42,192,0),20 },
+       { IPv4(194,44,16,0),24 },
+       { IPv4(194,44,26,0),24 },
+       { IPv4(194,44,110,0),24 },
+       { IPv4(194,44,154,0),24 },
+       { IPv4(194,44,186,0),24 },
+       { IPv4(194,45,45,0),24 },
+       { IPv4(194,45,106,0),24 },
+       { IPv4(194,45,127,0),24 },
+       { IPv4(194,45,184,0),24 },
+       { IPv4(194,45,227,0),24 },
+       { IPv4(194,45,232,0),24 },
+       { IPv4(194,48,88,0),22 },
+       { IPv4(194,48,124,0),22 },
+       { IPv4(194,48,128,0),22 },
+       { IPv4(194,48,132,0),24 },
+       { IPv4(194,48,132,0),22 },
+       { IPv4(194,48,136,0),22 },
+       { IPv4(194,48,204,0),22 },
+       { IPv4(194,48,208,0),21 },
+       { IPv4(194,48,216,0),22 },
+       { IPv4(194,49,21,0),24 },
+       { IPv4(194,49,22,0),24 },
+       { IPv4(194,49,60,0),24 },
+       { IPv4(194,53,57,0),24 },
+       { IPv4(194,53,92,0),24 },
+       { IPv4(194,55,84,0),23 },
+       { IPv4(194,55,86,0),24 },
+       { IPv4(194,55,183,0),24 },
+       { IPv4(194,55,246,0),23 },
+       { IPv4(194,56,97,0),24 },
+       { IPv4(194,56,165,0),24 },
+       { IPv4(194,56,244,0),24 },
+       { IPv4(194,56,250,0),23 },
+       { IPv4(194,59,16,0),23 },
+       { IPv4(194,59,96,0),19 },
+       { IPv4(194,59,133,0),24 },
+       { IPv4(194,59,152,0),23 },
+       { IPv4(194,59,154,0),24 },
+       { IPv4(194,59,156,0),24 },
+       { IPv4(194,59,182,0),24 },
+       { IPv4(194,60,98,0),24 },
+       { IPv4(194,60,99,0),24 },
+       { IPv4(194,60,100,0),24 },
+       { IPv4(194,60,101,0),24 },
+       { IPv4(194,60,102,0),24 },
+       { IPv4(194,60,103,0),24 },
+       { IPv4(194,60,104,0),24 },
+       { IPv4(194,60,105,0),24 },
+       { IPv4(194,60,106,0),23 },
+       { IPv4(194,60,108,0),22 },
+       { IPv4(194,61,41,0),24 },
+       { IPv4(194,61,49,0),24 },
+       { IPv4(194,61,63,0),24 },
+       { IPv4(194,61,173,0),24 },
+       { IPv4(194,61,230,0),24 },
+       { IPv4(194,62,124,0),24 },
+       { IPv4(194,63,0,0),17 },
+       { IPv4(194,64,125,0),24 },
+       { IPv4(194,64,151,0),24 },
+       { IPv4(194,68,8,0),22 },
+       { IPv4(194,68,12,0),24 },
+       { IPv4(194,68,56,0),24 },
+       { IPv4(194,68,102,0),24 },
+       { IPv4(194,68,222,0),24 },
+       { IPv4(194,69,16,0),20 },
+       { IPv4(194,69,32,0),19 },
+       { IPv4(194,69,169,0),24 },
+       { IPv4(194,69,181,0),24 },
+       { IPv4(194,69,182,0),24 },
+       { IPv4(194,69,253,0),24 },
+       { IPv4(194,70,0,0),16 },
+       { IPv4(194,71,222,0),24 },
+       { IPv4(194,72,0,0),14 },
+       { IPv4(194,72,154,0),24 },
+       { IPv4(194,73,16,0),21 },
+       { IPv4(194,73,24,0),21 },
+       { IPv4(194,73,74,0),24 },
+       { IPv4(194,73,84,0),24 },
+       { IPv4(194,73,85,0),24 },
+       { IPv4(194,73,86,0),24 },
+       { IPv4(194,73,94,0),23 },
+       { IPv4(194,73,144,0),24 },
+       { IPv4(194,73,228,0),23 },
+       { IPv4(194,74,80,0),21 },
+       { IPv4(194,74,88,0),21 },
+       { IPv4(194,74,96,0),21 },
+       { IPv4(194,74,104,0),21 },
+       { IPv4(194,74,108,0),24 },
+       { IPv4(194,74,111,0),24 },
+       { IPv4(194,74,112,0),21 },
+       { IPv4(194,74,120,0),21 },
+       { IPv4(194,74,128,0),21 },
+       { IPv4(194,74,136,0),21 },
+       { IPv4(194,74,152,0),21 },
+       { IPv4(194,74,160,0),19 },
+       { IPv4(194,75,0,0),20 },
+       { IPv4(194,75,16,0),21 },
+       { IPv4(194,75,24,0),21 },
+       { IPv4(194,75,26,0),24 },
+       { IPv4(194,75,27,0),24 },
+       { IPv4(194,75,40,0),21 },
+       { IPv4(194,75,44,0),23 },
+       { IPv4(194,75,48,0),21 },
+       { IPv4(194,75,64,0),20 },
+       { IPv4(194,75,80,0),21 },
+       { IPv4(194,75,88,0),21 },
+       { IPv4(194,75,112,0),21 },
+       { IPv4(194,75,120,0),21 },
+       { IPv4(194,75,178,0),24 },
+       { IPv4(194,75,192,0),21 },
+       { IPv4(194,75,200,0),21 },
+       { IPv4(194,75,208,0),21 },
+       { IPv4(194,75,216,0),21 },
+       { IPv4(194,75,220,0),23 },
+       { IPv4(194,76,40,0),24 },
+       { IPv4(194,76,45,0),24 },
+       { IPv4(194,76,146,0),24 },
+       { IPv4(194,76,240,0),24 },
+       { IPv4(194,77,0,0),16 },
+       { IPv4(194,77,20,0),24 },
+       { IPv4(194,77,21,0),24 },
+       { IPv4(194,77,24,0),23 },
+       { IPv4(194,77,26,0),23 },
+       { IPv4(194,77,28,0),24 },
+       { IPv4(194,77,71,0),24 },
+       { IPv4(194,77,76,0),24 },
+       { IPv4(194,77,82,0),24 },
+       { IPv4(194,77,90,0),24 },
+       { IPv4(194,77,97,0),24 },
+       { IPv4(194,77,139,0),24 },
+       { IPv4(194,77,153,0),24 },
+       { IPv4(194,77,161,0),24 },
+       { IPv4(194,77,213,0),24 },
+       { IPv4(194,77,253,0),24 },
+       { IPv4(194,85,23,0),24 },
+       { IPv4(194,85,48,0),21 },
+       { IPv4(194,85,56,0),24 },
+       { IPv4(194,85,57,0),24 },
+       { IPv4(194,88,58,0),24 },
+       { IPv4(194,88,128,0),19 },
+       { IPv4(194,93,50,0),24 },
+       { IPv4(194,93,192,0),18 },
+       { IPv4(194,96,0,0),16 },
+       { IPv4(194,97,0,0),16 },
+       { IPv4(194,97,120,0),21 },
+       { IPv4(194,97,128,0),19 },
+       { IPv4(194,97,168,0),24 },
+       { IPv4(194,99,111,0),24 },
+       { IPv4(194,99,115,0),24 },
+       { IPv4(194,101,64,0),21 },
+       { IPv4(194,101,72,0),22 },
+       { IPv4(194,101,76,0),23 },
+       { IPv4(194,101,78,0),24 },
+       { IPv4(194,102,0,0),19 },
+       { IPv4(194,102,16,0),24 },
+       { IPv4(194,102,79,0),24 },
+       { IPv4(194,102,99,0),24 },
+       { IPv4(194,102,114,0),24 },
+       { IPv4(194,102,120,0),22 },
+       { IPv4(194,102,127,0),24 },
+       { IPv4(194,102,131,0),24 },
+       { IPv4(194,102,144,0),22 },
+       { IPv4(194,102,148,0),24 },
+       { IPv4(194,102,170,0),24 },
+       { IPv4(194,102,172,0),24 },
+       { IPv4(194,102,173,0),24 },
+       { IPv4(194,102,174,0),24 },
+       { IPv4(194,102,181,0),24 },
+       { IPv4(194,102,192,0),24 },
+       { IPv4(194,102,224,0),24 },
+       { IPv4(194,102,232,0),24 },
+       { IPv4(194,102,233,0),24 },
+       { IPv4(194,103,23,0),24 },
+       { IPv4(194,103,152,0),24 },
+       { IPv4(194,104,100,0),24 },
+       { IPv4(194,104,120,0),22 },
+       { IPv4(194,104,124,0),23 },
+       { IPv4(194,104,138,0),23 },
+       { IPv4(194,104,140,0),24 },
+       { IPv4(194,104,142,0),24 },
+       { IPv4(194,104,175,0),24 },
+       { IPv4(194,105,8,0),24 },
+       { IPv4(194,105,12,0),22 },
+       { IPv4(194,105,16,0),24 },
+       { IPv4(194,105,20,0),24 },
+       { IPv4(194,105,64,0),19 },
+       { IPv4(194,105,160,0),19 },
+       { IPv4(194,106,188,0),22 },
+       { IPv4(194,107,60,0),22 },
+       { IPv4(194,107,64,0),22 },
+       { IPv4(194,107,68,0),24 },
+       { IPv4(194,107,82,0),24 },
+       { IPv4(194,107,83,0),24 },
+       { IPv4(194,107,96,0),20 },
+       { IPv4(194,107,112,0),24 },
+       { IPv4(194,107,114,0),23 },
+       { IPv4(194,107,116,0),22 },
+       { IPv4(194,107,120,0),21 },
+       { IPv4(194,112,128,0),18 },
+       { IPv4(194,112,192,0),18 },
+       { IPv4(194,115,182,0),23 },
+       { IPv4(194,115,224,0),20 },
+       { IPv4(194,117,128,0),19 },
+       { IPv4(194,118,0,0),16 },
+       { IPv4(194,119,128,0),18 },
+       { IPv4(194,119,224,0),19 },
+       { IPv4(194,120,55,0),24 },
+       { IPv4(194,120,228,0),24 },
+       { IPv4(194,121,56,0),24 },
+       { IPv4(194,124,112,0),22 },
+       { IPv4(194,124,145,0),24 },
+       { IPv4(194,124,146,0),23 },
+       { IPv4(194,124,148,0),22 },
+       { IPv4(194,125,228,0),24 },
+       { IPv4(194,125,229,0),24 },
+       { IPv4(194,125,230,0),24 },
+       { IPv4(194,125,231,0),24 },
+       { IPv4(194,125,252,0),23 },
+       { IPv4(194,126,6,0),24 },
+       { IPv4(194,126,10,0),24 },
+       { IPv4(194,126,11,0),24 },
+       { IPv4(194,126,12,0),24 },
+       { IPv4(194,126,16,0),24 },
+       { IPv4(194,126,17,0),24 },
+       { IPv4(194,126,18,0),24 },
+       { IPv4(194,126,20,0),24 },
+       { IPv4(194,126,23,0),24 },
+       { IPv4(194,126,24,0),24 },
+       { IPv4(194,126,27,0),24 },
+       { IPv4(194,126,46,0),24 },
+       { IPv4(194,126,47,0),24 },
+       { IPv4(194,126,64,0),19 },
+       { IPv4(194,126,128,0),24 },
+       { IPv4(194,126,136,0),24 },
+       { IPv4(194,126,140,0),24 },
+       { IPv4(194,126,142,0),24 },
+       { IPv4(194,127,171,0),24 },
+       { IPv4(194,130,152,0),21 },
+       { IPv4(194,132,122,0),24 },
+       { IPv4(194,132,149,0),24 },
+       { IPv4(194,133,98,0),24 },
+       { IPv4(194,133,160,0),20 },
+       { IPv4(194,133,240,0),20 },
+       { IPv4(194,133,242,0),24 },
+       { IPv4(194,133,243,0),24 },
+       { IPv4(194,133,244,0),24 },
+       { IPv4(194,139,6,0),23 },
+       { IPv4(194,139,128,0),18 },
+       { IPv4(194,140,64,0),19 },
+       { IPv4(194,140,80,0),24 },
+       { IPv4(194,140,82,0),24 },
+       { IPv4(194,140,224,0),19 },
+       { IPv4(194,143,160,0),19 },
+       { IPv4(194,145,122,0),23 },
+       { IPv4(194,145,124,0),24 },
+       { IPv4(194,145,147,0),24 },
+       { IPv4(194,145,150,0),23 },
+       { IPv4(194,147,112,0),23 },
+       { IPv4(194,147,171,0),24 },
+       { IPv4(194,147,234,0),23 },
+       { IPv4(194,147,236,0),23 },
+       { IPv4(194,149,24,0),23 },
+       { IPv4(194,149,72,0),22 },
+       { IPv4(194,149,91,0),24 },
+       { IPv4(194,149,236,0),24 },
+       { IPv4(194,149,243,0),24 },
+       { IPv4(194,149,246,0),24 },
+       { IPv4(194,149,247,0),24 },
+       { IPv4(194,149,248,0),24 },
+       { IPv4(194,149,249,0),24 },
+       { IPv4(194,149,250,0),24 },
+       { IPv4(194,149,251,0),24 },
+       { IPv4(194,149,252,0),24 },
+       { IPv4(194,149,253,0),24 },
+       { IPv4(194,149,254,0),24 },
+       { IPv4(194,149,255,0),24 },
+       { IPv4(194,151,0,0),16 },
+       { IPv4(194,151,2,0),23 },
+       { IPv4(194,151,128,0),19 },
+       { IPv4(194,152,128,0),19 },
+       { IPv4(194,153,83,0),24 },
+       { IPv4(194,153,86,0),23 },
+       { IPv4(194,153,99,0),24 },
+       { IPv4(194,153,132,0),24 },
+       { IPv4(194,153,136,0),21 },
+       { IPv4(194,153,144,0),24 },
+       { IPv4(194,153,150,0),24 },
+       { IPv4(194,153,160,0),21 },
+       { IPv4(194,153,176,0),21 },
+       { IPv4(194,153,227,0),24 },
+       { IPv4(194,153,229,0),24 },
+       { IPv4(194,153,231,0),24 },
+       { IPv4(194,153,236,0),24 },
+       { IPv4(194,153,241,0),24 },
+       { IPv4(194,153,250,0),24 },
+       { IPv4(194,153,253,0),24 },
+       { IPv4(194,153,255,0),24 },
+       { IPv4(194,154,0,0),19 },
+       { IPv4(194,154,128,0),24 },
+       { IPv4(194,154,129,0),24 },
+       { IPv4(194,154,131,0),24 },
+       { IPv4(194,154,132,0),24 },
+       { IPv4(194,154,133,0),24 },
+       { IPv4(194,154,134,0),24 },
+       { IPv4(194,154,135,0),24 },
+       { IPv4(194,154,136,0),22 },
+       { IPv4(194,154,140,0),22 },
+       { IPv4(194,154,144,0),22 },
+       { IPv4(194,154,149,0),24 },
+       { IPv4(194,154,152,0),24 },
+       { IPv4(194,154,153,0),24 },
+       { IPv4(194,154,154,0),24 },
+       { IPv4(194,154,156,0),24 },
+       { IPv4(194,154,157,0),24 },
+       { IPv4(194,154,158,0),24 },
+       { IPv4(194,154,159,0),24 },
+       { IPv4(194,154,160,0),19 },
+       { IPv4(194,154,192,0),19 },
+       { IPv4(194,158,128,0),19 },
+       { IPv4(194,158,160,0),19 },
+       { IPv4(194,158,224,0),19 },
+       { IPv4(194,158,250,0),23 },
+       { IPv4(194,158,252,0),24 },
+       { IPv4(194,159,0,0),16 },
+       { IPv4(194,159,72,0),23 },
+       { IPv4(194,159,224,0),21 },
+       { IPv4(194,161,154,0),24 },
+       { IPv4(194,161,200,0),23 },
+       { IPv4(194,161,200,0),24 },
+       { IPv4(194,161,201,0),24 },
+       { IPv4(194,164,7,0),24 },
+       { IPv4(194,165,64,0),19 },
+       { IPv4(194,165,209,0),24 },
+       { IPv4(194,168,0,0),16 },
+       { IPv4(194,171,96,0),21 },
+       { IPv4(194,174,84,0),23 },
+       { IPv4(194,176,179,0),24 },
+       { IPv4(194,177,96,0),19 },
+       { IPv4(194,177,128,0),19 },
+       { IPv4(194,180,25,0),24 },
+       { IPv4(194,180,128,0),24 },
+       { IPv4(194,180,160,0),21 },
+       { IPv4(194,180,239,0),24 },
+       { IPv4(194,183,128,0),19 },
+       { IPv4(194,183,192,0),19 },
+       { IPv4(194,183,211,0),24 },
+       { IPv4(194,183,218,0),24 },
+       { IPv4(194,183,224,0),19 },
+       { IPv4(194,185,0,0),16 },
+       { IPv4(194,185,25,0),24 },
+       { IPv4(194,193,17,0),24 },
+       { IPv4(194,194,0,0),16 },
+       { IPv4(194,196,0,0),16 },
+       { IPv4(194,196,47,0),24 },
+       { IPv4(194,196,196,0),24 },
+       { IPv4(194,196,248,0),24 },
+       { IPv4(194,201,253,0),24 },
+       { IPv4(194,202,0,0),22 },
+       { IPv4(194,202,4,0),23 },
+       { IPv4(194,203,201,0),24 },
+       { IPv4(194,208,0,0),16 },
+       { IPv4(194,209,108,0),24 },
+       { IPv4(194,209,146,0),24 },
+       { IPv4(194,209,185,0),24 },
+       { IPv4(194,213,64,0),19 },
+       { IPv4(194,216,59,0),24 },
+       { IPv4(194,216,168,0),24 },
+       { IPv4(194,217,0,0),16 },
+       { IPv4(194,217,92,0),24 },
+       { IPv4(194,217,220,0),23 },
+       { IPv4(194,222,0,0),16 },
+       { IPv4(194,230,0,0),16 },
+       { IPv4(194,230,57,0),24 },
+       { IPv4(194,230,99,0),24 },
+       { IPv4(194,231,0,0),16 },
+       { IPv4(194,231,54,0),24 },
+       { IPv4(194,231,105,0),24 },
+       { IPv4(194,231,164,0),23 },
+       { IPv4(194,231,168,0),24 },
+       { IPv4(194,231,236,0),22 },
+       { IPv4(194,231,242,0),23 },
+       { IPv4(194,231,246,0),24 },
+       { IPv4(194,231,254,0),23 },
+       { IPv4(194,232,0,0),16 },
+       { IPv4(194,235,143,0),24 },
+       { IPv4(194,235,243,0),24 },
+       { IPv4(194,238,0,0),16 },
+       { IPv4(194,242,34,0),24 },
+       { IPv4(194,242,35,0),24 },
+       { IPv4(194,242,41,0),24 },
+       { IPv4(194,242,45,0),24 },
+       { IPv4(194,242,54,0),24 },
+       { IPv4(194,242,58,0),24 },
+       { IPv4(194,242,61,0),24 },
+       { IPv4(194,242,64,0),19 },
+       { IPv4(194,242,160,0),24 },
+       { IPv4(194,246,96,0),24 },
+       { IPv4(194,247,64,0),19 },
+       { IPv4(194,247,74,0),24 },
+       { IPv4(194,247,75,0),24 },
+       { IPv4(194,247,91,0),24 },
+       { IPv4(194,253,130,0),24 },
+       { IPv4(194,253,184,0),24 },
+       { IPv4(195,2,128,0),19 },
+       { IPv4(195,2,160,0),19 },
+       { IPv4(195,3,108,0),23 },
+       { IPv4(195,4,67,0),24 },
+       { IPv4(195,4,68,0),23 },
+       { IPv4(195,5,0,0),19 },
+       { IPv4(195,5,32,0),19 },
+       { IPv4(195,5,64,0),19 },
+       { IPv4(195,5,197,0),24 },
+       { IPv4(195,5,204,0),24 },
+       { IPv4(195,7,224,0),19 },
+       { IPv4(195,8,64,0),19 },
+       { IPv4(195,10,96,0),20 },
+       { IPv4(195,10,112,0),20 },
+       { IPv4(195,10,224,0),19 },
+       { IPv4(195,11,0,0),16 },
+       { IPv4(195,11,224,0),19 },
+       { IPv4(195,12,0,0),19 },
+       { IPv4(195,12,192,0),19 },
+       { IPv4(195,13,40,0),22 },
+       { IPv4(195,13,64,0),18 },
+       { IPv4(195,14,64,0),19 },
+       { IPv4(195,16,0,0),19 },
+       { IPv4(195,16,64,0),19 },
+       { IPv4(195,16,128,0),19 },
+       { IPv4(195,16,224,0),19 },
+       { IPv4(195,18,64,0),18 },
+       { IPv4(195,22,0,0),19 },
+       { IPv4(195,24,64,0),19 },
+       { IPv4(195,24,192,0),19 },
+       { IPv4(195,26,64,0),19 },
+       { IPv4(195,26,96,0),19 },
+       { IPv4(195,26,192,0),19 },
+       { IPv4(195,28,224,0),19 },
+       { IPv4(195,30,0,0),16 },
+       { IPv4(195,33,0,0),16 },
+       { IPv4(195,33,18,0),24 },
+       { IPv4(195,33,64,0),24 },
+       { IPv4(195,33,96,0),19 },
+       { IPv4(195,33,192,0),18 },
+       { IPv4(195,34,160,0),19 },
+       { IPv4(195,35,81,0),24 },
+       { IPv4(195,35,105,0),24 },
+       { IPv4(195,35,106,0),24 },
+       { IPv4(195,35,110,0),24 },
+       { IPv4(195,35,121,0),24 },
+       { IPv4(195,35,126,0),24 },
+       { IPv4(195,35,128,0),18 },
+       { IPv4(195,38,19,0),24 },
+       { IPv4(195,38,64,0),19 },
+       { IPv4(195,38,192,0),18 },
+       { IPv4(195,42,240,0),21 },
+       { IPv4(195,46,0,0),21 },
+       { IPv4(195,46,8,0),21 },
+       { IPv4(195,46,16,0),20 },
+       { IPv4(195,46,128,0),21 },
+       { IPv4(195,46,128,0),24 },
+       { IPv4(195,46,128,0),19 },
+       { IPv4(195,46,132,0),24 },
+       { IPv4(195,46,134,0),24 },
+       { IPv4(195,46,141,0),24 },
+       { IPv4(195,46,142,0),24 },
+       { IPv4(195,46,224,0),19 },
+       { IPv4(195,51,64,0),24 },
+       { IPv4(195,51,174,0),23 },
+       { IPv4(195,51,176,0),23 },
+       { IPv4(195,51,180,0),24 },
+       { IPv4(195,54,96,0),19 },
+       { IPv4(195,54,224,0),19 },
+       { IPv4(195,58,128,0),19 },
+       { IPv4(195,58,192,0),19 },
+       { IPv4(195,60,0,0),19 },
+       { IPv4(195,61,32,0),24 },
+       { IPv4(195,61,61,0),24 },
+       { IPv4(195,64,0,0),19 },
+       { IPv4(195,65,24,0),24 },
+       { IPv4(195,65,76,0),24 },
+       { IPv4(195,65,77,0),24 },
+       { IPv4(195,65,78,0),24 },
+       { IPv4(195,65,158,0),24 },
+       { IPv4(195,66,128,0),19 },
+       { IPv4(195,66,224,0),19 },
+       { IPv4(195,70,64,0),19 },
+       { IPv4(195,70,96,0),19 },
+       { IPv4(195,72,128,0),19 },
+       { IPv4(195,72,160,0),19 },
+       { IPv4(195,74,96,0),19 },
+       { IPv4(195,74,128,0),19 },
+       { IPv4(195,74,224,0),19 },
+       { IPv4(195,75,0,0),16 },
+       { IPv4(195,75,46,0),24 },
+       { IPv4(195,78,128,0),19 },
+       { IPv4(195,79,161,0),24 },
+       { IPv4(195,79,171,0),24 },
+       { IPv4(195,80,0,0),19 },
+       { IPv4(195,80,32,0),19 },
+       { IPv4(195,80,129,0),24 },
+       { IPv4(195,80,134,0),24 },
+       { IPv4(195,81,40,0),23 },
+       { IPv4(195,82,32,0),19 },
+       { IPv4(195,82,128,0),19 },
+       { IPv4(195,82,224,0),19 },
+       { IPv4(195,85,128,0),18 },
+       { IPv4(195,86,0,0),16 },
+       { IPv4(195,87,0,0),16 },
+       { IPv4(195,88,10,0),23 },
+       { IPv4(195,88,17,0),24 },
+       { IPv4(195,88,42,0),24 },
+       { IPv4(195,88,43,0),24 },
+       { IPv4(195,88,44,0),23 },
+       { IPv4(195,88,46,0),24 },
+       { IPv4(195,88,120,0),24 },
+       { IPv4(195,88,123,0),24 },
+       { IPv4(195,88,146,0),24 },
+       { IPv4(195,88,160,0),22 },
+       { IPv4(195,90,64,0),19 },
+       { IPv4(195,90,192,0),19 },
+       { IPv4(195,90,224,0),19 },
+       { IPv4(195,93,0,0),17 },
+       { IPv4(195,93,16,0),20 },
+       { IPv4(195,93,32,0),20 },
+       { IPv4(195,93,48,0),20 },
+       { IPv4(195,93,64,0),20 },
+       { IPv4(195,93,80,0),20 },
+       { IPv4(195,94,0,0),24 },
+       { IPv4(195,94,1,0),24 },
+       { IPv4(195,94,2,0),24 },
+       { IPv4(195,94,3,0),24 },
+       { IPv4(195,94,4,0),24 },
+       { IPv4(195,94,5,0),24 },
+       { IPv4(195,94,6,0),24 },
+       { IPv4(195,94,96,0),19 },
+       { IPv4(195,95,0,0),18 },
+       { IPv4(195,95,64,0),19 },
+       { IPv4(195,95,96,0),19 },
+       { IPv4(195,95,128,0),21 },
+       { IPv4(195,95,152,0),21 },
+       { IPv4(195,95,160,0),20 },
+       { IPv4(195,96,32,0),19 },
+       { IPv4(195,97,0,0),17 },
+       { IPv4(195,97,87,0),24 },
+       { IPv4(195,98,192,0),19 },
+       { IPv4(195,99,0,0),16 },
+       { IPv4(195,99,8,0),21 },
+       { IPv4(195,99,16,0),21 },
+       { IPv4(195,99,24,0),21 },
+       { IPv4(195,99,32,0),21 },
+       { IPv4(195,99,64,0),20 },
+       { IPv4(195,99,69,0),24 },
+       { IPv4(195,99,80,0),21 },
+       { IPv4(195,99,88,0),21 },
+       { IPv4(195,99,93,0),24 },
+       { IPv4(195,99,96,0),21 },
+       { IPv4(195,99,104,0),21 },
+       { IPv4(195,99,112,0),21 },
+       { IPv4(195,99,192,0),21 },
+       { IPv4(195,99,200,0),21 },
+       { IPv4(195,99,208,0),21 },
+       { IPv4(195,99,224,0),21 },
+       { IPv4(195,99,232,0),21 },
+       { IPv4(195,99,248,0),21 },
+       { IPv4(195,100,0,0),16 },
+       { IPv4(195,102,0,0),16 },
+       { IPv4(195,102,253,0),24 },
+       { IPv4(195,106,206,0),23 },
+       { IPv4(195,110,160,0),19 },
+       { IPv4(195,110,192,0),19 },
+       { IPv4(195,112,0,0),18 },
+       { IPv4(195,112,64,0),19 },
+       { IPv4(195,121,0,0),16 },
+       { IPv4(195,129,2,0),24 },
+       { IPv4(195,129,34,0),23 },
+       { IPv4(195,130,160,0),19 },
+       { IPv4(195,134,0,0),19 },
+       { IPv4(195,134,128,0),19 },
+       { IPv4(195,138,124,0),22 },
+       { IPv4(195,138,128,0),24 },
+       { IPv4(195,138,128,0),21 },
+       { IPv4(195,138,131,0),24 },
+       { IPv4(195,138,133,0),24 },
+       { IPv4(195,138,136,0),23 },
+       { IPv4(195,138,137,0),24 },
+       { IPv4(195,138,140,0),24 },
+       { IPv4(195,138,140,0),22 },
+       { IPv4(195,138,141,0),24 },
+       { IPv4(195,138,142,0),24 },
+       { IPv4(195,138,144,0),24 },
+       { IPv4(195,138,144,0),20 },
+       { IPv4(195,138,145,0),24 },
+       { IPv4(195,138,147,0),24 },
+       { IPv4(195,138,148,0),24 },
+       { IPv4(195,138,149,0),24 },
+       { IPv4(195,138,151,0),24 },
+       { IPv4(195,138,152,0),24 },
+       { IPv4(195,138,153,0),24 },
+       { IPv4(195,138,155,0),24 },
+       { IPv4(195,138,156,0),24 },
+       { IPv4(195,138,157,0),24 },
+       { IPv4(195,138,158,0),24 },
+       { IPv4(195,138,159,0),24 },
+       { IPv4(195,138,224,0),19 },
+       { IPv4(195,141,0,0),16 },
+       { IPv4(195,141,7,0),24 },
+       { IPv4(195,141,36,0),23 },
+       { IPv4(195,141,162,0),24 },
+       { IPv4(195,141,233,0),24 },
+       { IPv4(195,141,250,0),23 },
+       { IPv4(195,146,32,0),19 },
+       { IPv4(195,147,0,0),16 },
+       { IPv4(195,149,0,0),18 },
+       { IPv4(195,155,161,0),24 },
+       { IPv4(195,162,0,0),19 },
+       { IPv4(195,162,64,0),19 },
+       { IPv4(195,162,96,0),19 },
+       { IPv4(195,162,160,0),19 },
+       { IPv4(195,162,224,0),19 },
+       { IPv4(195,163,0,0),17 },
+       { IPv4(195,163,128,0),19 },
+       { IPv4(195,163,160,0),20 },
+       { IPv4(195,163,176,0),20 },
+       { IPv4(195,163,192,0),18 },
+       { IPv4(195,167,0,0),17 },
+       { IPv4(195,170,0,0),19 },
+       { IPv4(195,170,64,0),19 },
+       { IPv4(195,171,0,0),16 },
+       { IPv4(195,171,32,0),22 },
+       { IPv4(195,171,36,0),22 },
+       { IPv4(195,171,44,0),22 },
+       { IPv4(195,171,48,0),22 },
+       { IPv4(195,171,52,0),22 },
+       { IPv4(195,171,56,0),22 },
+       { IPv4(195,171,60,0),23 },
+       { IPv4(195,171,62,0),23 },
+       { IPv4(195,171,64,0),23 },
+       { IPv4(195,171,84,0),24 },
+       { IPv4(195,171,85,0),24 },
+       { IPv4(195,171,86,0),24 },
+       { IPv4(195,171,88,0),21 },
+       { IPv4(195,171,100,0),22 },
+       { IPv4(195,173,0,0),16 },
+       { IPv4(195,173,224,0),19 },
+       { IPv4(195,176,128,0),22 },
+       { IPv4(195,176,139,0),24 },
+       { IPv4(195,176,156,0),23 },
+       { IPv4(195,182,0,0),19 },
+       { IPv4(195,182,96,0),19 },
+       { IPv4(195,183,0,0),16 },
+       { IPv4(195,184,64,0),19 },
+       { IPv4(195,184,128,0),19 },
+       { IPv4(195,184,132,0),24 },
+       { IPv4(195,184,137,0),24 },
+       { IPv4(195,184,140,0),24 },
+       { IPv4(195,184,146,0),24 },
+       { IPv4(195,184,147,0),24 },
+       { IPv4(195,184,158,0),24 },
+       { IPv4(195,184,159,0),24 },
+       { IPv4(195,184,224,0),19 },
+       { IPv4(195,188,0,0),16 },
+       { IPv4(195,190,32,0),19 },
+       { IPv4(195,190,160,0),19 },
+       { IPv4(195,200,64,0),19 },
+       { IPv4(195,200,128,0),19 },
+       { IPv4(195,202,65,0),24 },
+       { IPv4(195,202,68,0),24 },
+       { IPv4(195,202,71,0),24 },
+       { IPv4(195,202,73,0),24 },
+       { IPv4(195,202,74,0),24 },
+       { IPv4(195,202,79,0),24 },
+       { IPv4(195,202,82,0),24 },
+       { IPv4(195,202,83,0),24 },
+       { IPv4(195,202,128,0),19 },
+       { IPv4(195,202,160,0),19 },
+       { IPv4(195,202,192,0),18 },
+       { IPv4(195,206,64,0),19 },
+       { IPv4(195,206,160,0),19 },
+       { IPv4(195,210,0,0),19 },
+       { IPv4(195,211,34,0),23 },
+       { IPv4(195,211,99,0),24 },
+       { IPv4(195,212,0,0),16 },
+       { IPv4(195,213,0,0),16 },
+       { IPv4(195,213,87,0),24 },
+       { IPv4(195,214,128,0),19 },
+       { IPv4(195,214,160,0),20 },
+       { IPv4(195,214,176,0),21 },
+       { IPv4(195,214,184,0),22 },
+       { IPv4(195,218,96,0),19 },
+       { IPv4(195,224,0,0),16 },
+       { IPv4(195,226,0,0),22 },
+       { IPv4(195,226,128,0),19 },
+       { IPv4(195,230,0,0),22 },
+       { IPv4(195,230,4,0),23 },
+       { IPv4(195,230,6,0),24 },
+       { IPv4(195,230,7,0),24 },
+       { IPv4(195,230,8,0),21 },
+       { IPv4(195,230,16,0),20 },
+       { IPv4(195,240,0,0),16 },
+       { IPv4(195,241,0,0),16 },
+       { IPv4(195,244,224,0),19 },
+       { IPv4(195,246,0,0),19 },
+       { IPv4(195,246,96,0),19 },
+       { IPv4(195,246,128,0),19 },
+       { IPv4(195,246,204,0),23 },
+       { IPv4(195,246,206,0),23 },
+       { IPv4(195,248,192,0),24 },
+       { IPv4(195,248,210,0),24 },
+       { IPv4(195,248,220,0),24 },
+       { IPv4(195,250,96,0),19 },
+       { IPv4(196,1,32,0),24 },
+       { IPv4(196,1,103,0),24 },
+       { IPv4(196,1,130,0),24 },
+       { IPv4(196,1,131,0),24 },
+       { IPv4(196,1,132,0),24 },
+       { IPv4(196,1,133,0),24 },
+       { IPv4(196,2,0,0),24 },
+       { IPv4(196,2,1,0),24 },
+       { IPv4(196,3,0,0),24 },
+       { IPv4(196,3,1,0),24 },
+       { IPv4(196,3,2,0),24 },
+       { IPv4(196,3,3,0),24 },
+       { IPv4(196,3,4,0),24 },
+       { IPv4(196,3,5,0),24 },
+       { IPv4(196,3,6,0),24 },
+       { IPv4(196,3,7,0),24 },
+       { IPv4(196,3,47,0),24 },
+       { IPv4(196,3,51,0),24 },
+       { IPv4(196,3,54,0),24 },
+       { IPv4(196,3,95,0),24 },
+       { IPv4(196,3,113,0),24 },
+       { IPv4(196,3,114,0),24 },
+       { IPv4(196,3,115,0),24 },
+       { IPv4(196,3,124,0),22 },
+       { IPv4(196,3,128,0),22 },
+       { IPv4(196,3,156,0),23 },
+       { IPv4(196,3,159,0),24 },
+       { IPv4(196,3,164,0),22 },
+       { IPv4(196,3,168,0),21 },
+       { IPv4(196,3,176,0),22 },
+       { IPv4(196,3,193,0),24 },
+       { IPv4(196,3,198,0),24 },
+       { IPv4(196,3,199,0),24 },
+       { IPv4(196,3,218,0),23 },
+       { IPv4(196,3,220,0),24 },
+       { IPv4(196,3,221,0),24 },
+       { IPv4(196,4,2,0),24 },
+       { IPv4(196,4,20,0),22 },
+       { IPv4(196,4,24,0),22 },
+       { IPv4(196,4,28,0),23 },
+       { IPv4(196,4,49,0),24 },
+       { IPv4(196,4,51,0),24 },
+       { IPv4(196,4,53,0),24 },
+       { IPv4(196,4,58,0),24 },
+       { IPv4(196,4,61,0),24 },
+       { IPv4(196,4,62,0),23 },
+       { IPv4(196,4,64,0),22 },
+       { IPv4(196,4,70,0),24 },
+       { IPv4(196,4,71,0),24 },
+       { IPv4(196,4,72,0),22 },
+       { IPv4(196,4,76,0),24 },
+       { IPv4(196,4,79,0),24 },
+       { IPv4(196,4,81,0),24 },
+       { IPv4(196,4,83,0),24 },
+       { IPv4(196,4,84,0),24 },
+       { IPv4(196,4,87,0),24 },
+       { IPv4(196,4,88,0),22 },
+       { IPv4(196,4,92,0),23 },
+       { IPv4(196,4,95,0),24 },
+       { IPv4(196,4,96,0),24 },
+       { IPv4(196,4,97,0),24 },
+       { IPv4(196,4,100,0),22 },
+       { IPv4(196,4,104,0),24 },
+       { IPv4(196,4,105,0),24 },
+       { IPv4(196,4,143,0),24 },
+       { IPv4(196,4,144,0),24 },
+       { IPv4(196,4,145,0),24 },
+       { IPv4(196,4,146,0),24 },
+       { IPv4(196,4,162,0),23 },
+       { IPv4(196,4,164,0),24 },
+       { IPv4(196,4,165,0),24 },
+       { IPv4(196,4,168,0),24 },
+       { IPv4(196,4,172,0),24 },
+       { IPv4(196,4,173,0),24 },
+       { IPv4(196,4,174,0),23 },
+       { IPv4(196,4,176,0),21 },
+       { IPv4(196,4,184,0),22 },
+       { IPv4(196,4,188,0),24 },
+       { IPv4(196,4,212,0),22 },
+       { IPv4(196,4,216,0),21 },
+       { IPv4(196,4,224,0),21 },
+       { IPv4(196,4,232,0),21 },
+       { IPv4(196,4,239,0),24 },
+       { IPv4(196,4,240,0),24 },
+       { IPv4(196,4,240,0),21 },
+       { IPv4(196,4,241,0),24 },
+       { IPv4(196,4,242,0),24 },
+       { IPv4(196,4,243,0),24 },
+       { IPv4(196,4,244,0),24 },
+       { IPv4(196,4,245,0),24 },
+       { IPv4(196,4,246,0),24 },
+       { IPv4(196,4,247,0),24 },
+       { IPv4(196,4,248,0),24 },
+       { IPv4(196,4,250,0),23 },
+       { IPv4(196,5,0,0),16 },
+       { IPv4(196,6,1,0),24 },
+       { IPv4(196,6,121,0),24 },
+       { IPv4(196,6,149,0),24 },
+       { IPv4(196,6,150,0),24 },
+       { IPv4(196,6,151,0),24 },
+       { IPv4(196,6,153,0),24 },
+       { IPv4(196,6,175,0),24 },
+       { IPv4(196,6,176,0),23 },
+       { IPv4(196,6,178,0),24 },
+       { IPv4(196,6,183,0),24 },
+       { IPv4(196,6,184,0),24 },
+       { IPv4(196,6,196,0),24 },
+       { IPv4(196,6,198,0),24 },
+       { IPv4(196,6,199,0),24 },
+       { IPv4(196,6,201,0),24 },
+       { IPv4(196,6,208,0),24 },
+       { IPv4(196,6,211,0),24 },
+       { IPv4(196,6,212,0),23 },
+       { IPv4(196,6,212,0),24 },
+       { IPv4(196,6,213,0),24 },
+       { IPv4(196,6,214,0),24 },
+       { IPv4(196,6,220,0),23 },
+       { IPv4(196,6,222,0),23 },
+       { IPv4(196,6,237,0),24 },
+       { IPv4(196,6,241,0),24 },
+       { IPv4(196,6,242,0),24 },
+       { IPv4(196,6,243,0),24 },
+       { IPv4(196,6,251,0),24 },
+       { IPv4(196,8,115,0),24 },
+       { IPv4(196,9,0,0),16 },
+       { IPv4(196,10,51,0),24 },
+       { IPv4(196,10,96,0),24 },
+       { IPv4(196,10,101,0),24 },
+       { IPv4(196,10,104,0),24 },
+       { IPv4(196,10,106,0),24 },
+       { IPv4(196,10,107,0),24 },
+       { IPv4(196,10,108,0),24 },
+       { IPv4(196,10,110,0),24 },
+       { IPv4(196,10,111,0),24 },
+       { IPv4(196,10,112,0),22 },
+       { IPv4(196,10,118,0),24 },
+       { IPv4(196,10,119,0),24 },
+       { IPv4(196,10,121,0),24 },
+       { IPv4(196,10,122,0),23 },
+       { IPv4(196,10,124,0),22 },
+       { IPv4(196,10,130,0),23 },
+       { IPv4(196,10,132,0),22 },
+       { IPv4(196,10,136,0),22 },
+       { IPv4(196,10,140,0),22 },
+       { IPv4(196,10,150,0),23 },
+       { IPv4(196,10,152,0),21 },
+       { IPv4(196,10,160,0),19 },
+       { IPv4(196,10,192,0),21 },
+       { IPv4(196,10,203,0),24 },
+       { IPv4(196,10,204,0),22 },
+       { IPv4(196,10,208,0),22 },
+       { IPv4(196,10,212,0),23 },
+       { IPv4(196,10,224,0),24 },
+       { IPv4(196,10,229,0),24 },
+       { IPv4(196,10,231,0),24 },
+       { IPv4(196,10,248,0),23 },
+       { IPv4(196,10,249,0),24 },
+       { IPv4(196,10,251,0),24 },
+       { IPv4(196,10,252,0),23 },
+       { IPv4(196,10,254,0),24 },
+       { IPv4(196,11,0,0),20 },
+       { IPv4(196,11,16,0),21 },
+       { IPv4(196,11,24,0),22 },
+       { IPv4(196,11,28,0),23 },
+       { IPv4(196,11,30,0),24 },
+       { IPv4(196,11,40,0),21 },
+       { IPv4(196,11,57,0),24 },
+       { IPv4(196,11,58,0),24 },
+       { IPv4(196,11,61,0),24 },
+       { IPv4(196,11,69,0),24 },
+       { IPv4(196,11,70,0),24 },
+       { IPv4(196,11,72,0),24 },
+       { IPv4(196,11,98,0),24 },
+       { IPv4(196,11,109,0),24 },
+       { IPv4(196,11,110,0),23 },
+       { IPv4(196,11,112,0),23 },
+       { IPv4(196,11,114,0),24 },
+       { IPv4(196,11,116,0),24 },
+       { IPv4(196,11,122,0),24 },
+       { IPv4(196,11,124,0),24 },
+       { IPv4(196,11,127,0),24 },
+       { IPv4(196,11,134,0),24 },
+       { IPv4(196,11,135,0),24 },
+       { IPv4(196,11,136,0),21 },
+       { IPv4(196,11,144,0),23 },
+       { IPv4(196,11,146,0),23 },
+       { IPv4(196,11,148,0),23 },
+       { IPv4(196,11,170,0),23 },
+       { IPv4(196,11,172,0),23 },
+       { IPv4(196,11,174,0),24 },
+       { IPv4(196,11,188,0),23 },
+       { IPv4(196,11,188,0),24 },
+       { IPv4(196,11,190,0),24 },
+       { IPv4(196,11,192,0),22 },
+       { IPv4(196,11,196,0),24 },
+       { IPv4(196,11,197,0),24 },
+       { IPv4(196,11,200,0),22 },
+       { IPv4(196,11,204,0),24 },
+       { IPv4(196,11,205,0),24 },
+       { IPv4(196,11,209,0),24 },
+       { IPv4(196,11,233,0),24 },
+       { IPv4(196,11,235,0),24 },
+       { IPv4(196,11,239,0),24 },
+       { IPv4(196,11,240,0),23 },
+       { IPv4(196,11,243,0),24 },
+       { IPv4(196,11,244,0),23 },
+       { IPv4(196,11,251,0),24 },
+       { IPv4(196,12,16,0),24 },
+       { IPv4(196,12,160,0),24 },
+       { IPv4(196,12,161,0),24 },
+       { IPv4(196,12,162,0),24 },
+       { IPv4(196,12,163,0),24 },
+       { IPv4(196,12,164,0),24 },
+       { IPv4(196,12,165,0),24 },
+       { IPv4(196,12,166,0),24 },
+       { IPv4(196,12,167,0),24 },
+       { IPv4(196,12,168,0),24 },
+       { IPv4(196,12,169,0),24 },
+       { IPv4(196,12,170,0),24 },
+       { IPv4(196,12,171,0),24 },
+       { IPv4(196,12,172,0),24 },
+       { IPv4(196,12,173,0),24 },
+       { IPv4(196,12,174,0),24 },
+       { IPv4(196,12,175,0),24 },
+       { IPv4(196,12,176,0),24 },
+       { IPv4(196,12,177,0),24 },
+       { IPv4(196,12,178,0),24 },
+       { IPv4(196,12,179,0),24 },
+       { IPv4(196,12,180,0),24 },
+       { IPv4(196,12,181,0),24 },
+       { IPv4(196,12,182,0),24 },
+       { IPv4(196,12,183,0),24 },
+       { IPv4(196,12,184,0),24 },
+       { IPv4(196,12,185,0),24 },
+       { IPv4(196,12,186,0),24 },
+       { IPv4(196,12,187,0),24 },
+       { IPv4(196,12,188,0),24 },
+       { IPv4(196,12,189,0),24 },
+       { IPv4(196,12,190,0),24 },
+       { IPv4(196,12,191,0),24 },
+       { IPv4(196,13,1,0),24 },
+       { IPv4(196,13,2,0),23 },
+       { IPv4(196,13,4,0),22 },
+       { IPv4(196,13,8,0),21 },
+       { IPv4(196,13,16,0),21 },
+       { IPv4(196,13,24,0),22 },
+       { IPv4(196,13,28,0),23 },
+       { IPv4(196,13,30,0),24 },
+       { IPv4(196,13,31,0),24 },
+       { IPv4(196,13,32,0),19 },
+       { IPv4(196,13,64,0),20 },
+       { IPv4(196,13,80,0),24 },
+       { IPv4(196,13,93,0),24 },
+       { IPv4(196,13,97,0),24 },
+       { IPv4(196,13,101,0),24 },
+       { IPv4(196,13,102,0),23 },
+       { IPv4(196,13,104,0),24 },
+       { IPv4(196,13,108,0),24 },
+       { IPv4(196,13,113,0),24 },
+       { IPv4(196,13,115,0),24 },
+       { IPv4(196,13,118,0),24 },
+       { IPv4(196,13,121,0),24 },
+       { IPv4(196,13,125,0),24 },
+       { IPv4(196,13,126,0),24 },
+       { IPv4(196,13,127,0),24 },
+       { IPv4(196,13,128,0),22 },
+       { IPv4(196,13,132,0),24 },
+       { IPv4(196,13,138,0),24 },
+       { IPv4(196,13,139,0),24 },
+       { IPv4(196,13,140,0),22 },
+       { IPv4(196,13,144,0),22 },
+       { IPv4(196,13,150,0),24 },
+       { IPv4(196,13,151,0),24 },
+       { IPv4(196,13,152,0),21 },
+       { IPv4(196,13,160,0),24 },
+       { IPv4(196,13,163,0),24 },
+       { IPv4(196,13,164,0),24 },
+       { IPv4(196,13,165,0),24 },
+       { IPv4(196,13,169,0),24 },
+       { IPv4(196,13,174,0),23 },
+       { IPv4(196,13,176,0),21 },
+       { IPv4(196,13,184,0),23 },
+       { IPv4(196,13,187,0),24 },
+       { IPv4(196,13,188,0),22 },
+       { IPv4(196,13,192,0),22 },
+       { IPv4(196,13,196,0),24 },
+       { IPv4(196,13,200,0),22 },
+       { IPv4(196,13,204,0),24 },
+       { IPv4(196,13,213,0),24 },
+       { IPv4(196,13,214,0),23 },
+       { IPv4(196,13,216,0),23 },
+       { IPv4(196,13,225,0),24 },
+       { IPv4(196,13,226,0),23 },
+       { IPv4(196,13,228,0),22 },
+       { IPv4(196,13,232,0),24 },
+       { IPv4(196,13,252,0),22 },
+       { IPv4(196,14,0,0),16 },
+       { IPv4(196,15,0,0),21 },
+       { IPv4(196,21,0,0),16 },
+       { IPv4(196,22,0,0),22 },
+       { IPv4(196,22,32,0),24 },
+       { IPv4(196,22,160,0),19 },
+       { IPv4(196,22,162,0),24 },
+       { IPv4(196,22,166,0),24 },
+       { IPv4(196,22,170,0),24 },
+       { IPv4(196,22,176,0),24 },
+       { IPv4(196,22,181,0),24 },
+       { IPv4(196,22,182,0),24 },
+       { IPv4(196,22,183,0),24 },
+       { IPv4(196,22,189,0),24 },
+       { IPv4(196,23,0,0),16 },
+       { IPv4(196,24,0,0),16 },
+       { IPv4(196,26,0,0),16 },
+       { IPv4(196,27,12,0),24 },
+       { IPv4(196,27,15,0),24 },
+       { IPv4(196,27,19,0),24 },
+       { IPv4(196,27,40,0),22 },
+       { IPv4(196,27,48,0),22 },
+       { IPv4(196,28,5,0),24 },
+       { IPv4(196,28,8,0),24 },
+       { IPv4(196,28,16,0),20 },
+       { IPv4(196,28,32,0),20 },
+       { IPv4(196,28,64,0),19 },
+       { IPv4(196,28,96,0),19 },
+       { IPv4(196,29,6,0),24 },
+       { IPv4(196,29,32,0),24 },
+       { IPv4(196,29,33,0),24 },
+       { IPv4(196,29,34,0),24 },
+       { IPv4(196,29,35,0),24 },
+       { IPv4(196,29,36,0),24 },
+       { IPv4(196,29,37,0),24 },
+       { IPv4(196,29,38,0),24 },
+       { IPv4(196,29,39,0),24 },
+       { IPv4(196,32,0,0),21 },
+       { IPv4(196,33,0,0),16 },
+       { IPv4(196,34,0,0),15 },
+       { IPv4(196,36,0,0),16 },
+       { IPv4(196,37,0,0),16 },
+       { IPv4(196,38,0,0),16 },
+       { IPv4(196,39,0,0),17 },
+       { IPv4(196,41,67,0),24 },
+       { IPv4(196,41,128,0),19 },
+       { IPv4(196,41,160,0),20 },
+       { IPv4(196,41,171,0),24 },
+       { IPv4(196,41,192,0),19 },
+       { IPv4(196,43,0,0),18 },
+       { IPv4(196,44,0,0),19 },
+       { IPv4(198,1,32,0),20 },
+       { IPv4(198,1,35,0),24 },
+       { IPv4(198,1,36,0),23 },
+       { IPv4(198,1,48,0),22 },
+       { IPv4(198,3,16,0),20 },
+       { IPv4(198,3,122,0),24 },
+       { IPv4(198,3,124,0),24 },
+       { IPv4(198,4,44,0),24 },
+       { IPv4(198,4,64,0),20 },
+       { IPv4(198,5,5,0),24 },
+       { IPv4(198,5,6,0),24 },
+       { IPv4(198,5,222,0),23 },
+       { IPv4(198,6,80,0),24 },
+       { IPv4(198,6,95,0),24 },
+       { IPv4(198,6,196,0),24 },
+       { IPv4(198,6,245,0),24 },
+       { IPv4(198,6,255,0),24 },
+       { IPv4(198,7,0,0),21 },
+       { IPv4(198,7,128,0),18 },
+       { IPv4(198,7,142,0),24 },
+       { IPv4(198,8,16,0),20 },
+       { IPv4(198,8,32,0),20 },
+       { IPv4(198,8,48,0),21 },
+       { IPv4(198,8,56,0),24 },
+       { IPv4(198,8,64,0),22 },
+       { IPv4(198,8,68,0),23 },
+       { IPv4(198,9,0,0),16 },
+       { IPv4(198,10,0,0),16 },
+       { IPv4(198,11,16,0),20 },
+       { IPv4(198,11,32,0),19 },
+       { IPv4(198,11,57,0),24 },
+       { IPv4(198,11,58,0),24 },
+       { IPv4(198,14,32,0),19 },
+       { IPv4(198,17,5,0),24 },
+       { IPv4(198,17,37,0),24 },
+       { IPv4(198,17,39,0),24 },
+       { IPv4(198,17,40,0),24 },
+       { IPv4(198,17,46,0),24 },
+       { IPv4(198,17,47,0),24 },
+       { IPv4(198,17,57,0),24 },
+       { IPv4(198,17,59,0),24 },
+       { IPv4(198,17,62,0),24 },
+       { IPv4(198,17,81,0),24 },
+       { IPv4(198,17,101,0),24 },
+       { IPv4(198,17,107,0),24 },
+       { IPv4(198,17,138,0),24 },
+       { IPv4(198,17,144,0),24 },
+       { IPv4(198,17,145,0),24 },
+       { IPv4(198,17,150,0),23 },
+       { IPv4(198,17,169,0),24 },
+       { IPv4(198,17,176,0),24 },
+       { IPv4(198,17,183,0),24 },
+       { IPv4(198,17,184,0),24 },
+       { IPv4(198,17,184,0),23 },
+       { IPv4(198,17,186,0),24 },
+       { IPv4(198,17,189,0),24 },
+       { IPv4(198,17,191,0),24 },
+       { IPv4(198,17,192,0),23 },
+       { IPv4(198,17,194,0),24 },
+       { IPv4(198,17,200,0),24 },
+       { IPv4(198,17,205,0),24 },
+       { IPv4(198,17,235,0),24 },
+       { IPv4(198,17,242,0),24 },
+       { IPv4(198,17,243,0),24 },
+       { IPv4(198,17,247,0),24 },
+       { IPv4(198,17,249,0),24 },
+       { IPv4(198,20,8,0),21 },
+       { IPv4(198,22,5,0),24 },
+       { IPv4(198,22,6,0),23 },
+       { IPv4(198,22,8,0),23 },
+       { IPv4(198,22,19,0),24 },
+       { IPv4(198,22,28,0),24 },
+       { IPv4(198,22,41,0),24 },
+       { IPv4(198,22,62,0),23 },
+       { IPv4(198,22,64,0),22 },
+       { IPv4(198,22,99,0),24 },
+       { IPv4(198,22,109,0),24 },
+       { IPv4(198,22,110,0),24 },
+       { IPv4(198,22,121,0),24 },
+       { IPv4(198,22,129,0),24 },
+       { IPv4(198,22,133,0),24 },
+       { IPv4(198,22,137,0),24 },
+       { IPv4(198,22,146,0),24 },
+       { IPv4(198,22,176,0),24 },
+       { IPv4(198,22,229,0),24 },
+       { IPv4(198,22,230,0),24 },
+       { IPv4(198,22,249,0),24 },
+       { IPv4(198,24,6,0),24 },
+       { IPv4(198,25,0,0),16 },
+       { IPv4(198,25,24,0),24 },
+       { IPv4(198,25,35,0),24 },
+       { IPv4(198,25,42,0),24 },
+       { IPv4(198,25,48,0),24 },
+       { IPv4(198,25,50,0),24 },
+       { IPv4(198,25,67,0),24 },
+       { IPv4(198,25,72,0),24 },
+       { IPv4(198,25,102,0),24 },
+       { IPv4(198,25,141,0),24 },
+       { IPv4(198,25,142,0),24 },
+       { IPv4(198,25,143,0),24 },
+       { IPv4(198,25,150,0),24 },
+       { IPv4(198,25,190,0),24 },
+       { IPv4(198,25,191,0),24 },
+       { IPv4(198,25,192,0),24 },
+       { IPv4(198,25,195,0),24 },
+       { IPv4(198,25,197,0),24 },
+       { IPv4(198,25,199,0),24 },
+       { IPv4(198,25,202,0),24 },
+       { IPv4(198,25,212,0),24 },
+       { IPv4(198,25,230,0),24 },
+       { IPv4(198,25,231,0),24 },
+       { IPv4(198,25,232,0),24 },
+       { IPv4(198,25,236,0),24 },
+       { IPv4(198,25,237,0),24 },
+       { IPv4(198,25,239,0),24 },
+       { IPv4(198,25,240,0),24 },
+       { IPv4(198,25,242,0),24 },
+       { IPv4(198,25,243,0),24 },
+       { IPv4(198,26,0,0),16 },
+       { IPv4(198,26,118,0),24 },
+       { IPv4(198,26,171,0),24 },
+       { IPv4(198,26,172,0),24 },
+       { IPv4(198,26,173,0),24 },
+       { IPv4(198,26,174,0),24 },
+       { IPv4(198,26,175,0),24 },
+       { IPv4(198,26,177,0),24 },
+       { IPv4(198,26,178,0),24 },
+       { IPv4(198,26,180,0),24 },
+       { IPv4(198,26,181,0),24 },
+       { IPv4(198,26,182,0),24 },
+       { IPv4(198,26,183,0),24 },
+       { IPv4(198,26,186,0),24 },
+       { IPv4(198,26,187,0),24 },
+       { IPv4(198,26,188,0),24 },
+       { IPv4(198,26,190,0),24 },
+       { IPv4(198,26,192,0),24 },
+       { IPv4(198,26,199,0),24 },
+       { IPv4(198,26,227,0),24 },
+       { IPv4(198,27,18,0),24 },
+       { IPv4(198,27,24,0),24 },
+       { IPv4(198,27,38,0),24 },
+       { IPv4(198,27,47,0),24 },
+       { IPv4(198,27,48,0),24 },
+       { IPv4(198,27,54,0),24 },
+       { IPv4(198,28,128,0),24 },
+       { IPv4(198,29,0,0),22 },
+       { IPv4(198,31,9,0),24 },
+       { IPv4(198,31,31,0),24 },
+       { IPv4(198,31,158,0),23 },
+       { IPv4(198,31,232,0),23 },
+       { IPv4(198,31,238,0),24 },
+       { IPv4(198,31,238,0),23 },
+       { IPv4(198,31,239,0),24 },
+       { IPv4(198,32,42,0),24 },
+       { IPv4(198,32,64,0),24 },
+       { IPv4(198,32,114,0),24 },
+       { IPv4(198,32,128,0),24 },
+       { IPv4(198,32,136,0),24 },
+       { IPv4(198,32,139,0),24 },
+       { IPv4(198,32,176,0),24 },
+       { IPv4(198,32,177,0),24 },
+       { IPv4(198,32,184,0),24 },
+       { IPv4(198,32,200,0),24 },
+       { IPv4(198,32,212,0),24 },
+       { IPv4(198,32,216,0),24 },
+       { IPv4(198,32,220,0),24 },
+       { IPv4(198,32,224,0),24 },
+       { IPv4(198,32,248,0),24 },
+       { IPv4(198,32,249,0),24 },
+       { IPv4(198,32,251,0),24 },
+       { IPv4(198,34,224,0),21 },
+       { IPv4(198,35,1,0),24 },
+       { IPv4(198,35,2,0),24 },
+       { IPv4(198,35,3,0),24 },
+       { IPv4(198,35,4,0),24 },
+       { IPv4(198,35,5,0),24 },
+       { IPv4(198,35,6,0),24 },
+       { IPv4(198,35,7,0),24 },
+       { IPv4(198,35,8,0),24 },
+       { IPv4(198,35,9,0),24 },
+       { IPv4(198,35,10,0),24 },
+       { IPv4(198,35,11,0),24 },
+       { IPv4(198,35,12,0),24 },
+       { IPv4(198,35,13,0),24 },
+       { IPv4(198,35,14,0),24 },
+       { IPv4(198,35,15,0),24 },
+       { IPv4(198,35,128,0),24 },
+       { IPv4(198,36,16,0),21 },
+       { IPv4(198,36,24,0),22 },
+       { IPv4(198,36,180,0),23 },
+       { IPv4(198,36,190,0),24 },
+       { IPv4(198,37,16,0),21 },
+       { IPv4(198,37,24,0),22 },
+       { IPv4(198,38,8,0),22 },
+       { IPv4(198,38,12,0),24 },
+       { IPv4(198,40,16,0),21 },
+       { IPv4(198,40,24,0),22 },
+       { IPv4(198,40,28,0),23 },
+       { IPv4(198,40,30,0),24 },
+       { IPv4(198,41,0,0),24 },
+       { IPv4(198,41,1,0),24 },
+       { IPv4(198,41,3,0),24 },
+       { IPv4(198,41,6,0),24 },
+       { IPv4(198,41,8,0),24 },
+       { IPv4(198,41,9,0),24 },
+       { IPv4(198,41,10,0),24 },
+       { IPv4(198,41,11,0),24 },
+       { IPv4(198,43,100,0),24 },
+       { IPv4(198,43,237,0),24 },
+       { IPv4(198,45,18,0),24 },
+       { IPv4(198,45,20,0),24 },
+       { IPv4(198,45,22,0),24 },
+       { IPv4(198,45,23,0),24 },
+       { IPv4(198,45,24,0),24 },
+       { IPv4(198,46,0,0),21 },
+       { IPv4(198,46,8,0),24 },
+       { IPv4(198,46,9,0),24 },
+       { IPv4(198,46,75,0),24 },
+       { IPv4(198,48,16,0),23 },
+       { IPv4(198,48,16,0),24 },
+       { IPv4(198,48,17,0),24 },
+       { IPv4(198,49,22,0),24 },
+       { IPv4(198,49,45,0),24 },
+       { IPv4(198,49,92,0),23 },
+       { IPv4(198,49,103,0),24 },
+       { IPv4(198,49,104,0),24 },
+       { IPv4(198,49,114,0),24 },
+       { IPv4(198,49,120,0),22 },
+       { IPv4(198,49,143,0),24 },
+       { IPv4(198,49,144,0),23 },
+       { IPv4(198,49,168,0),23 },
+       { IPv4(198,49,174,0),24 },
+       { IPv4(198,49,182,0),24 },
+       { IPv4(198,49,183,0),24 },
+       { IPv4(198,49,184,0),21 },
+       { IPv4(198,49,192,0),24 },
+       { IPv4(198,49,205,0),24 },
+       { IPv4(198,49,206,0),24 },
+       { IPv4(198,49,207,0),24 },
+       { IPv4(198,49,208,0),24 },
+       { IPv4(198,49,224,0),21 },
+       { IPv4(198,49,232,0),22 },
+       { IPv4(198,49,236,0),24 },
+       { IPv4(198,49,239,0),24 },
+       { IPv4(198,49,240,0),24 },
+       { IPv4(198,49,241,0),24 },
+       { IPv4(198,50,1,0),24 },
+       { IPv4(198,50,7,0),24 },
+       { IPv4(198,50,9,0),24 },
+       { IPv4(198,51,13,0),24 },
+       { IPv4(198,51,14,0),24 },
+       { IPv4(198,51,35,0),24 },
+       { IPv4(198,51,72,0),24 },
+       { IPv4(198,51,90,0),24 },
+       { IPv4(198,51,93,0),24 },
+       { IPv4(198,51,94,0),24 },
+       { IPv4(198,51,109,0),24 },
+       { IPv4(198,51,141,0),24 },
+       { IPv4(198,51,170,0),23 },
+       { IPv4(198,51,173,0),24 },
+       { IPv4(198,51,177,0),24 },
+       { IPv4(198,51,178,0),24 },
+       { IPv4(198,51,184,0),23 },
+       { IPv4(198,51,191,0),24 },
+       { IPv4(198,51,192,0),24 },
+       { IPv4(198,51,193,0),24 },
+       { IPv4(198,51,209,0),24 },
+       { IPv4(198,51,210,0),24 },
+       { IPv4(198,51,214,0),24 },
+       { IPv4(198,51,215,0),24 },
+       { IPv4(198,51,234,0),24 },
+       { IPv4(198,51,237,0),24 },
+       { IPv4(198,51,238,0),24 },
+       { IPv4(198,51,239,0),24 },
+       { IPv4(198,51,241,0),24 },
+       { IPv4(198,52,0,0),22 },
+       { IPv4(198,53,26,0),23 },
+       { IPv4(198,53,26,0),24 },
+       { IPv4(198,54,20,0),22 },
+       { IPv4(198,54,24,0),21 },
+       { IPv4(198,54,32,0),21 },
+       { IPv4(198,54,40,0),23 },
+       { IPv4(198,54,58,0),24 },
+       { IPv4(198,54,64,0),24 },
+       { IPv4(198,54,65,0),24 },
+       { IPv4(198,54,66,0),24 },
+       { IPv4(198,54,68,0),24 },
+       { IPv4(198,54,71,0),24 },
+       { IPv4(198,54,72,0),22 },
+       { IPv4(198,54,80,0),24 },
+       { IPv4(198,54,82,0),24 },
+       { IPv4(198,54,83,0),24 },
+       { IPv4(198,54,84,0),24 },
+       { IPv4(198,54,90,0),23 },
+       { IPv4(198,54,92,0),24 },
+       { IPv4(198,54,149,0),24 },
+       { IPv4(198,54,154,0),23 },
+       { IPv4(198,54,155,0),24 },
+       { IPv4(198,54,156,0),22 },
+       { IPv4(198,54,160,0),23 },
+       { IPv4(198,54,162,0),24 },
+       { IPv4(198,54,163,0),24 },
+       { IPv4(198,54,165,0),24 },
+       { IPv4(198,54,170,0),24 },
+       { IPv4(198,54,173,0),24 },
+       { IPv4(198,54,174,0),24 },
+       { IPv4(198,54,184,0),22 },
+       { IPv4(198,54,188,0),23 },
+       { IPv4(198,54,192,0),22 },
+       { IPv4(198,54,196,0),24 },
+       { IPv4(198,54,202,0),24 },
+       { IPv4(198,54,219,0),24 },
+       { IPv4(198,54,222,0),23 },
+       { IPv4(198,54,225,0),24 },
+       { IPv4(198,54,234,0),24 },
+       { IPv4(198,54,253,0),24 },
+       { IPv4(198,55,4,0),24 },
+       { IPv4(198,55,8,0),21 },
+       { IPv4(198,55,64,0),20 },
+       { IPv4(198,55,69,0),24 },
+       { IPv4(198,55,80,0),21 },
+       { IPv4(198,55,84,0),24 },
+       { IPv4(198,55,85,0),24 },
+       { IPv4(198,55,86,0),24 },
+       { IPv4(198,55,87,0),24 },
+       { IPv4(198,55,88,0),22 },
+       { IPv4(198,55,89,0),24 },
+       { IPv4(198,55,92,0),23 },
+       { IPv4(198,55,93,0),24 },
+       { IPv4(198,56,0,0),21 },
+       { IPv4(198,57,64,0),20 },
+       { IPv4(198,58,0,0),24 },
+       { IPv4(198,58,1,0),24 },
+       { IPv4(198,58,16,0),22 },
+       { IPv4(198,58,16,0),21 },
+       { IPv4(198,58,20,0),22 },
+       { IPv4(198,58,24,0),22 },
+       { IPv4(198,58,24,0),23 },
+       { IPv4(198,58,26,0),23 },
+       { IPv4(198,58,37,0),24 },
+       { IPv4(198,58,38,0),24 },
+       { IPv4(198,58,64,0),21 },
+       { IPv4(198,58,64,0),22 },
+       { IPv4(198,58,68,0),22 },
+       { IPv4(198,58,70,0),24 },
+       { IPv4(198,58,71,0),24 },
+       { IPv4(198,59,0,0),18 },
+       { IPv4(198,59,2,0),24 },
+       { IPv4(198,59,7,0),24 },
+       { IPv4(198,59,36,0),24 },
+       { IPv4(198,59,40,0),24 },
+       { IPv4(198,59,46,0),24 },
+       { IPv4(198,59,47,0),24 },
+       { IPv4(198,59,48,0),24 },
+       { IPv4(198,59,49,0),24 },
+       { IPv4(198,59,54,0),24 },
+       { IPv4(198,59,55,0),24 },
+       { IPv4(198,59,59,0),24 },
+       { IPv4(198,59,61,0),24 },
+       { IPv4(198,59,64,0),19 },
+       { IPv4(198,59,69,0),24 },
+       { IPv4(198,59,70,0),24 },
+       { IPv4(198,59,81,0),24 },
+       { IPv4(198,59,82,0),24 },
+       { IPv4(198,59,83,0),24 },
+       { IPv4(198,59,87,0),24 },
+       { IPv4(198,59,89,0),24 },
+       { IPv4(198,59,93,0),24 },
+       { IPv4(198,60,0,0),18 },
+       { IPv4(198,60,1,0),24 },
+       { IPv4(198,60,3,0),24 },
+       { IPv4(198,60,4,0),24 },
+       { IPv4(198,60,5,0),24 },
+       { IPv4(198,60,9,0),24 },
+       { IPv4(198,60,17,0),24 },
+       { IPv4(198,60,22,0),24 },
+       { IPv4(198,60,64,0),19 },
+       { IPv4(198,60,72,0),22 },
+       { IPv4(198,60,80,0),23 },
+       { IPv4(198,60,82,0),24 },
+       { IPv4(198,60,84,0),24 },
+       { IPv4(198,60,85,0),24 },
+       { IPv4(198,60,86,0),23 },
+       { IPv4(198,60,88,0),22 },
+       { IPv4(198,60,92,0),24 },
+       { IPv4(198,60,93,0),24 },
+       { IPv4(198,60,94,0),24 },
+       { IPv4(198,60,95,0),24 },
+       { IPv4(198,60,96,0),20 },
+       { IPv4(198,60,96,0),24 },
+       { IPv4(198,60,97,0),24 },
+       { IPv4(198,60,98,0),24 },
+       { IPv4(198,60,99,0),24 },
+       { IPv4(198,60,100,0),24 },
+       { IPv4(198,60,101,0),24 },
+       { IPv4(198,60,102,0),24 },
+       { IPv4(198,60,103,0),24 },
+       { IPv4(198,60,104,0),24 },
+       { IPv4(198,60,105,0),24 },
+       { IPv4(198,60,109,0),24 },
+       { IPv4(198,60,110,0),24 },
+       { IPv4(198,60,112,0),21 },
+       { IPv4(198,60,114,0),24 },
+       { IPv4(198,60,121,0),24 },
+       { IPv4(198,60,122,0),23 },
+       { IPv4(198,60,124,0),22 },
+       { IPv4(198,60,129,0),24 },
+       { IPv4(198,60,132,0),24 },
+       { IPv4(198,60,143,0),24 },
+       { IPv4(198,60,144,0),20 },
+       { IPv4(198,60,148,0),24 },
+       { IPv4(198,60,152,0),24 },
+       { IPv4(198,60,159,0),24 },
+       { IPv4(198,60,160,0),19 },
+       { IPv4(198,60,183,0),24 },
+       { IPv4(198,60,186,0),24 },
+       { IPv4(198,60,195,0),24 },
+       { IPv4(198,60,217,0),24 },
+       { IPv4(198,60,218,0),23 },
+       { IPv4(198,60,220,0),22 },
+       { IPv4(198,60,224,0),22 },
+       { IPv4(198,60,251,0),24 },
+       { IPv4(198,61,16,0),20 },
+       { IPv4(198,62,8,0),24 },
+       { IPv4(198,62,9,0),24 },
+       { IPv4(198,62,10,0),24 },
+       { IPv4(198,62,11,0),24 },
+       { IPv4(198,62,64,0),24 },
+       { IPv4(198,62,65,0),24 },
+       { IPv4(198,62,66,0),24 },
+       { IPv4(198,62,106,0),24 },
+       { IPv4(198,62,112,0),24 },
+       { IPv4(198,62,120,0),23 },
+       { IPv4(198,62,142,0),24 },
+       { IPv4(198,62,155,0),24 },
+       { IPv4(198,62,160,0),24 },
+       { IPv4(198,62,186,0),24 },
+       { IPv4(198,62,187,0),24 },
+       { IPv4(198,62,198,0),24 },
+       { IPv4(198,62,205,0),24 },
+       { IPv4(198,62,209,0),24 },
+       { IPv4(198,62,210,0),24 },
+       { IPv4(198,62,212,0),24 },
+       { IPv4(198,62,230,0),24 },
+       { IPv4(198,62,231,0),24 },
+       { IPv4(198,62,232,0),24 },
+       { IPv4(198,62,233,0),24 },
+       { IPv4(198,62,242,0),24 },
+       { IPv4(198,62,246,0),23 },
+       { IPv4(198,62,248,0),23 },
+       { IPv4(198,62,250,0),24 },
+       { IPv4(198,63,0,0),16 },
+       { IPv4(198,63,24,0),24 },
+       { IPv4(198,63,193,0),24 },
+       { IPv4(198,63,227,0),24 },
+       { IPv4(198,64,0,0),15 },
+       { IPv4(198,64,127,0),24 },
+       { IPv4(198,65,199,0),24 },
+       { IPv4(198,66,0,0),16 },
+       { IPv4(198,67,15,0),24 },
+       { IPv4(198,67,33,0),24 },
+       { IPv4(198,67,38,0),24 },
+       { IPv4(198,68,64,0),18 },
+       { IPv4(198,68,128,0),20 },
+       { IPv4(198,68,144,0),20 },
+       { IPv4(198,68,164,0),22 },
+       { IPv4(198,68,168,0),21 },
+       { IPv4(198,68,181,0),24 },
+       { IPv4(198,68,193,0),24 },
+       { IPv4(198,68,224,0),20 },
+       { IPv4(198,69,2,0),23 },
+       { IPv4(198,69,26,0),24 },
+       { IPv4(198,69,80,0),23 },
+       { IPv4(198,69,82,0),24 },
+       { IPv4(198,69,84,0),22 },
+       { IPv4(198,69,88,0),21 },
+       { IPv4(198,69,90,0),24 },
+       { IPv4(198,69,131,0),24 },
+       { IPv4(198,69,134,0),24 },
+       { IPv4(198,69,184,0),23 },
+       { IPv4(198,69,186,0),23 },
+       { IPv4(198,69,188,0),22 },
+       { IPv4(198,69,191,0),24 },
+       { IPv4(198,70,176,0),20 },
+       { IPv4(198,70,195,0),24 },
+       { IPv4(198,70,196,0),22 },
+       { IPv4(198,70,209,0),24 },
+       { IPv4(198,70,220,0),23 },
+       { IPv4(198,70,222,0),23 },
+       { IPv4(198,70,224,0),20 },
+       { IPv4(198,70,240,0),23 },
+       { IPv4(198,70,242,0),24 },
+       { IPv4(198,70,243,0),24 },
+       { IPv4(198,70,244,0),23 },
+       { IPv4(198,72,0,0),22 },
+       { IPv4(198,72,5,0),24 },
+       { IPv4(198,72,8,0),22 },
+       { IPv4(198,72,12,0),24 },
+       { IPv4(198,72,32,0),21 },
+       { IPv4(198,72,40,0),23 },
+       { IPv4(198,72,64,0),21 },
+       { IPv4(198,72,72,0),22 },
+       { IPv4(198,73,137,0),24 },
+       { IPv4(198,73,138,0),24 },
+       { IPv4(198,73,139,0),24 },
+       { IPv4(198,73,176,0),24 },
+       { IPv4(198,73,190,0),24 },
+       { IPv4(198,73,248,0),24 },
+       { IPv4(198,73,249,0),24 },
+       { IPv4(198,73,252,0),24 },
+       { IPv4(198,73,253,0),24 },
+       { IPv4(198,74,16,0),24 },
+       { IPv4(198,74,18,0),24 },
+       { IPv4(198,74,20,0),24 },
+       { IPv4(198,74,22,0),24 },
+       { IPv4(198,74,24,0),24 },
+       { IPv4(198,74,25,0),24 },
+       { IPv4(198,74,26,0),24 },
+       { IPv4(198,74,32,0),21 },
+       { IPv4(198,74,40,0),23 },
+       { IPv4(198,76,23,0),24 },
+       { IPv4(198,76,29,0),24 },
+       { IPv4(198,76,30,0),24 },
+       { IPv4(198,76,31,0),24 },
+       { IPv4(198,76,126,0),23 },
+       { IPv4(198,76,162,0),24 },
+       { IPv4(198,76,176,0),23 },
+       { IPv4(198,76,178,0),24 },
+       { IPv4(198,77,0,0),18 },
+       { IPv4(198,77,32,0),23 },
+       { IPv4(198,77,54,0),23 },
+       { IPv4(198,77,56,0),22 },
+       { IPv4(198,77,66,0),24 },
+       { IPv4(198,77,86,0),23 },
+       { IPv4(198,77,104,0),24 },
+       { IPv4(198,77,105,0),24 },
+       { IPv4(198,77,106,0),24 },
+       { IPv4(198,77,110,0),24 },
+       { IPv4(198,77,112,0),22 },
+       { IPv4(198,77,136,0),24 },
+       { IPv4(198,77,248,0),23 },
+       { IPv4(198,78,8,0),21 },
+       { IPv4(198,78,80,0),20 },
+       { IPv4(198,78,96,0),20 },
+       { IPv4(198,78,137,0),24 },
+       { IPv4(198,78,138,0),24 },
+       { IPv4(198,78,224,0),20 },
+       { IPv4(198,79,24,0),22 },
+       { IPv4(198,79,88,0),21 },
+       { IPv4(198,80,0,0),23 },
+       { IPv4(198,80,15,0),24 },
+       { IPv4(198,80,20,0),24 },
+       { IPv4(198,80,56,0),24 },
+       { IPv4(198,80,57,0),24 },
+       { IPv4(198,80,58,0),24 },
+       { IPv4(198,80,59,0),24 },
+       { IPv4(198,80,68,0),22 },
+       { IPv4(198,80,88,0),21 },
+       { IPv4(198,80,129,0),24 },
+       { IPv4(198,80,130,0),24 },
+       { IPv4(198,80,132,0),24 },
+       { IPv4(198,80,135,0),24 },
+       { IPv4(198,80,136,0),24 },
+       { IPv4(198,80,137,0),24 },
+       { IPv4(198,80,138,0),24 },
+       { IPv4(198,80,139,0),24 },
+       { IPv4(198,80,140,0),24 },
+       { IPv4(198,80,142,0),24 },
+       { IPv4(198,80,143,0),24 },
+       { IPv4(198,80,144,0),24 },
+       { IPv4(198,80,145,0),24 },
+       { IPv4(198,80,146,0),24 },
+       { IPv4(198,80,151,0),24 },
+       { IPv4(198,80,152,0),24 },
+       { IPv4(198,80,153,0),24 },
+       { IPv4(198,80,155,0),24 },
+       { IPv4(198,80,156,0),24 },
+       { IPv4(198,80,157,0),24 },
+       { IPv4(198,80,159,0),24 },
+       { IPv4(198,80,160,0),24 },
+       { IPv4(198,80,162,0),24 },
+       { IPv4(198,80,164,0),24 },
+       { IPv4(198,80,165,0),24 },
+       { IPv4(198,80,167,0),24 },
+       { IPv4(198,80,169,0),24 },
+       { IPv4(198,80,170,0),24 },
+       { IPv4(198,80,171,0),24 },
+       { IPv4(198,80,172,0),24 },
+       { IPv4(198,80,173,0),24 },
+       { IPv4(198,80,174,0),24 },
+       { IPv4(198,80,178,0),24 },
+       { IPv4(198,80,180,0),24 },
+       { IPv4(198,80,182,0),24 },
+       { IPv4(198,80,183,0),24 },
+       { IPv4(198,80,185,0),24 },
+       { IPv4(198,80,186,0),24 },
+       { IPv4(198,80,187,0),24 },
+       { IPv4(198,80,189,0),24 },
+       { IPv4(198,80,191,0),24 },
+       { IPv4(198,81,0,0),19 },
+       { IPv4(198,81,4,0),22 },
+       { IPv4(198,81,16,0),20 },
+       { IPv4(198,81,200,0),24 },
+       { IPv4(198,81,230,0),24 },
+       { IPv4(198,81,240,0),24 },
+       { IPv4(198,83,19,0),24 },
+       { IPv4(198,83,28,0),22 },
+       { IPv4(198,83,112,0),20 },
+       { IPv4(198,83,130,0),24 },
+       { IPv4(198,84,16,0),20 },
+       { IPv4(198,84,51,0),24 },
+       { IPv4(198,84,52,0),24 },
+       { IPv4(198,85,74,0),23 },
+       { IPv4(198,85,116,0),24 },
+       { IPv4(198,87,0,0),16 },
+       { IPv4(198,88,0,0),16 },
+       { IPv4(198,89,35,0),24 },
+       { IPv4(198,89,36,0),24 },
+       { IPv4(198,89,37,0),24 },
+       { IPv4(198,89,138,0),24 },
+       { IPv4(198,89,159,0),24 },
+       { IPv4(198,89,160,0),24 },
+       { IPv4(198,91,64,0),24 },
+       { IPv4(198,91,65,0),24 },
+       { IPv4(198,91,66,0),24 },
+       { IPv4(198,91,67,0),24 },
+       { IPv4(198,91,70,0),24 },
+       { IPv4(198,91,71,0),24 },
+       { IPv4(198,91,73,0),24 },
+       { IPv4(198,92,64,0),22 },
+       { IPv4(198,92,104,0),21 },
+       { IPv4(198,92,156,0),23 },
+       { IPv4(198,92,208,0),22 },
+       { IPv4(198,93,92,0),22 },
+       { IPv4(198,93,108,0),24 },
+       { IPv4(198,93,109,0),24 },
+       { IPv4(198,93,110,0),24 },
+       { IPv4(198,93,111,0),24 },
+       { IPv4(198,93,134,0),23 },
+       { IPv4(198,93,136,0),22 },
+       { IPv4(198,94,128,0),21 },
+       { IPv4(198,95,8,0),23 },
+       { IPv4(198,95,10,0),24 },
+       { IPv4(198,95,64,0),21 },
+       { IPv4(198,95,248,0),22 },
+       { IPv4(198,96,2,0),24 },
+       { IPv4(198,96,3,0),24 },
+       { IPv4(198,96,18,0),23 },
+       { IPv4(198,96,46,0),23 },
+       { IPv4(198,96,48,0),21 },
+       { IPv4(198,96,56,0),22 },
+       { IPv4(198,96,60,0),24 },
+       { IPv4(198,96,62,0),24 },
+       { IPv4(198,96,80,0),22 },
+       { IPv4(198,96,113,0),24 },
+       { IPv4(198,96,127,0),24 },
+       { IPv4(198,96,131,0),24 },
+       { IPv4(198,96,185,0),24 },
+       { IPv4(198,96,188,0),24 },
+       { IPv4(198,96,199,0),24 },
+       { IPv4(198,96,223,0),24 },
+       { IPv4(198,96,251,0),24 },
+       { IPv4(198,97,44,0),24 },
+       { IPv4(198,97,52,0),23 },
+       { IPv4(198,97,67,0),24 },
+       { IPv4(198,97,70,0),23 },
+       { IPv4(198,97,70,0),24 },
+       { IPv4(198,97,72,0),21 },
+       { IPv4(198,97,72,0),24 },
+       { IPv4(198,97,79,0),24 },
+       { IPv4(198,97,80,0),24 },
+       { IPv4(198,97,80,0),20 },
+       { IPv4(198,97,81,0),24 },
+       { IPv4(198,97,82,0),23 },
+       { IPv4(198,97,84,0),22 },
+       { IPv4(198,97,88,0),24 },
+       { IPv4(198,97,93,0),24 },
+       { IPv4(198,97,96,0),19 },
+       { IPv4(198,97,108,0),24 },
+       { IPv4(198,97,110,0),24 },
+       { IPv4(198,97,128,0),18 },
+       { IPv4(198,97,135,0),24 },
+       { IPv4(198,97,138,0),24 },
+       { IPv4(198,97,143,0),24 },
+       { IPv4(198,97,144,0),24 },
+       { IPv4(198,97,151,0),24 },
+       { IPv4(198,97,155,0),24 },
+       { IPv4(198,97,192,0),20 },
+       { IPv4(198,97,208,0),24 },
+       { IPv4(198,97,208,0),23 },
+       { IPv4(198,97,209,0),24 },
+       { IPv4(198,97,234,0),23 },
+       { IPv4(198,97,236,0),24 },
+       { IPv4(198,97,240,0),20 },
+       { IPv4(198,98,83,0),24 },
+       { IPv4(198,99,85,0),24 },
+       { IPv4(198,99,88,0),24 },
+       { IPv4(198,99,89,0),24 },
+       { IPv4(198,99,90,0),24 },
+       { IPv4(198,99,106,0),24 },
+       { IPv4(198,99,107,0),24 },
+       { IPv4(198,99,108,0),24 },
+       { IPv4(198,99,110,0),24 },
+       { IPv4(198,99,115,0),24 },
+       { IPv4(198,99,146,0),24 },
+       { IPv4(198,99,191,0),24 },
+       { IPv4(198,99,201,0),24 },
+       { IPv4(198,99,225,0),24 },
+       { IPv4(198,99,239,0),24 },
+       { IPv4(198,99,244,0),24 },
+       { IPv4(198,101,4,0),22 },
+       { IPv4(198,101,23,0),24 },
+       { IPv4(198,101,24,0),24 },
+       { IPv4(198,101,32,0),20 },
+       { IPv4(198,102,1,0),24 },
+       { IPv4(198,102,2,0),23 },
+       { IPv4(198,102,66,0),24 },
+       { IPv4(198,102,67,0),24 },
+       { IPv4(198,102,85,0),24 },
+       { IPv4(198,102,86,0),23 },
+       { IPv4(198,102,88,0),24 },
+       { IPv4(198,102,91,0),24 },
+       { IPv4(198,102,103,0),24 },
+       { IPv4(198,102,112,0),24 },
+       { IPv4(198,102,117,0),24 },
+       { IPv4(198,102,147,0),24 },
+       { IPv4(198,102,157,0),24 },
+       { IPv4(198,102,172,0),24 },
+       { IPv4(198,102,186,0),23 },
+       { IPv4(198,102,188,0),23 },
+       { IPv4(198,102,188,0),22 },
+       { IPv4(198,102,190,0),23 },
+       { IPv4(198,102,192,0),23 },
+       { IPv4(198,102,192,0),22 },
+       { IPv4(198,102,194,0),23 },
+       { IPv4(198,102,196,0),24 },
+       { IPv4(198,102,196,0),23 },
+       { IPv4(198,102,198,0),23 },
+       { IPv4(198,102,201,0),24 },
+       { IPv4(198,102,206,0),24 },
+       { IPv4(198,102,211,0),24 },
+       { IPv4(198,102,244,0),24 },
+       { IPv4(198,102,253,0),24 },
+       { IPv4(198,102,254,0),24 },
+       { IPv4(198,103,0,0),16 },
+       { IPv4(198,103,1,0),24 },
+       { IPv4(198,103,15,0),24 },
+       { IPv4(198,103,18,0),24 },
+       { IPv4(198,103,22,0),24 },
+       { IPv4(198,103,37,0),24 },
+       { IPv4(198,103,41,0),24 },
+       { IPv4(198,103,42,0),24 },
+       { IPv4(198,103,45,0),24 },
+       { IPv4(198,103,49,0),24 },
+       { IPv4(198,103,53,0),24 },
+       { IPv4(198,103,55,0),24 },
+       { IPv4(198,103,56,0),24 },
+       { IPv4(198,103,61,0),24 },
+       { IPv4(198,103,63,0),24 },
+       { IPv4(198,103,92,0),24 },
+       { IPv4(198,103,93,0),24 },
+       { IPv4(198,103,94,0),24 },
+       { IPv4(198,103,95,0),24 },
+       { IPv4(198,103,96,0),24 },
+       { IPv4(198,103,97,0),24 },
+       { IPv4(198,103,98,0),24 },
+       { IPv4(198,103,99,0),24 },
+       { IPv4(198,103,101,0),24 },
+       { IPv4(198,103,103,0),24 },
+       { IPv4(198,103,104,0),24 },
+       { IPv4(198,103,108,0),24 },
+       { IPv4(198,103,109,0),24 },
+       { IPv4(198,103,111,0),24 },
+       { IPv4(198,103,138,0),24 },
+       { IPv4(198,103,140,0),24 },
+       { IPv4(198,103,143,0),24 },
+       { IPv4(198,103,145,0),24 },
+       { IPv4(198,103,146,0),24 },
+       { IPv4(198,103,147,0),24 },
+       { IPv4(198,103,152,0),24 },
+       { IPv4(198,103,153,0),24 },
+       { IPv4(198,103,154,0),24 },
+       { IPv4(198,103,161,0),24 },
+       { IPv4(198,103,162,0),24 },
+       { IPv4(198,103,164,0),24 },
+       { IPv4(198,103,167,0),24 },
+       { IPv4(198,103,169,0),24 },
+       { IPv4(198,103,171,0),24 },
+       { IPv4(198,103,172,0),24 },
+       { IPv4(198,103,174,0),24 },
+       { IPv4(198,103,176,0),24 },
+       { IPv4(198,103,177,0),24 },
+       { IPv4(198,103,180,0),24 },
+       { IPv4(198,103,185,0),24 },
+       { IPv4(198,103,186,0),24 },
+       { IPv4(198,103,191,0),24 },
+       { IPv4(198,103,193,0),24 },
+       { IPv4(198,103,194,0),24 },
+       { IPv4(198,103,195,0),24 },
+       { IPv4(198,103,196,0),24 },
+       { IPv4(198,103,198,0),24 },
+       { IPv4(198,103,206,0),24 },
+       { IPv4(198,103,208,0),24 },
+       { IPv4(198,103,211,0),24 },
+       { IPv4(198,103,213,0),24 },
+       { IPv4(198,103,214,0),24 },
+       { IPv4(198,103,215,0),24 },
+       { IPv4(198,103,216,0),24 },
+       { IPv4(198,103,217,0),24 },
+       { IPv4(198,103,218,0),24 },
+       { IPv4(198,103,220,0),24 },
+       { IPv4(198,103,222,0),24 },
+       { IPv4(198,103,234,0),24 },
+       { IPv4(198,103,235,0),24 },
+       { IPv4(198,103,236,0),24 },
+       { IPv4(198,103,237,0),24 },
+       { IPv4(198,103,238,0),24 },
+       { IPv4(198,103,241,0),24 },
+       { IPv4(198,103,242,0),24 },
+       { IPv4(198,103,244,0),24 },
+       { IPv4(198,103,245,0),24 },
+       { IPv4(198,103,248,0),24 },
+       { IPv4(198,103,249,0),24 },
+       { IPv4(198,103,250,0),24 },
+       { IPv4(198,104,0,0),16 },
+       { IPv4(198,105,2,0),24 },
+       { IPv4(198,105,32,0),20 },
+       { IPv4(198,105,64,0),20 },
+       { IPv4(198,106,0,0),15 },
+       { IPv4(198,108,16,0),22 },
+       { IPv4(198,111,96,0),24 },
+       { IPv4(198,112,169,0),24 },
+       { IPv4(198,112,200,0),23 },
+       { IPv4(198,113,60,0),24 },
+       { IPv4(198,113,61,0),24 },
+       { IPv4(198,116,0,0),14 },
+       { IPv4(198,118,206,0),24 },
+       { IPv4(198,119,23,0),24 },
+       { IPv4(198,119,24,0),24 },
+       { IPv4(198,119,27,0),25 },
+       { IPv4(198,120,0,0),14 },
+       { IPv4(198,124,0,0),14 },
+       { IPv4(198,128,0,0),14 },
+       { IPv4(198,133,16,0),24 },
+       { IPv4(198,133,16,0),20 },
+       { IPv4(198,133,36,0),24 },
+       { IPv4(198,133,77,0),24 },
+       { IPv4(198,133,79,0),24 },
+       { IPv4(198,133,120,0),24 },
+       { IPv4(198,133,123,0),24 },
+       { IPv4(198,133,146,0),24 },
+       { IPv4(198,133,170,0),24 },
+       { IPv4(198,133,178,0),23 },
+       { IPv4(198,133,180,0),22 },
+       { IPv4(198,133,185,0),24 },
+       { IPv4(198,133,198,0),24 },
+       { IPv4(198,133,199,0),24 },
+       { IPv4(198,133,206,0),24 },
+       { IPv4(198,133,219,0),24 },
+       { IPv4(198,133,233,0),24 },
+       { IPv4(198,133,237,0),24 },
+       { IPv4(198,133,242,0),24 },
+       { IPv4(198,133,244,0),23 },
+       { IPv4(198,133,246,0),24 },
+       { IPv4(198,134,143,0),24 },
+       { IPv4(198,134,148,0),24 },
+       { IPv4(198,134,158,0),23 },
+       { IPv4(198,134,196,0),24 },
+       { IPv4(198,135,0,0),22 },
+       { IPv4(198,135,4,0),22 },
+       { IPv4(198,135,68,0),24 },
+       { IPv4(198,135,78,0),24 },
+       { IPv4(198,135,110,0),24 },
+       { IPv4(198,135,118,0),23 },
+       { IPv4(198,135,153,0),24 },
+       { IPv4(198,135,222,0),24 },
+       { IPv4(198,136,8,0),21 },
+       { IPv4(198,136,139,0),24 },
+       { IPv4(198,136,160,0),24 },
+       { IPv4(198,136,186,0),24 },
+       { IPv4(198,136,201,0),24 },
+       { IPv4(198,136,226,0),24 },
+       { IPv4(198,136,229,0),24 },
+       { IPv4(198,136,233,0),24 },
+       { IPv4(198,136,243,0),24 },
+       { IPv4(198,136,250,0),24 },
+       { IPv4(198,137,70,0),24 },
+       { IPv4(198,137,99,0),24 },
+       { IPv4(198,137,140,0),24 },
+       { IPv4(198,137,142,0),24 },
+       { IPv4(198,137,143,0),24 },
+       { IPv4(198,137,147,0),24 },
+       { IPv4(198,137,151,0),24 },
+       { IPv4(198,137,152,0),23 },
+       { IPv4(198,137,170,0),24 },
+       { IPv4(198,137,181,0),24 },
+       { IPv4(198,137,182,0),23 },
+       { IPv4(198,137,186,0),24 },
+       { IPv4(198,137,187,0),24 },
+       { IPv4(198,137,194,0),24 },
+       { IPv4(198,137,199,0),24 },
+       { IPv4(198,137,200,0),24 },
+       { IPv4(198,137,202,0),24 },
+       { IPv4(198,137,221,0),24 },
+       { IPv4(198,137,249,0),24 },
+       { IPv4(198,137,254,0),24 },
+       { IPv4(198,138,0,0),15 },
+       { IPv4(198,138,53,0),24 },
+       { IPv4(198,138,54,0),23 },
+       { IPv4(198,138,56,0),22 },
+       { IPv4(198,138,60,0),24 },
+       { IPv4(198,138,103,0),24 },
+       { IPv4(198,139,122,0),24 },
+       { IPv4(198,139,128,0),24 },
+       { IPv4(198,139,237,0),24 },
+       { IPv4(198,140,0,0),22 },
+       { IPv4(198,140,58,0),23 },
+       { IPv4(198,140,63,0),24 },
+       { IPv4(198,140,134,0),24 },
+       { IPv4(198,140,179,0),24 },
+       { IPv4(198,140,180,0),24 },
+       { IPv4(198,140,189,0),24 },
+       { IPv4(198,140,215,0),24 },
+       { IPv4(198,143,8,0),24 },
+       { IPv4(198,143,13,0),24 },
+       { IPv4(198,143,16,0),24 },
+       { IPv4(198,143,17,0),24 },
+       { IPv4(198,143,18,0),24 },
+       { IPv4(198,143,19,0),24 },
+       { IPv4(198,143,20,0),24 },
+       { IPv4(198,143,21,0),24 },
+       { IPv4(198,143,22,0),24 },
+       { IPv4(198,143,24,0),24 },
+       { IPv4(198,144,128,0),20 },
+       { IPv4(198,144,135,0),24 },
+       { IPv4(198,144,192,0),20 },
+       { IPv4(198,147,0,0),20 },
+       { IPv4(198,147,37,0),24 },
+       { IPv4(198,147,38,0),24 },
+       { IPv4(198,147,75,0),24 },
+       { IPv4(198,147,81,0),24 },
+       { IPv4(198,147,91,0),24 },
+       { IPv4(198,147,128,0),24 },
+       { IPv4(198,147,137,0),24 },
+       { IPv4(198,147,142,0),23 },
+       { IPv4(198,147,147,0),24 },
+       { IPv4(198,147,150,0),24 },
+       { IPv4(198,147,151,0),24 },
+       { IPv4(198,147,157,0),24 },
+       { IPv4(198,147,162,0),24 },
+       { IPv4(198,147,175,0),24 },
+       { IPv4(198,147,200,0),23 },
+       { IPv4(198,147,219,0),24 },
+       { IPv4(198,147,224,0),24 },
+       { IPv4(198,147,246,0),24 },
+       { IPv4(198,148,166,0),24 },
+       { IPv4(198,148,175,0),24 },
+       { IPv4(198,148,190,0),24 },
+       { IPv4(198,148,205,0),24 },
+       { IPv4(198,148,206,0),24 },
+       { IPv4(198,148,209,0),24 },
+       { IPv4(198,148,239,0),24 },
+       { IPv4(198,148,251,0),24 },
+       { IPv4(198,149,2,0),24 },
+       { IPv4(198,149,172,0),22 },
+       { IPv4(198,151,130,0),24 },
+       { IPv4(198,151,137,0),24 },
+       { IPv4(198,151,139,0),24 },
+       { IPv4(198,151,149,0),24 },
+       { IPv4(198,151,160,0),24 },
+       { IPv4(198,151,170,0),24 },
+       { IPv4(198,151,171,0),24 },
+       { IPv4(198,151,172,0),24 },
+       { IPv4(198,151,175,0),24 },
+       { IPv4(198,151,200,0),22 },
+       { IPv4(198,151,212,0),24 },
+       { IPv4(198,151,230,0),23 },
+       { IPv4(198,151,248,0),24 },
+       { IPv4(198,152,185,0),24 },
+       { IPv4(198,153,8,0),21 },
+       { IPv4(198,153,20,0),22 },
+       { IPv4(198,153,31,0),24 },
+       { IPv4(198,153,132,0),24 },
+       { IPv4(198,153,146,0),24 },
+       { IPv4(198,153,152,0),24 },
+       { IPv4(198,153,219,0),24 },
+       { IPv4(198,153,232,0),24 },
+       { IPv4(198,154,2,0),23 },
+       { IPv4(198,154,8,0),21 },
+       { IPv4(198,154,16,0),24 },
+       { IPv4(198,154,18,0),24 },
+       { IPv4(198,154,19,0),24 },
+       { IPv4(198,154,20,0),24 },
+       { IPv4(198,154,21,0),24 },
+       { IPv4(198,154,22,0),24 },
+       { IPv4(198,154,23,0),24 },
+       { IPv4(198,154,24,0),23 },
+       { IPv4(198,154,24,0),24 },
+       { IPv4(198,154,25,0),24 },
+       { IPv4(198,154,64,0),21 },
+       { IPv4(198,154,72,0),22 },
+       { IPv4(198,154,77,0),24 },
+       { IPv4(198,154,128,0),19 },
+       { IPv4(198,154,150,0),24 },
+       { IPv4(198,154,160,0),20 },
+       { IPv4(198,154,173,0),24 },
+       { IPv4(198,154,174,0),23 },
+       { IPv4(198,154,176,0),23 },
+       { IPv4(198,155,0,0),16 },
+       { IPv4(198,160,18,0),24 },
+       { IPv4(198,160,140,0),24 },
+       { IPv4(198,160,177,0),24 },
+       { IPv4(198,160,178,0),24 },
+       { IPv4(198,160,179,0),24 },
+       { IPv4(198,160,180,0),24 },
+       { IPv4(198,160,196,0),24 },
+       { IPv4(198,160,197,0),24 },
+       { IPv4(198,160,246,0),24 },
+       { IPv4(198,160,250,0),24 },
+       { IPv4(198,160,252,0),24 },
+       { IPv4(198,161,2,0),24 },
+       { IPv4(198,161,22,0),24 },
+       { IPv4(198,161,23,0),24 },
+       { IPv4(198,161,82,0),24 },
+       { IPv4(198,161,83,0),24 },
+       { IPv4(198,161,180,0),24 },
+       { IPv4(198,161,208,0),24 },
+       { IPv4(198,161,210,0),24 },
+       { IPv4(198,161,211,0),24 },
+       { IPv4(198,161,216,0),24 },
+       { IPv4(198,161,246,0),23 },
+       { IPv4(198,162,70,0),24 },
+       { IPv4(198,162,158,0),23 },
+       { IPv4(198,162,232,0),22 },
+       { IPv4(198,163,115,0),24 },
+       { IPv4(198,163,184,0),21 },
+       { IPv4(198,163,192,0),21 },
+       { IPv4(198,163,200,0),21 },
+       { IPv4(198,164,3,0),24 },
+       { IPv4(198,164,7,0),24 },
+       { IPv4(198,165,18,0),24 },
+       { IPv4(198,165,39,0),24 },
+       { IPv4(198,165,53,0),24 },
+       { IPv4(198,165,56,0),23 },
+       { IPv4(198,165,59,0),24 },
+       { IPv4(198,165,60,0),23 },
+       { IPv4(198,165,62,0),24 },
+       { IPv4(198,165,72,0),23 },
+       { IPv4(198,165,162,0),23 },
+       { IPv4(198,165,185,0),24 },
+       { IPv4(198,167,160,0),24 },
+       { IPv4(198,167,161,0),24 },
+       { IPv4(198,167,162,0),24 },
+       { IPv4(198,167,163,0),24 },
+       { IPv4(198,169,171,0),24 },
+       { IPv4(198,169,181,0),24 },
+       { IPv4(198,169,182,0),24 },
+       { IPv4(198,169,183,0),24 },
+       { IPv4(198,169,184,0),24 },
+       { IPv4(198,170,0,0),15 },
+       { IPv4(198,170,186,0),24 },
+       { IPv4(198,170,208,0),24 },
+       { IPv4(198,172,0,0),15 },
+       { IPv4(198,174,0,0),16 },
+       { IPv4(198,174,1,0),24 },
+       { IPv4(198,174,2,0),23 },
+       { IPv4(198,174,4,0),23 },
+       { IPv4(198,174,6,0),24 },
+       { IPv4(198,174,8,0),24 },
+       { IPv4(198,174,48,0),24 },
+       { IPv4(198,174,49,0),24 },
+       { IPv4(198,174,50,0),24 },
+       { IPv4(198,174,51,0),24 },
+       { IPv4(198,174,52,0),24 },
+       { IPv4(198,174,55,0),24 },
+       { IPv4(198,174,65,0),24 },
+       { IPv4(198,174,66,0),23 },
+       { IPv4(198,174,68,0),22 },
+       { IPv4(198,174,72,0),21 },
+       { IPv4(198,174,80,0),20 },
+       { IPv4(198,174,120,0),24 },
+       { IPv4(198,174,121,0),24 },
+       { IPv4(198,174,122,0),23 },
+       { IPv4(198,174,124,0),22 },
+       { IPv4(198,174,127,0),24 },
+       { IPv4(198,174,128,0),22 },
+       { IPv4(198,174,128,0),24 },
+       { IPv4(198,174,132,0),24 },
+       { IPv4(198,174,169,0),24 },
+       { IPv4(198,174,176,0),20 },
+       { IPv4(198,174,217,0),24 },
+       { IPv4(198,174,218,0),23 },
+       { IPv4(198,174,220,0),22 },
+       { IPv4(198,174,224,0),21 },
+       { IPv4(198,174,232,0),24 },
+       { IPv4(198,175,9,0),24 },
+       { IPv4(198,175,11,0),24 },
+       { IPv4(198,175,14,0),24 },
+       { IPv4(198,175,47,0),24 },
+       { IPv4(198,175,48,0),24 },
+       { IPv4(198,175,49,0),24 },
+       { IPv4(198,175,56,0),24 },
+       { IPv4(198,175,57,0),24 },
+       { IPv4(198,175,60,0),24 },
+       { IPv4(198,175,62,0),23 },
+       { IPv4(198,175,62,0),24 },
+       { IPv4(198,175,68,0),24 },
+       { IPv4(198,175,70,0),23 },
+       { IPv4(198,175,72,0),24 },
+       { IPv4(198,175,76,0),24 },
+       { IPv4(198,175,149,0),24 },
+       { IPv4(198,175,158,0),24 },
+       { IPv4(198,175,187,0),24 },
+       { IPv4(198,175,194,0),23 },
+       { IPv4(198,175,196,0),22 },
+       { IPv4(198,175,202,0),24 },
+       { IPv4(198,175,203,0),24 },
+       { IPv4(198,175,204,0),24 },
+       { IPv4(198,175,212,0),22 },
+       { IPv4(198,175,236,0),24 },
+       { IPv4(198,175,240,0),24 },
+       { IPv4(198,175,250,0),24 },
+       { IPv4(198,176,16,0),24 },
+       { IPv4(198,176,17,0),24 },
+       { IPv4(198,176,20,0),24 },
+       { IPv4(198,176,21,0),24 },
+       { IPv4(198,176,160,0),24 },
+       { IPv4(198,176,170,0),24 },
+       { IPv4(198,176,174,0),24 },
+       { IPv4(198,176,184,0),24 },
+       { IPv4(198,176,193,0),24 },
+       { IPv4(198,176,199,0),24 },
+       { IPv4(198,176,204,0),24 },
+       { IPv4(198,176,217,0),24 },
+       { IPv4(198,176,225,0),24 },
+       { IPv4(198,176,247,0),24 },
+       { IPv4(198,177,11,0),24 },
+       { IPv4(198,177,12,0),24 },
+       { IPv4(198,177,13,0),24 },
+       { IPv4(198,177,14,0),24 },
+       { IPv4(198,177,15,0),24 },
+       { IPv4(198,177,32,0),20 },
+       { IPv4(198,177,48,0),22 },
+       { IPv4(198,177,169,0),24 },
+       { IPv4(198,177,170,0),24 },
+       { IPv4(198,177,171,0),24 },
+       { IPv4(198,177,172,0),24 },
+       { IPv4(198,177,173,0),24 },
+       { IPv4(198,177,174,0),24 },
+       { IPv4(198,177,180,0),23 },
+       { IPv4(198,177,181,0),24 },
+       { IPv4(198,177,191,0),24 },
+       { IPv4(198,177,192,0),22 },
+       { IPv4(198,177,196,0),23 },
+       { IPv4(198,177,224,0),24 },
+       { IPv4(198,177,229,0),24 },
+       { IPv4(198,178,8,0),24 },
+       { IPv4(198,178,9,0),24 },
+       { IPv4(198,178,32,0),20 },
+       { IPv4(198,178,32,0),21 },
+       { IPv4(198,178,40,0),21 },
+       { IPv4(198,178,48,0),22 },
+       { IPv4(198,178,48,0),21 },
+       { IPv4(198,178,52,0),22 },
+       { IPv4(198,178,129,0),24 },
+       { IPv4(198,178,148,0),24 },
+       { IPv4(198,178,167,0),24 },
+       { IPv4(198,178,186,0),24 },
+       { IPv4(198,178,215,0),24 },
+       { IPv4(198,178,217,0),24 },
+       { IPv4(198,178,226,0),24 },
+       { IPv4(198,178,232,0),24 },
+       { IPv4(198,178,234,0),23 },
+       { IPv4(198,178,236,0),22 },
+       { IPv4(198,178,254,0),24 },
+       { IPv4(198,179,16,0),24 },
+       { IPv4(198,179,128,0),24 },
+       { IPv4(198,179,140,0),24 },
+       { IPv4(198,179,169,0),24 },
+       { IPv4(198,179,170,0),24 },
+       { IPv4(198,179,171,0),24 },
+       { IPv4(198,179,172,0),24 },
+       { IPv4(198,179,173,0),24 },
+       { IPv4(198,179,201,0),24 },
+       { IPv4(198,179,208,0),24 },
+       { IPv4(198,179,214,0),24 },
+       { IPv4(198,179,232,0),24 },
+       { IPv4(198,179,239,0),24 },
+       { IPv4(198,179,246,0),24 },
+       { IPv4(198,179,248,0),24 },
+       { IPv4(198,180,16,0),20 },
+       { IPv4(198,180,36,0),24 },
+       { IPv4(198,180,49,0),24 },
+       { IPv4(198,180,67,0),24 },
+       { IPv4(198,180,129,0),24 },
+       { IPv4(198,180,136,0),24 },
+       { IPv4(198,180,141,0),24 },
+       { IPv4(198,180,147,0),24 },
+       { IPv4(198,180,161,0),24 },
+       { IPv4(198,180,162,0),24 },
+       { IPv4(198,180,182,0),24 },
+       { IPv4(198,180,183,0),24 },
+       { IPv4(198,180,191,0),24 },
+       { IPv4(198,180,205,0),24 },
+       { IPv4(198,180,215,0),24 },
+       { IPv4(198,180,219,0),24 },
+       { IPv4(198,180,225,0),24 },
+       { IPv4(198,180,252,0),24 },
+       { IPv4(198,181,4,0),22 },
+       { IPv4(198,181,8,0),24 },
+       { IPv4(198,181,17,0),24 },
+       { IPv4(198,181,18,0),23 },
+       { IPv4(198,181,156,0),24 },
+       { IPv4(198,181,161,0),24 },
+       { IPv4(198,181,175,0),24 },
+       { IPv4(198,181,219,0),24 },
+       { IPv4(198,181,242,0),24 },
+       { IPv4(198,181,243,0),24 },
+       { IPv4(198,181,250,0),24 },
+       { IPv4(198,182,8,0),21 },
+       { IPv4(198,182,16,0),24 },
+       { IPv4(198,182,21,0),24 },
+       { IPv4(198,182,24,0),24 },
+       { IPv4(198,182,25,0),24 },
+       { IPv4(198,182,26,0),24 },
+       { IPv4(198,182,28,0),24 },
+       { IPv4(198,182,31,0),24 },
+       { IPv4(198,182,76,0),24 },
+       { IPv4(198,182,88,0),24 },
+       { IPv4(198,182,89,0),24 },
+       { IPv4(198,182,90,0),24 },
+       { IPv4(198,182,91,0),24 },
+       { IPv4(198,182,96,0),24 },
+       { IPv4(198,182,97,0),24 },
+       { IPv4(198,182,98,0),24 },
+       { IPv4(198,182,99,0),24 },
+       { IPv4(198,182,106,0),24 },
+       { IPv4(198,182,107,0),24 },
+       { IPv4(198,182,130,0),24 },
+       { IPv4(198,182,131,0),24 },
+       { IPv4(198,182,132,0),24 },
+       { IPv4(198,182,133,0),24 },
+       { IPv4(198,182,134,0),24 },
+       { IPv4(198,182,140,0),24 },
+       { IPv4(198,182,176,0),22 },
+       { IPv4(198,182,178,0),24 },
+       { IPv4(198,182,180,0),23 },
+       { IPv4(198,182,196,0),24 },
+       { IPv4(198,182,200,0),24 },
+       { IPv4(198,182,201,0),24 },
+       { IPv4(198,182,220,0),24 },
+       { IPv4(198,182,225,0),24 },
+       { IPv4(198,182,239,0),24 },
+       { IPv4(198,183,8,0),21 },
+       { IPv4(198,183,10,0),24 },
+       { IPv4(198,183,128,0),22 },
+       { IPv4(198,183,139,0),24 },
+       { IPv4(198,183,146,0),23 },
+       { IPv4(198,183,157,0),24 },
+       { IPv4(198,183,160,0),22 },
+       { IPv4(198,183,164,0),24 },
+       { IPv4(198,183,165,0),24 },
+       { IPv4(198,183,166,0),24 },
+       { IPv4(198,183,167,0),24 },
+       { IPv4(198,183,217,0),24 },
+       { IPv4(198,183,218,0),24 },
+       { IPv4(198,183,241,0),24 },
+       { IPv4(198,184,66,0),24 },
+       { IPv4(198,184,69,0),24 },
+       { IPv4(198,184,85,0),24 },
+       { IPv4(198,184,93,0),24 },
+       { IPv4(198,184,118,0),24 },
+       { IPv4(198,184,121,0),24 },
+       { IPv4(198,184,126,0),24 },
+       { IPv4(198,184,127,0),24 },
+       { IPv4(198,184,134,0),24 },
+       { IPv4(198,184,147,0),24 },
+       { IPv4(198,184,150,0),24 },
+       { IPv4(198,184,152,0),23 },
+       { IPv4(198,184,171,0),24 },
+       { IPv4(198,184,210,0),24 },
+       { IPv4(198,184,211,0),24 },
+       { IPv4(198,184,227,0),24 },
+       { IPv4(198,185,4,0),22 },
+       { IPv4(198,185,10,0),24 },
+       { IPv4(198,185,22,0),24 },
+       { IPv4(198,185,70,0),24 },
+       { IPv4(198,185,72,0),24 },
+       { IPv4(198,185,73,0),24 },
+       { IPv4(198,185,104,0),24 },
+       { IPv4(198,185,133,0),24 },
+       { IPv4(198,185,134,0),23 },
+       { IPv4(198,185,136,0),23 },
+       { IPv4(198,185,163,0),24 },
+       { IPv4(198,185,184,0),24 },
+       { IPv4(198,185,205,0),24 },
+       { IPv4(198,185,207,0),24 },
+       { IPv4(198,185,234,0),24 },
+       { IPv4(198,185,236,0),24 },
+       { IPv4(198,186,48,0),22 },
+       { IPv4(198,186,52,0),24 },
+       { IPv4(198,186,53,0),24 },
+       { IPv4(198,186,63,0),24 },
+       { IPv4(198,186,64,0),24 },
+       { IPv4(198,186,145,0),24 },
+       { IPv4(198,186,151,0),24 },
+       { IPv4(198,186,160,0),24 },
+       { IPv4(198,186,167,0),24 },
+       { IPv4(198,186,184,0),24 },
+       { IPv4(198,186,200,0),22 },
+       { IPv4(198,186,212,0),23 },
+       { IPv4(198,186,213,0),24 },
+       { IPv4(198,186,214,0),24 },
+       { IPv4(198,186,237,0),24 },
+       { IPv4(198,187,135,0),24 },
+       { IPv4(198,187,136,0),24 },
+       { IPv4(198,187,156,0),24 },
+       { IPv4(198,187,203,0),24 },
+       { IPv4(198,187,204,0),24 },
+       { IPv4(198,187,215,0),24 },
+       { IPv4(198,187,216,0),24 },
+       { IPv4(198,187,220,0),24 },
+       { IPv4(198,187,247,0),24 },
+       { IPv4(198,187,252,0),24 },
+       { IPv4(198,188,0,0),16 },
+       { IPv4(198,188,7,0),24 },
+       { IPv4(198,188,8,0),24 },
+       { IPv4(198,188,49,0),24 },
+       { IPv4(198,188,50,0),24 },
+       { IPv4(198,188,160,0),19 },
+       { IPv4(198,188,192,0),20 },
+       { IPv4(198,188,208,0),23 },
+       { IPv4(198,188,211,0),24 },
+       { IPv4(198,188,212,0),22 },
+       { IPv4(198,188,216,0),21 },
+       { IPv4(198,188,224,0),20 },
+       { IPv4(198,188,240,0),21 },
+       { IPv4(198,188,252,0),23 },
+       { IPv4(198,189,0,0),16 },
+       { IPv4(198,190,28,0),24 },
+       { IPv4(198,190,147,0),24 },
+       { IPv4(198,190,166,0),24 },
+       { IPv4(198,190,182,0),24 },
+       { IPv4(198,190,187,0),24 },
+       { IPv4(198,190,195,0),24 },
+       { IPv4(198,190,201,0),24 },
+       { IPv4(198,190,216,0),24 },
+       { IPv4(198,190,219,0),24 },
+       { IPv4(198,190,247,0),24 },
+       { IPv4(198,190,249,0),24 },
+       { IPv4(198,190,250,0),23 },
+       { IPv4(198,190,252,0),24 },
+       { IPv4(198,199,9,0),24 },
+       { IPv4(198,199,13,0),24 },
+       { IPv4(198,199,128,0),24 },
+       { IPv4(198,199,136,0),24 },
+       { IPv4(198,199,168,0),24 },
+       { IPv4(198,199,179,0),24 },
+       { IPv4(198,199,187,0),24 },
+       { IPv4(198,199,191,0),24 },
+       { IPv4(198,199,199,0),24 },
+       { IPv4(198,199,206,0),24 },
+       { IPv4(198,199,219,0),24 },
+       { IPv4(198,199,224,0),24 },
+       { IPv4(198,199,237,0),24 },
+       { IPv4(198,200,32,0),21 },
+       { IPv4(198,200,139,0),24 },
+       { IPv4(198,200,147,0),24 },
+       { IPv4(198,200,171,0),24 },
+       { IPv4(198,200,174,0),24 },
+       { IPv4(198,200,182,0),24 },
+       { IPv4(198,200,184,0),24 },
+       { IPv4(198,200,195,0),24 },
+       { IPv4(198,200,228,0),24 },
+       { IPv4(198,201,4,0),23 },
+       { IPv4(198,201,5,0),24 },
+       { IPv4(198,201,6,0),24 },
+       { IPv4(198,201,23,0),24 },
+       { IPv4(198,202,33,0),24 },
+       { IPv4(198,202,64,0),21 },
+       { IPv4(198,202,64,0),18 },
+       { IPv4(198,202,65,0),24 },
+       { IPv4(198,202,66,0),24 },
+       { IPv4(198,202,67,0),24 },
+       { IPv4(198,202,68,0),24 },
+       { IPv4(198,202,69,0),24 },
+       { IPv4(198,202,70,0),24 },
+       { IPv4(198,202,71,0),24 },
+       { IPv4(198,202,72,0),21 },
+       { IPv4(198,202,72,0),24 },
+       { IPv4(198,202,73,0),24 },
+       { IPv4(198,202,74,0),24 },
+       { IPv4(198,202,75,0),24 },
+       { IPv4(198,202,76,0),24 },
+       { IPv4(198,202,79,0),24 },
+       { IPv4(198,202,80,0),24 },
+       { IPv4(198,202,80,0),20 },
+       { IPv4(198,202,81,0),24 },
+       { IPv4(198,202,84,0),24 },
+       { IPv4(198,202,85,0),24 },
+       { IPv4(198,202,86,0),24 },
+       { IPv4(198,202,87,0),24 },
+       { IPv4(198,202,96,0),19 },
+       { IPv4(198,202,139,0),24 },
+       { IPv4(198,202,144,0),24 },
+       { IPv4(198,202,145,0),24 },
+       { IPv4(198,202,148,0),24 },
+       { IPv4(198,202,150,0),24 },
+       { IPv4(198,202,162,0),24 },
+       { IPv4(198,202,168,0),24 },
+       { IPv4(198,202,174,0),24 },
+       { IPv4(198,202,177,0),24 },
+       { IPv4(198,202,182,0),24 },
+       { IPv4(198,202,200,0),22 },
+       { IPv4(198,202,201,0),24 },
+       { IPv4(198,202,202,0),24 },
+       { IPv4(198,202,204,0),24 },
+       { IPv4(198,202,217,0),24 },
+       { IPv4(198,202,228,0),23 },
+       { IPv4(198,202,235,0),24 },
+       { IPv4(198,202,243,0),24 },
+       { IPv4(198,203,9,0),24 },
+       { IPv4(198,203,11,0),24 },
+       { IPv4(198,203,13,0),24 },
+       { IPv4(198,203,16,0),21 },
+       { IPv4(198,203,24,0),23 },
+       { IPv4(198,203,32,0),20 },
+       { IPv4(198,203,48,0),20 },
+       { IPv4(198,203,145,0),24 },
+       { IPv4(198,203,173,0),24 },
+       { IPv4(198,203,178,0),24 },
+       { IPv4(198,203,191,0),24 },
+       { IPv4(198,203,192,0),24 },
+       { IPv4(198,203,219,0),24 },
+       { IPv4(198,203,246,0),24 },
+       { IPv4(198,204,22,0),24 },
+       { IPv4(198,204,92,0),24 },
+       { IPv4(198,204,104,0),24 },
+       { IPv4(198,204,116,0),22 },
+       { IPv4(198,204,120,0),23 },
+       { IPv4(198,204,122,0),24 },
+       { IPv4(198,204,133,0),24 },
+       { IPv4(198,204,134,0),24 },
+       { IPv4(198,204,138,0),24 },
+       { IPv4(198,204,141,0),24 },
+       { IPv4(198,204,142,0),24 },
+       { IPv4(198,205,14,0),24 },
+       { IPv4(198,206,16,0),20 },
+       { IPv4(198,206,47,0),24 },
+       { IPv4(198,206,131,0),24 },
+       { IPv4(198,206,134,0),24 },
+       { IPv4(198,206,162,0),24 },
+       { IPv4(198,206,175,0),24 },
+       { IPv4(198,206,193,0),24 },
+       { IPv4(198,206,194,0),24 },
+       { IPv4(198,206,222,0),24 },
+       { IPv4(198,206,223,0),24 },
+       { IPv4(198,206,234,0),23 },
+       { IPv4(198,206,236,0),24 },
+       { IPv4(198,206,239,0),24 },
+       { IPv4(198,206,240,0),23 },
+       { IPv4(198,206,242,0),24 },
+       { IPv4(198,206,243,0),24 },
+       { IPv4(198,206,246,0),24 },
+       { IPv4(198,206,247,0),24 },
+       { IPv4(198,207,8,0),21 },
+       { IPv4(198,207,153,0),24 },
+       { IPv4(198,207,168,0),24 },
+       { IPv4(198,207,169,0),24 },
+       { IPv4(198,207,176,0),24 },
+       { IPv4(198,207,179,0),24 },
+       { IPv4(198,207,185,0),24 },
+       { IPv4(198,207,193,0),24 },
+       { IPv4(198,207,196,0),24 },
+       { IPv4(198,207,229,0),24 },
+       { IPv4(198,207,230,0),23 },
+       { IPv4(198,207,232,0),24 },
+       { IPv4(198,207,237,0),24 },
+       { IPv4(198,207,238,0),24 },
+       { IPv4(198,207,239,0),24 },
+       { IPv4(198,207,240,0),24 },
+       { IPv4(198,207,241,0),24 },
+       { IPv4(198,208,6,0),24 },
+       { IPv4(198,208,23,0),24 },
+       { IPv4(198,208,28,0),24 },
+       { IPv4(198,208,223,0),24 },
+       { IPv4(198,209,0,0),19 },
+       { IPv4(198,209,32,0),19 },
+       { IPv4(198,209,64,0),19 },
+       { IPv4(198,209,96,0),19 },
+       { IPv4(198,209,128,0),19 },
+       { IPv4(198,209,160,0),19 },
+       { IPv4(198,209,192,0),19 },
+       { IPv4(198,209,224,0),19 },
+       { IPv4(198,211,0,0),16 },
+       { IPv4(198,211,40,0),24 },
+       { IPv4(198,211,54,0),24 },
+       { IPv4(198,211,56,0),23 },
+       { IPv4(198,211,65,0),24 },
+       { IPv4(198,211,124,0),24 },
+       { IPv4(198,212,166,0),24 },
+       { IPv4(198,212,176,0),24 },
+       { IPv4(198,212,187,0),24 },
+       { IPv4(198,212,194,0),23 },
+       { IPv4(198,212,196,0),23 },
+       { IPv4(198,212,199,0),24 },
+       { IPv4(198,212,205,0),24 },
+       { IPv4(198,212,206,0),24 },
+       { IPv4(198,212,207,0),24 },
+       { IPv4(198,212,218,0),24 },
+       { IPv4(198,212,246,0),24 },
+       { IPv4(198,212,251,0),24 },
+       { IPv4(198,217,216,0),21 },
+       { IPv4(198,217,224,0),24 },
+       { IPv4(198,218,0,0),16 },
+       { IPv4(198,218,204,0),24 },
+       { IPv4(198,220,0,0),16 },
+       { IPv4(198,222,0,0),16 },
+       { IPv4(198,223,29,0),24 },
+       { IPv4(198,223,30,0),23 },
+       { IPv4(198,223,97,0),24 },
+       { IPv4(198,223,101,0),24 },
+       { IPv4(198,223,102,0),24 },
+       { IPv4(198,223,106,0),24 },
+       { IPv4(198,223,128,0),21 },
+       { IPv4(198,223,160,0),19 },
+       { IPv4(198,228,0,0),19 },
+       { IPv4(198,228,192,0),18 },
+       { IPv4(198,232,32,0),19 },
+       { IPv4(198,232,129,0),24 },
+       { IPv4(198,232,136,0),24 },
+       { IPv4(198,232,144,0),24 },
+       { IPv4(198,232,168,0),23 },
+       { IPv4(198,232,211,0),24 },
+       { IPv4(198,232,214,0),24 },
+       { IPv4(198,232,215,0),24 },
+       { IPv4(198,232,216,0),24 },
+       { IPv4(198,232,217,0),24 },
+       { IPv4(198,232,236,0),24 },
+       { IPv4(198,232,237,0),24 },
+       { IPv4(198,232,238,0),24 },
+       { IPv4(198,232,239,0),24 },
+       { IPv4(198,233,0,0),16 },
+       { IPv4(198,233,27,0),24 },
+       { IPv4(198,234,0,0),16 },
+       { IPv4(198,235,23,0),24 },
+       { IPv4(198,235,56,0),24 },
+       { IPv4(198,235,157,0),24 },
+       { IPv4(198,235,177,0),24 },
+       { IPv4(198,235,180,0),22 },
+       { IPv4(198,235,202,0),24 },
+       { IPv4(198,235,203,0),24 },
+       { IPv4(198,235,204,0),24 },
+       { IPv4(198,235,205,0),24 },
+       { IPv4(198,236,0,0),21 },
+       { IPv4(198,236,8,0),23 },
+       { IPv4(198,236,21,0),24 },
+       { IPv4(198,236,22,0),23 },
+       { IPv4(198,236,24,0),21 },
+       { IPv4(198,236,32,0),20 },
+       { IPv4(198,236,48,0),21 },
+       { IPv4(198,236,56,0),24 },
+       { IPv4(198,236,128,0),19 },
+       { IPv4(198,237,208,0),20 },
+       { IPv4(198,237,224,0),19 },
+       { IPv4(198,240,129,0),24 },
+       { IPv4(198,240,130,0),24 },
+       { IPv4(198,241,201,0),24 },
+       { IPv4(198,241,202,0),24 },
+       { IPv4(198,241,202,0),23 },
+       { IPv4(198,241,203,0),24 },
+       { IPv4(198,241,204,0),24 },
+       { IPv4(198,242,23,0),24 },
+       { IPv4(198,242,56,0),24 },
+       { IPv4(198,242,57,0),24 },
+       { IPv4(198,242,58,0),24 },
+       { IPv4(198,242,109,0),24 },
+       { IPv4(198,242,111,0),24 },
+       { IPv4(198,242,208,0),22 },
+       { IPv4(198,242,212,0),24 },
+       { IPv4(198,242,213,0),24 },
+       { IPv4(198,242,214,0),23 },
+       { IPv4(198,242,216,0),24 },
+       { IPv4(198,243,0,0),16 },
+       { IPv4(198,243,69,0),24 },
+       { IPv4(198,243,127,0),24 },
+       { IPv4(198,243,128,0),17 },
+       { IPv4(198,243,153,0),24 },
+       { IPv4(198,243,180,0),24 },
+       { IPv4(198,245,32,0),21 },
+       { IPv4(198,245,40,0),23 },
+       { IPv4(198,245,140,0),24 },
+       { IPv4(198,245,183,0),24 },
+       { IPv4(198,245,204,0),24 },
+       { IPv4(198,245,206,0),24 },
+       { IPv4(198,245,210,0),24 },
+       { IPv4(198,245,211,0),24 },
+       { IPv4(198,245,214,0),24 },
+       { IPv4(198,246,0,0),21 },
+       { IPv4(198,246,16,0),21 },
+       { IPv4(198,246,24,0),23 },
+       { IPv4(198,246,32,0),21 },
+       { IPv4(198,246,132,0),23 },
+       { IPv4(198,246,192,0),24 },
+       { IPv4(198,246,200,0),24 },
+       { IPv4(198,246,227,0),24 },
+       { IPv4(198,246,233,0),24 },
+       { IPv4(198,246,237,0),24 },
+       { IPv4(198,246,246,0),24 },
+       { IPv4(198,246,254,0),24 },
+       { IPv4(198,247,0,0),16 },
+       { IPv4(198,247,48,0),20 },
+       { IPv4(198,247,96,0),19 },
+       { IPv4(198,247,128,0),19 },
+       { IPv4(198,247,184,0),22 },
+       { IPv4(198,247,232,0),23 },
+       { IPv4(198,247,234,0),24 },
+       { IPv4(198,247,236,0),22 },
+       { IPv4(198,248,64,0),19 },
+       { IPv4(198,248,96,0),19 },
+       { IPv4(198,249,61,0),24 },
+       { IPv4(198,250,64,0),19 },
+       { IPv4(198,250,128,0),18 },
+       { IPv4(198,250,180,0),24 },
+       { IPv4(198,250,192,0),19 },
+       { IPv4(198,250,202,0),24 },
+       { IPv4(198,250,203,0),24 },
+       { IPv4(198,250,204,0),24 },
+       { IPv4(198,250,224,0),20 },
+       { IPv4(198,250,240,0),21 },
+       { IPv4(198,250,248,0),22 },
+       { IPv4(198,252,8,0),21 },
+       { IPv4(198,252,32,0),19 },
+       { IPv4(198,252,143,0),24 },
+       { IPv4(198,252,175,0),24 },
+       { IPv4(198,252,182,0),24 },
+       { IPv4(198,252,186,0),24 },
+       { IPv4(198,252,189,0),24 },
+       { IPv4(198,252,190,0),24 },
+       { IPv4(198,252,191,0),24 },
+       { IPv4(198,252,192,0),24 },
+       { IPv4(198,252,208,0),23 },
+       { IPv4(198,252,214,0),24 },
+       { IPv4(198,252,232,0),24 },
+       { IPv4(198,252,237,0),24 },
+       { IPv4(198,252,240,0),23 },
+       { IPv4(198,252,244,0),24 },
+       { IPv4(198,253,0,0),16 },
+       { IPv4(198,253,0,0),20 },
+       { IPv4(198,253,16,0),20 },
+       { IPv4(198,253,40,0),21 },
+       { IPv4(198,253,58,0),24 },
+       { IPv4(198,253,60,0),24 },
+       { IPv4(198,253,71,0),24 },
+       { IPv4(198,253,72,0),21 },
+       { IPv4(198,253,80,0),20 },
+       { IPv4(198,253,99,0),24 },
+       { IPv4(198,253,100,0),22 },
+       { IPv4(198,253,104,0),22 },
+       { IPv4(198,253,110,0),24 },
+       { IPv4(198,253,111,0),24 },
+       { IPv4(198,253,112,0),23 },
+       { IPv4(198,253,114,0),24 },
+       { IPv4(198,253,116,0),24 },
+       { IPv4(198,253,117,0),24 },
+       { IPv4(198,253,118,0),23 },
+       { IPv4(198,253,120,0),24 },
+       { IPv4(198,253,122,0),23 },
+       { IPv4(198,253,124,0),22 },
+       { IPv4(198,253,128,0),21 },
+       { IPv4(198,253,136,0),21 },
+       { IPv4(198,253,147,0),24 },
+       { IPv4(198,253,148,0),23 },
+       { IPv4(198,253,158,0),24 },
+       { IPv4(198,253,161,0),24 },
+       { IPv4(198,253,163,0),24 },
+       { IPv4(198,253,166,0),23 },
+       { IPv4(198,253,168,0),22 },
+       { IPv4(198,253,173,0),24 },
+       { IPv4(198,253,174,0),24 },
+       { IPv4(198,253,175,0),24 },
+       { IPv4(198,253,177,0),24 },
+       { IPv4(198,253,178,0),24 },
+       { IPv4(198,253,184,0),24 },
+       { IPv4(198,253,185,0),24 },
+       { IPv4(198,253,186,0),24 },
+       { IPv4(198,253,187,0),24 },
+       { IPv4(198,253,188,0),22 },
+       { IPv4(198,253,192,0),22 },
+       { IPv4(198,253,196,0),24 },
+       { IPv4(198,253,198,0),24 },
+       { IPv4(198,253,199,0),24 },
+       { IPv4(198,253,200,0),24 },
+       { IPv4(198,253,204,0),23 },
+       { IPv4(198,253,206,0),24 },
+       { IPv4(198,253,213,0),24 },
+       { IPv4(198,253,225,0),24 },
+       { IPv4(198,253,226,0),24 },
+       { IPv4(198,253,232,0),23 },
+       { IPv4(198,253,243,0),24 },
+       { IPv4(198,253,246,0),23 },
+       { IPv4(198,253,253,0),24 },
+       { IPv4(198,253,254,0),24 },
+       { IPv4(198,253,255,0),24 },
+       { IPv4(198,254,0,0),20 },
+       { IPv4(199,0,8,0),24 },
+       { IPv4(199,1,1,0),24 },
+       { IPv4(199,1,91,0),24 },
+       { IPv4(199,1,156,0),24 },
+       { IPv4(199,1,157,0),24 },
+       { IPv4(199,1,204,0),22 },
+       { IPv4(199,2,8,0),21 },
+       { IPv4(199,2,16,0),20 },
+       { IPv4(199,2,50,0),24 },
+       { IPv4(199,2,64,0),19 },
+       { IPv4(199,2,135,0),24 },
+       { IPv4(199,2,137,0),24 },
+       { IPv4(199,2,139,0),24 },
+       { IPv4(199,3,10,0),23 },
+       { IPv4(199,3,12,0),24 },
+       { IPv4(199,3,109,0),24 },
+       { IPv4(199,3,182,0),24 },
+       { IPv4(199,3,240,0),24 },
+       { IPv4(199,4,48,0),22 },
+       { IPv4(199,4,56,0),22 },
+       { IPv4(199,4,57,0),24 },
+       { IPv4(199,4,58,0),24 },
+       { IPv4(199,4,64,0),18 },
+       { IPv4(199,4,140,0),22 },
+       { IPv4(199,4,146,0),23 },
+       { IPv4(199,4,151,0),24 },
+       { IPv4(199,4,154,0),24 },
+       { IPv4(199,4,164,0),22 },
+       { IPv4(199,4,187,0),24 },
+       { IPv4(199,4,191,0),24 },
+       { IPv4(199,4,194,0),24 },
+       { IPv4(199,4,220,0),24 },
+       { IPv4(199,4,225,0),24 },
+       { IPv4(199,4,235,0),24 },
+       { IPv4(199,4,246,0),23 },
+       { IPv4(199,4,249,0),24 },
+       { IPv4(199,4,250,0),23 },
+       { IPv4(199,4,252,0),24 },
+       { IPv4(199,4,253,0),24 },
+       { IPv4(199,5,8,0),21 },
+       { IPv4(199,5,16,0),24 },
+       { IPv4(199,5,60,0),24 },
+       { IPv4(199,5,61,0),24 },
+       { IPv4(199,5,133,0),24 },
+       { IPv4(199,5,163,0),24 },
+       { IPv4(199,5,174,0),24 },
+       { IPv4(199,5,176,0),23 },
+       { IPv4(199,5,178,0),24 },
+       { IPv4(199,5,179,0),24 },
+       { IPv4(199,5,180,0),24 },
+       { IPv4(199,5,181,0),24 },
+       { IPv4(199,5,182,0),24 },
+       { IPv4(199,5,202,0),24 },
+       { IPv4(199,5,204,0),22 },
+       { IPv4(199,5,208,0),22 },
+       { IPv4(199,5,225,0),24 },
+       { IPv4(199,5,231,0),24 },
+       { IPv4(199,5,232,0),24 },
+       { IPv4(199,5,243,0),24 },
+       { IPv4(199,5,254,0),24 },
+       { IPv4(199,6,98,0),24 },
+       { IPv4(199,6,127,0),24 },
+       { IPv4(199,9,0,0),24 },
+       { IPv4(199,9,1,0),24 },
+       { IPv4(199,9,2,0),24 },
+       { IPv4(199,9,10,0),23 },
+       { IPv4(199,9,16,0),22 },
+       { IPv4(199,9,64,0),18 },
+       { IPv4(199,10,0,0),16 },
+       { IPv4(199,10,16,0),21 },
+       { IPv4(199,10,50,0),24 },
+       { IPv4(199,10,62,0),24 },
+       { IPv4(199,10,67,0),24 },
+       { IPv4(199,10,77,0),24 },
+       { IPv4(199,10,78,0),23 },
+       { IPv4(199,10,80,0),24 },
+       { IPv4(199,10,81,0),24 },
+       { IPv4(199,10,93,0),24 },
+       { IPv4(199,10,119,0),24 },
+       { IPv4(199,10,127,0),24 },
+       { IPv4(199,10,133,0),24 },
+       { IPv4(199,10,135,0),24 },
+       { IPv4(199,10,138,0),24 },
+       { IPv4(199,10,139,0),24 },
+       { IPv4(199,10,141,0),24 },
+       { IPv4(199,10,142,0),24 },
+       { IPv4(199,10,148,0),24 },
+       { IPv4(199,10,200,0),24 },
+       { IPv4(199,10,215,0),24 },
+       { IPv4(199,10,231,0),24 },
+       { IPv4(199,10,233,0),24 },
+       { IPv4(199,15,0,0),21 },
+       { IPv4(199,15,60,0),22 },
+       { IPv4(199,16,32,0),19 },
+       { IPv4(199,17,0,0),16 },
+       { IPv4(199,19,8,0),24 },
+       { IPv4(199,19,9,0),24 },
+       { IPv4(199,20,8,0),21 },
+       { IPv4(199,20,51,0),24 },
+       { IPv4(199,20,56,0),24 },
+       { IPv4(199,20,59,0),24 },
+       { IPv4(199,20,64,0),18 },
+       { IPv4(199,21,28,0),22 },
+       { IPv4(199,22,0,0),16 },
+       { IPv4(199,26,8,0),21 },
+       { IPv4(199,26,153,0),24 },
+       { IPv4(199,26,155,0),24 },
+       { IPv4(199,26,165,0),24 },
+       { IPv4(199,26,171,0),24 },
+       { IPv4(199,26,199,0),24 },
+       { IPv4(199,26,202,0),24 },
+       { IPv4(199,26,225,0),24 },
+       { IPv4(199,29,1,0),24 },
+       { IPv4(199,29,2,0),24 },
+       { IPv4(199,29,3,0),24 },
+       { IPv4(199,29,6,0),24 },
+       { IPv4(199,29,7,0),24 },
+       { IPv4(199,29,8,0),24 },
+       { IPv4(199,29,9,0),24 },
+       { IPv4(199,29,31,0),24 },
+       { IPv4(199,29,68,0),24 },
+       { IPv4(199,29,92,0),22 },
+       { IPv4(199,29,132,0),24 },
+       { IPv4(199,29,141,0),24 },
+       { IPv4(199,29,144,0),20 },
+       { IPv4(199,29,184,0),24 },
+       { IPv4(199,29,196,0),23 },
+       { IPv4(199,29,201,0),24 },
+       { IPv4(199,29,202,0),24 },
+       { IPv4(199,29,203,0),24 },
+       { IPv4(199,29,204,0),24 },
+       { IPv4(199,29,205,0),24 },
+       { IPv4(199,29,206,0),23 },
+       { IPv4(199,29,208,0),23 },
+       { IPv4(199,29,210,0),24 },
+       { IPv4(199,29,211,0),24 },
+       { IPv4(199,29,212,0),24 },
+       { IPv4(199,29,213,0),24 },
+       { IPv4(199,29,214,0),24 },
+       { IPv4(199,29,215,0),24 },
+       { IPv4(199,29,216,0),22 },
+       { IPv4(199,29,220,0),22 },
+       { IPv4(199,29,242,0),24 },
+       { IPv4(199,29,245,0),24 },
+       { IPv4(199,29,246,0),24 },
+       { IPv4(199,29,247,0),24 },
+       { IPv4(199,29,255,0),24 },
+       { IPv4(199,30,4,0),24 },
+       { IPv4(199,30,32,0),24 },
+       { IPv4(199,31,2,0),24 },
+       { IPv4(199,31,3,0),24 },
+       { IPv4(199,31,8,0),22 },
+       { IPv4(199,31,12,0),24 },
+       { IPv4(199,31,21,0),24 },
+       { IPv4(199,31,31,0),24 },
+       { IPv4(199,31,107,0),24 },
+       { IPv4(199,32,128,0),18 },
+       { IPv4(199,32,192,0),19 },
+       { IPv4(199,33,32,0),19 },
+       { IPv4(199,33,64,0),24 },
+       { IPv4(199,33,79,0),24 },
+       { IPv4(199,33,81,0),24 },
+       { IPv4(199,33,119,0),24 },
+       { IPv4(199,33,128,0),24 },
+       { IPv4(199,33,129,0),24 },
+       { IPv4(199,33,144,0),24 },
+       { IPv4(199,33,159,0),24 },
+       { IPv4(199,33,164,0),23 },
+       { IPv4(199,33,164,0),24 },
+       { IPv4(199,33,165,0),24 },
+       { IPv4(199,33,166,0),24 },
+       { IPv4(199,33,167,0),24 },
+       { IPv4(199,33,168,0),24 },
+       { IPv4(199,33,169,0),24 },
+       { IPv4(199,33,170,0),24 },
+       { IPv4(199,33,171,0),24 },
+       { IPv4(199,33,172,0),24 },
+       { IPv4(199,33,173,0),24 },
+       { IPv4(199,33,182,0),24 },
+       { IPv4(199,33,203,0),24 },
+       { IPv4(199,33,206,0),24 },
+       { IPv4(199,33,217,0),24 },
+       { IPv4(199,33,223,0),24 },
+       { IPv4(199,33,224,0),23 },
+       { IPv4(199,33,238,0),24 },
+       { IPv4(199,33,244,0),24 },
+       { IPv4(199,33,252,0),24 },
+       { IPv4(199,34,16,0),20 },
+       { IPv4(199,34,32,0),24 },
+       { IPv4(199,34,53,0),24 },
+       { IPv4(199,34,138,0),23 },
+       { IPv4(199,34,167,0),24 },
+       { IPv4(199,34,173,0),24 },
+       { IPv4(199,34,183,0),24 },
+       { IPv4(199,34,216,0),23 },
+       { IPv4(199,35,0,0),16 },
+       { IPv4(199,36,19,0),24 },
+       { IPv4(199,36,24,0),24 },
+       { IPv4(199,36,25,0),24 },
+       { IPv4(199,37,0,0),17 },
+       { IPv4(199,37,96,0),24 },
+       { IPv4(199,37,112,0),24 },
+       { IPv4(199,37,116,0),24 },
+       { IPv4(199,37,128,0),24 },
+       { IPv4(199,37,129,0),24 },
+       { IPv4(199,37,130,0),24 },
+       { IPv4(199,37,131,0),24 },
+       { IPv4(199,37,132,0),24 },
+       { IPv4(199,37,133,0),24 },
+       { IPv4(199,37,138,0),24 },
+       { IPv4(199,37,148,0),24 },
+       { IPv4(199,37,158,0),24 },
+       { IPv4(199,37,159,0),24 },
+       { IPv4(199,37,160,0),24 },
+       { IPv4(199,37,161,0),24 },
+       { IPv4(199,37,162,0),24 },
+       { IPv4(199,37,163,0),24 },
+       { IPv4(199,37,164,0),24 },
+       { IPv4(199,37,165,0),24 },
+       { IPv4(199,37,170,0),24 },
+       { IPv4(199,37,173,0),24 },
+       { IPv4(199,37,180,0),24 },
+       { IPv4(199,37,181,0),24 },
+       { IPv4(199,37,192,0),18 },
+       { IPv4(199,37,204,0),24 },
+       { IPv4(199,37,205,0),24 },
+       { IPv4(199,37,213,0),24 },
+       { IPv4(199,37,214,0),24 },
+       { IPv4(199,37,219,0),24 },
+       { IPv4(199,38,0,0),24 },
+       { IPv4(199,38,1,0),24 },
+       { IPv4(199,38,2,0),24 },
+       { IPv4(199,38,3,0),24 },
+       { IPv4(199,38,4,0),24 },
+       { IPv4(199,38,5,0),24 },
+       { IPv4(199,38,6,0),24 },
+       { IPv4(199,38,7,0),24 },
+       { IPv4(199,38,32,0),20 },
+       { IPv4(199,38,48,0),22 },
+       { IPv4(199,38,133,0),24 },
+       { IPv4(199,41,3,0),24 },
+       { IPv4(199,41,8,0),23 },
+       { IPv4(199,41,127,0),24 },
+       { IPv4(199,41,196,0),24 },
+       { IPv4(199,41,197,0),24 },
+       { IPv4(199,41,198,0),23 },
+       { IPv4(199,41,200,0),22 },
+       { IPv4(199,41,248,0),24 },
+       { IPv4(199,41,254,0),24 },
+       { IPv4(199,42,104,0),23 },
+       { IPv4(199,42,240,0),23 },
+       { IPv4(199,43,32,0),24 },
+       { IPv4(199,43,33,0),24 },
+       { IPv4(199,43,34,0),24 },
+       { IPv4(199,43,35,0),24 },
+       { IPv4(199,43,48,0),24 },
+       { IPv4(199,43,49,0),24 },
+       { IPv4(199,43,51,0),24 },
+       { IPv4(199,43,117,0),24 },
+       { IPv4(199,43,172,0),24 },
+       { IPv4(199,45,66,0),23 },
+       { IPv4(199,45,68,0),24 },
+       { IPv4(199,45,70,0),24 },
+       { IPv4(199,45,84,0),24 },
+       { IPv4(199,45,88,0),24 },
+       { IPv4(199,45,123,0),24 },
+       { IPv4(199,45,128,0),17 },
+       { IPv4(199,45,150,0),24 },
+       { IPv4(199,46,8,0),21 },
+       { IPv4(199,46,16,0),20 },
+       { IPv4(199,46,16,0),23 },
+       { IPv4(199,46,18,0),23 },
+       { IPv4(199,46,128,0),17 },
+       { IPv4(199,46,200,0),24 },
+       { IPv4(199,46,245,0),24 },
+       { IPv4(199,46,255,0),24 },
+       { IPv4(199,48,4,0),22 },
+       { IPv4(199,48,22,0),24 },
+       { IPv4(199,48,23,0),24 },
+       { IPv4(199,48,24,0),24 },
+       { IPv4(199,49,3,0),24 },
+       { IPv4(199,49,22,0),24 },
+       { IPv4(199,49,39,0),24 },
+       { IPv4(199,49,70,0),24 },
+       { IPv4(199,50,26,0),24 },
+       { IPv4(199,50,29,0),24 },
+       { IPv4(199,51,77,0),24 },
+       { IPv4(199,51,99,0),24 },
+       { IPv4(199,51,112,0),24 },
+       { IPv4(199,53,16,0),24 },
+       { IPv4(199,53,17,0),24 },
+       { IPv4(199,53,19,0),24 },
+       { IPv4(199,53,20,0),24 },
+       { IPv4(199,53,22,0),24 },
+       { IPv4(199,53,23,0),24 },
+       { IPv4(199,53,74,0),24 },
+       { IPv4(199,53,77,0),24 },
+       { IPv4(199,53,78,0),24 },
+       { IPv4(199,53,98,0),24 },
+       { IPv4(199,53,100,0),24 },
+       { IPv4(199,53,102,0),23 },
+       { IPv4(199,53,183,0),24 },
+       { IPv4(199,53,184,0),24 },
+       { IPv4(199,57,0,0),16 },
+       { IPv4(199,58,52,0),24 },
+       { IPv4(199,59,40,0),24 },
+       { IPv4(199,60,103,0),24 },
+       { IPv4(199,60,237,0),24 },
+       { IPv4(199,64,0,0),24 },
+       { IPv4(199,64,1,0),24 },
+       { IPv4(199,64,7,0),24 },
+       { IPv4(199,64,8,0),24 },
+       { IPv4(199,65,196,0),24 },
+       { IPv4(199,66,1,0),24 },
+       { IPv4(199,66,10,0),24 },
+       { IPv4(199,67,0,0),24 },
+       { IPv4(199,67,7,0),24 },
+       { IPv4(199,67,16,0),20 },
+       { IPv4(199,67,190,0),24 },
+       { IPv4(199,68,35,0),24 },
+       { IPv4(199,68,81,0),24 },
+       { IPv4(199,69,0,0),16 },
+       { IPv4(199,69,32,0),24 },
+       { IPv4(199,70,5,0),24 },
+       { IPv4(199,70,128,0),24 },
+       { IPv4(199,70,144,0),24 },
+       { IPv4(199,70,148,0),24 },
+       { IPv4(199,71,27,0),24 },
+       { IPv4(199,71,40,0),24 },
+       { IPv4(199,71,52,0),24 },
+       { IPv4(199,71,68,0),24 },
+       { IPv4(199,71,115,0),24 },
+       { IPv4(199,71,175,0),24 },
+       { IPv4(199,71,182,0),23 },
+       { IPv4(199,71,187,0),24 },
+       { IPv4(199,71,188,0),24 },
+       { IPv4(199,73,20,0),24 },
+       { IPv4(199,73,32,0),21 },
+       { IPv4(199,73,40,0),23 },
+       { IPv4(199,74,8,0),21 },
+       { IPv4(199,74,141,0),24 },
+       { IPv4(199,74,142,0),24 },
+       { IPv4(199,74,198,0),24 },
+       { IPv4(199,74,206,0),24 },
+       { IPv4(199,74,211,0),24 },
+       { IPv4(199,74,216,0),22 },
+       { IPv4(199,74,220,0),23 },
+       { IPv4(199,74,242,0),24 },
+       { IPv4(199,76,144,0),20 },
+       { IPv4(199,76,160,0),19 },
+       { IPv4(199,76,192,0),24 },
+       { IPv4(199,76,198,0),24 },
+       { IPv4(199,77,44,0),23 },
+       { IPv4(199,77,46,0),24 },
+       { IPv4(199,77,128,0),17 },
+       { IPv4(199,78,60,0),23 },
+       { IPv4(199,79,64,0),19 },
+       { IPv4(199,79,131,0),24 },
+       { IPv4(199,79,136,0),24 },
+       { IPv4(199,79,142,0),24 },
+       { IPv4(199,79,143,0),24 },
+       { IPv4(199,79,144,0),24 },
+       { IPv4(199,79,155,0),24 },
+       { IPv4(199,79,168,0),22 },
+       { IPv4(199,79,186,0),24 },
+       { IPv4(199,79,200,0),24 },
+       { IPv4(199,79,202,0),24 },
+       { IPv4(199,79,215,0),24 },
+       { IPv4(199,79,228,0),24 },
+       { IPv4(199,79,236,0),24 },
+       { IPv4(199,79,250,0),23 },
+       { IPv4(199,79,252,0),23 },
+       { IPv4(199,79,254,0),24 },
+       { IPv4(199,80,50,0),24 },
+       { IPv4(199,80,128,0),17 },
+       { IPv4(199,81,0,0),16 },
+       { IPv4(199,81,192,0),19 },
+       { IPv4(199,82,0,0),16 },
+       { IPv4(199,83,64,0),20 },
+       { IPv4(199,84,1,0),24 },
+       { IPv4(199,84,52,0),24 },
+       { IPv4(199,84,53,0),24 },
+       { IPv4(199,84,54,0),24 },
+       { IPv4(199,84,135,0),24 },
+       { IPv4(199,84,152,0),24 },
+       { IPv4(199,84,172,0),24 },
+       { IPv4(199,84,174,0),23 },
+       { IPv4(199,85,7,0),24 },
+       { IPv4(199,85,9,0),24 },
+       { IPv4(199,85,19,0),24 },
+       { IPv4(199,85,25,0),24 },
+       { IPv4(199,85,107,0),24 },
+       { IPv4(199,85,245,0),24 },
+       { IPv4(199,86,0,0),16 },
+       { IPv4(199,86,16,0),22 },
+       { IPv4(199,86,27,0),24 },
+       { IPv4(199,86,68,0),22 },
+       { IPv4(199,86,128,0),17 },
+       { IPv4(199,87,16,0),21 },
+       { IPv4(199,87,24,0),23 },
+       { IPv4(199,88,14,0),24 },
+       { IPv4(199,88,104,0),23 },
+       { IPv4(199,88,132,0),24 },
+       { IPv4(199,88,134,0),23 },
+       { IPv4(199,88,136,0),23 },
+       { IPv4(199,88,145,0),24 },
+       { IPv4(199,88,147,0),24 },
+       { IPv4(199,88,158,0),24 },
+       { IPv4(199,88,171,0),24 },
+       { IPv4(199,88,179,0),24 },
+       { IPv4(199,88,187,0),24 },
+       { IPv4(199,88,205,0),24 },
+       { IPv4(199,88,232,0),24 },
+       { IPv4(199,88,234,0),24 },
+       { IPv4(199,88,235,0),24 },
+       { IPv4(199,88,249,0),24 },
+       { IPv4(199,89,0,0),21 },
+       { IPv4(199,89,8,0),21 },
+       { IPv4(199,89,64,0),18 },
+       { IPv4(199,89,128,0),24 },
+       { IPv4(199,89,140,0),24 },
+       { IPv4(199,89,163,0),24 },
+       { IPv4(199,89,187,0),24 },
+       { IPv4(199,89,192,0),23 },
+       { IPv4(199,89,214,0),24 },
+       { IPv4(199,89,224,0),24 },
+       { IPv4(199,89,233,0),24 },
+       { IPv4(199,89,234,0),24 },
+       { IPv4(199,89,248,0),24 },
+       { IPv4(199,89,253,0),24 },
+       { IPv4(199,91,32,0),24 },
+       { IPv4(199,91,33,0),24 },
+       { IPv4(199,91,34,0),24 },
+       { IPv4(199,91,35,0),24 },
+       { IPv4(199,91,36,0),24 },
+       { IPv4(199,91,37,0),24 },
+       { IPv4(199,91,38,0),24 },
+       { IPv4(199,91,39,0),24 },
+       { IPv4(199,93,70,0),24 },
+       { IPv4(199,93,71,0),24 },
+       { IPv4(199,96,19,0),24 },
+       { IPv4(199,96,32,0),22 },
+       { IPv4(199,96,40,0),22 },
+       { IPv4(199,96,44,0),23 },
+       { IPv4(199,96,46,0),24 },
+       { IPv4(199,97,12,0),24 },
+       { IPv4(199,97,48,0),24 },
+       { IPv4(199,97,98,0),24 },
+       { IPv4(199,97,121,0),24 },
+       { IPv4(199,97,192,0),22 },
+       { IPv4(199,97,212,0),24 },
+       { IPv4(199,98,7,0),24 },
+       { IPv4(199,98,59,0),24 },
+       { IPv4(199,98,84,0),24 },
+       { IPv4(199,98,88,0),24 },
+       { IPv4(199,98,112,0),23 },
+       { IPv4(199,98,171,0),24 },
+       { IPv4(199,98,200,0),24 },
+       { IPv4(199,98,205,0),24 },
+       { IPv4(199,99,2,0),24 },
+       { IPv4(199,99,64,0),21 },
+       { IPv4(199,99,72,0),23 },
+       { IPv4(199,99,102,0),24 },
+       { IPv4(199,99,156,0),22 },
+       { IPv4(199,99,242,0),24 },
+       { IPv4(199,99,243,0),24 },
+       { IPv4(199,99,244,0),24 },
+       { IPv4(199,99,248,0),21 },
+       { IPv4(199,101,4,0),24 },
+       { IPv4(199,101,6,0),24 },
+       { IPv4(199,101,8,0),21 },
+       { IPv4(199,102,9,0),24 },
+       { IPv4(199,102,15,0),24 },
+       { IPv4(199,102,39,0),24 },
+       { IPv4(199,103,128,0),17 },
+       { IPv4(199,104,0,0),18 },
+       { IPv4(199,104,18,0),24 },
+       { IPv4(199,104,19,0),24 },
+       { IPv4(199,104,20,0),24 },
+       { IPv4(199,104,22,0),24 },
+       { IPv4(199,104,23,0),24 },
+       { IPv4(199,104,32,0),19 },
+       { IPv4(199,104,64,0),18 },
+       { IPv4(199,104,76,0),24 },
+       { IPv4(199,104,78,0),24 },
+       { IPv4(199,104,79,0),24 },
+       { IPv4(199,104,80,0),24 },
+       { IPv4(199,104,81,0),24 },
+       { IPv4(199,104,82,0),24 },
+       { IPv4(199,104,83,0),24 },
+       { IPv4(199,104,84,0),24 },
+       { IPv4(199,104,107,0),24 },
+       { IPv4(199,104,108,0),24 },
+       { IPv4(199,104,112,0),24 },
+       { IPv4(199,104,113,0),24 },
+       { IPv4(199,104,114,0),24 },
+       { IPv4(199,104,115,0),24 },
+       { IPv4(199,104,116,0),24 },
+       { IPv4(199,104,117,0),24 },
+       { IPv4(199,104,118,0),24 },
+       { IPv4(199,104,119,0),24 },
+       { IPv4(199,104,120,0),21 },
+       { IPv4(199,104,128,0),19 },
+       { IPv4(199,104,128,0),24 },
+       { IPv4(199,104,128,0),17 },
+       { IPv4(199,104,132,0),24 },
+       { IPv4(199,104,138,0),23 },
+       { IPv4(199,104,140,0),22 },
+       { IPv4(199,104,144,0),20 },
+       { IPv4(199,104,146,0),24 },
+       { IPv4(199,104,148,0),24 },
+       { IPv4(199,104,149,0),24 },
+       { IPv4(199,104,151,0),24 },
+       { IPv4(199,104,192,0),19 },
+       { IPv4(199,104,192,0),18 },
+       { IPv4(199,104,231,0),24 },
+       { IPv4(199,104,232,0),21 },
+       { IPv4(199,104,244,0),23 },
+       { IPv4(199,104,248,0),21 },
+       { IPv4(199,104,248,0),22 },
+       { IPv4(199,104,252,0),24 },
+       { IPv4(199,105,0,0),18 },
+       { IPv4(199,105,0,0),16 },
+       { IPv4(199,105,64,0),23 },
+       { IPv4(199,105,84,0),22 },
+       { IPv4(199,105,112,0),21 },
+       { IPv4(199,105,120,0),21 },
+       { IPv4(199,105,138,0),24 },
+       { IPv4(199,105,175,0),24 },
+       { IPv4(199,105,186,0),24 },
+       { IPv4(199,105,191,0),24 },
+       { IPv4(199,105,192,0),20 },
+       { IPv4(199,105,209,0),24 },
+       { IPv4(199,106,0,0),15 },
+       { IPv4(199,106,16,0),24 },
+       { IPv4(199,106,17,0),24 },
+       { IPv4(199,106,34,0),24 },
+       { IPv4(199,106,35,0),24 },
+       { IPv4(199,106,52,0),24 },
+       { IPv4(199,106,56,0),24 },
+       { IPv4(199,106,64,0),21 },
+       { IPv4(199,106,65,0),24 },
+       { IPv4(199,106,72,0),22 },
+       { IPv4(199,106,78,0),24 },
+       { IPv4(199,106,174,0),23 },
+       { IPv4(199,106,176,0),23 },
+       { IPv4(199,106,185,0),24 },
+       { IPv4(199,106,186,0),24 },
+       { IPv4(199,106,208,0),20 },
+       { IPv4(199,106,232,0),21 },
+       { IPv4(199,107,24,0),22 },
+       { IPv4(199,107,26,0),23 },
+       { IPv4(199,107,96,0),22 },
+       { IPv4(199,107,144,0),24 },
+       { IPv4(199,107,160,0),21 },
+       { IPv4(199,108,0,0),16 },
+       { IPv4(199,108,16,0),24 },
+       { IPv4(199,108,40,0),24 },
+       { IPv4(199,108,42,0),23 },
+       { IPv4(199,108,44,0),22 },
+       { IPv4(199,108,48,0),21 },
+       { IPv4(199,108,56,0),23 },
+       { IPv4(199,108,64,0),24 },
+       { IPv4(199,108,66,0),24 },
+       { IPv4(199,108,88,0),23 },
+       { IPv4(199,108,160,0),21 },
+       { IPv4(199,108,164,0),24 },
+       { IPv4(199,108,167,0),24 },
+       { IPv4(199,108,189,0),24 },
+       { IPv4(199,108,224,0),22 },
+       { IPv4(199,108,228,0),23 },
+       { IPv4(199,109,32,0),22 },
+       { IPv4(199,111,161,0),24 },
+       { IPv4(199,112,0,0),19 },
+       { IPv4(199,112,32,0),24 },
+       { IPv4(199,112,36,0),24 },
+       { IPv4(199,113,128,0),17 },
+       { IPv4(199,114,0,0),21 },
+       { IPv4(199,114,6,0),24 },
+       { IPv4(199,114,8,0),22 },
+       { IPv4(199,114,32,0),20 },
+       { IPv4(199,114,48,0),22 },
+       { IPv4(199,114,128,0),18 },
+       { IPv4(199,115,8,0),21 },
+       { IPv4(199,115,16,0),21 },
+       { IPv4(199,115,24,0),23 },
+       { IPv4(199,117,0,0),16 },
+       { IPv4(199,117,144,0),22 },
+       { IPv4(199,117,161,0),24 },
+       { IPv4(199,119,33,0),24 },
+       { IPv4(199,119,40,0),24 },
+       { IPv4(199,120,16,0),20 },
+       { IPv4(199,120,64,0),18 },
+       { IPv4(199,120,79,0),24 },
+       { IPv4(199,120,86,0),24 },
+       { IPv4(199,120,153,0),24 },
+       { IPv4(199,120,157,0),24 },
+       { IPv4(199,120,161,0),24 },
+       { IPv4(199,120,179,0),24 },
+       { IPv4(199,120,183,0),24 },
+       { IPv4(199,120,218,0),24 },
+       { IPv4(199,120,249,0),24 },
+       { IPv4(199,121,0,0),16 },
+       { IPv4(199,121,31,0),24 },
+       { IPv4(199,121,42,0),24 },
+       { IPv4(199,121,124,0),24 },
+       { IPv4(199,121,125,0),24 },
+       { IPv4(199,121,131,0),24 },
+       { IPv4(199,121,132,0),24 },
+       { IPv4(199,121,155,0),24 },
+       { IPv4(199,121,156,0),24 },
+       { IPv4(199,121,157,0),24 },
+       { IPv4(199,121,159,0),24 },
+       { IPv4(199,121,160,0),24 },
+       { IPv4(199,121,174,0),23 },
+       { IPv4(199,121,185,0),24 },
+       { IPv4(199,121,238,0),23 },
+       { IPv4(199,121,240,0),24 },
+       { IPv4(199,121,247,0),24 },
+       { IPv4(199,122,4,0),23 },
+       { IPv4(199,122,32,0),20 },
+       { IPv4(199,122,49,0),24 },
+       { IPv4(199,122,56,0),21 },
+       { IPv4(199,123,16,0),20 },
+       { IPv4(199,123,32,0),20 },
+       { IPv4(199,123,71,0),24 },
+       { IPv4(199,123,72,0),21 },
+       { IPv4(199,123,80,0),21 },
+       { IPv4(199,123,87,0),24 },
+       { IPv4(199,123,88,0),23 },
+       { IPv4(199,123,88,0),24 },
+       { IPv4(199,123,89,0),24 },
+       { IPv4(199,123,90,0),24 },
+       { IPv4(199,123,92,0),24 },
+       { IPv4(199,123,104,0),21 },
+       { IPv4(199,123,112,0),21 },
+       { IPv4(199,123,121,0),24 },
+       { IPv4(199,123,122,0),23 },
+       { IPv4(199,123,124,0),24 },
+       { IPv4(199,124,8,0),21 },
+       { IPv4(199,124,16,0),21 },
+       { IPv4(199,125,8,0),24 },
+       { IPv4(199,125,9,0),24 },
+       { IPv4(199,125,10,0),24 },
+       { IPv4(199,127,16,0),24 },
+       { IPv4(199,127,25,0),24 },
+       { IPv4(199,127,27,0),24 },
+       { IPv4(199,127,32,0),21 },
+       { IPv4(199,127,40,0),23 },
+       { IPv4(199,131,64,0),19 },
+       { IPv4(199,131,96,0),21 },
+       { IPv4(199,131,104,0),22 },
+       { IPv4(199,164,167,0),24 },
+       { IPv4(199,164,176,0),23 },
+       { IPv4(199,164,178,0),24 },
+       { IPv4(199,164,180,0),23 },
+       { IPv4(199,164,184,0),24 },
+       { IPv4(199,164,185,0),24 },
+       { IPv4(199,164,191,0),24 },
+       { IPv4(199,164,192,0),24 },
+       { IPv4(199,164,194,0),24 },
+       { IPv4(199,164,200,0),24 },
+       { IPv4(199,164,210,0),24 },
+       { IPv4(199,164,214,0),24 },
+       { IPv4(199,164,235,0),24 },
+       { IPv4(199,164,236,0),24 },
+       { IPv4(199,164,237,0),24 },
+       { IPv4(199,164,250,0),24 },
+       { IPv4(199,165,0,0),21 },
+       { IPv4(199,165,8,0),22 },
+       { IPv4(199,165,16,0),24 },
+       { IPv4(199,165,16,0),20 },
+       { IPv4(199,165,19,0),24 },
+       { IPv4(199,165,80,0),21 },
+       { IPv4(199,165,137,0),24 },
+       { IPv4(199,165,138,0),24 },
+       { IPv4(199,165,141,0),24 },
+       { IPv4(199,165,150,0),24 },
+       { IPv4(199,165,157,0),24 },
+       { IPv4(199,165,165,0),24 },
+       { IPv4(199,165,180,0),24 },
+       { IPv4(199,165,206,0),24 },
+       { IPv4(199,165,233,0),24 },
+       { IPv4(199,165,247,0),24 },
+       { IPv4(199,165,249,0),24 },
+       { IPv4(199,165,250,0),24 },
+       { IPv4(199,166,24,0),24 },
+       { IPv4(199,166,25,0),24 },
+       { IPv4(199,166,26,0),24 },
+       { IPv4(199,166,27,0),24 },
+       { IPv4(199,166,28,0),23 },
+       { IPv4(199,166,30,0),24 },
+       { IPv4(199,166,34,0),23 },
+       { IPv4(199,166,36,0),23 },
+       { IPv4(199,166,223,0),24 },
+       { IPv4(199,168,32,0),24 },
+       { IPv4(199,168,33,0),24 },
+       { IPv4(199,168,35,0),24 },
+       { IPv4(199,168,39,0),24 },
+       { IPv4(199,169,208,0),20 },
+       { IPv4(199,170,84,0),23 },
+       { IPv4(199,170,88,0),21 },
+       { IPv4(199,170,121,0),24 },
+       { IPv4(199,170,132,0),24 },
+       { IPv4(199,171,52,0),24 },
+       { IPv4(199,171,96,0),24 },
+       { IPv4(199,171,134,0),24 },
+       { IPv4(199,171,200,0),24 },
+       { IPv4(199,171,201,0),24 },
+       { IPv4(199,172,169,0),24 },
+       { IPv4(199,173,188,0),24 },
+       { IPv4(199,173,232,0),22 },
+       { IPv4(199,174,0,0),16 },
+       { IPv4(199,174,0,0),18 },
+       { IPv4(199,174,16,0),21 },
+       { IPv4(199,174,136,0),24 },
+       { IPv4(199,174,160,0),20 },
+       { IPv4(199,174,176,0),21 },
+       { IPv4(199,174,184,0),22 },
+       { IPv4(199,174,196,0),22 },
+       { IPv4(199,174,200,0),21 },
+       { IPv4(199,174,208,0),20 },
+       { IPv4(199,175,157,0),24 },
+       { IPv4(199,175,219,0),24 },
+       { IPv4(199,175,234,0),24 },
+       { IPv4(199,177,58,0),24 },
+       { IPv4(199,181,71,0),24 },
+       { IPv4(199,181,92,0),22 },
+       { IPv4(199,181,144,0),24 },
+       { IPv4(199,181,150,0),24 },
+       { IPv4(199,181,164,0),22 },
+       { IPv4(199,181,168,0),24 },
+       { IPv4(199,181,178,0),23 },
+       { IPv4(199,181,179,0),24 },
+       { IPv4(199,181,180,0),24 },
+       { IPv4(199,181,193,0),24 },
+       { IPv4(199,181,234,0),24 },
+       { IPv4(199,181,237,0),24 },
+       { IPv4(199,181,251,0),24 },
+       { IPv4(199,181,252,0),24 },
+       { IPv4(199,182,0,0),15 },
+       { IPv4(199,182,110,0),24 },
+       { IPv4(199,182,207,0),24 },
+       { IPv4(199,182,227,0),24 },
+       { IPv4(199,182,248,0),23 },
+       { IPv4(199,182,250,0),24 },
+       { IPv4(199,183,4,0),24 },
+       { IPv4(199,183,32,0),24 },
+       { IPv4(199,183,38,0),24 },
+       { IPv4(199,183,44,0),24 },
+       { IPv4(199,183,185,0),24 },
+       { IPv4(199,183,186,0),24 },
+       { IPv4(199,184,16,0),20 },
+       { IPv4(199,184,75,0),24 },
+       { IPv4(199,184,82,0),24 },
+       { IPv4(199,184,120,0),22 },
+       { IPv4(199,184,162,0),24 },
+       { IPv4(199,184,165,0),24 },
+       { IPv4(199,184,210,0),24 },
+       { IPv4(199,184,226,0),24 },
+       { IPv4(199,184,227,0),24 },
+       { IPv4(199,184,228,0),24 },
+       { IPv4(199,184,236,0),23 },
+       { IPv4(199,184,238,0),24 },
+       { IPv4(199,184,241,0),24 },
+       { IPv4(199,184,243,0),24 },
+       { IPv4(199,184,244,0),24 },
+       { IPv4(199,184,252,0),24 },
+       { IPv4(199,185,1,0),24 },
+       { IPv4(199,185,4,0),24 },
+       { IPv4(199,185,8,0),23 },
+       { IPv4(199,185,104,0),24 },
+       { IPv4(199,185,109,0),24 },
+       { IPv4(199,185,110,0),24 },
+       { IPv4(199,185,116,0),24 },
+       { IPv4(199,185,117,0),24 },
+       { IPv4(199,185,136,0),23 },
+       { IPv4(199,185,230,0),23 },
+       { IPv4(199,186,0,0),16 },
+       { IPv4(199,189,0,0),24 },
+       { IPv4(199,189,1,0),24 },
+       { IPv4(199,189,2,0),24 },
+       { IPv4(199,189,3,0),24 },
+       { IPv4(199,189,4,0),24 },
+       { IPv4(199,189,5,0),24 },
+       { IPv4(199,189,8,0),21 },
+       { IPv4(199,190,64,0),18 },
+       { IPv4(199,190,65,0),24 },
+       { IPv4(199,190,87,0),24 },
+       { IPv4(199,190,99,0),24 },
+       { IPv4(199,190,100,0),24 },
+       { IPv4(199,190,104,0),24 },
+       { IPv4(199,190,116,0),24 },
+       { IPv4(199,190,134,0),24 },
+       { IPv4(199,190,161,0),24 },
+       { IPv4(199,190,174,0),24 },
+       { IPv4(199,190,175,0),24 },
+       { IPv4(199,190,178,0),23 },
+       { IPv4(199,190,180,0),24 },
+       { IPv4(199,190,182,0),24 },
+       { IPv4(199,190,198,0),23 },
+       { IPv4(199,190,200,0),24 },
+       { IPv4(199,190,201,0),24 },
+       { IPv4(199,190,209,0),24 },
+       { IPv4(199,190,222,0),23 },
+       { IPv4(199,190,224,0),23 },
+       { IPv4(199,190,227,0),24 },
+       { IPv4(199,190,244,0),24 },
+       { IPv4(199,190,245,0),24 },
+       { IPv4(199,190,246,0),24 },
+       { IPv4(199,190,247,0),24 },
+       { IPv4(199,190,248,0),24 },
+       { IPv4(199,191,0,0),20 },
+       { IPv4(199,191,32,0),24 },
+       { IPv4(199,191,33,0),24 },
+       { IPv4(199,191,34,0),24 },
+       { IPv4(199,191,35,0),24 },
+       { IPv4(199,191,36,0),24 },
+       { IPv4(199,191,37,0),24 },
+       { IPv4(199,191,128,0),21 },
+       { IPv4(199,191,128,0),22 },
+       { IPv4(199,191,136,0),21 },
+       { IPv4(199,191,144,0),22 },
+       { IPv4(199,191,144,0),21 },
+       { IPv4(199,191,152,0),21 },
+       { IPv4(199,191,160,0),24 },
+       { IPv4(199,191,192,0),24 },
+       { IPv4(199,191,200,0),24 },
+       { IPv4(199,191,208,0),24 },
+       { IPv4(199,192,0,0),21 },
+       { IPv4(199,192,8,0),22 },
+       { IPv4(199,195,64,0),19 },
+       { IPv4(199,195,112,0),23 },
+       { IPv4(199,196,54,0),24 },
+       { IPv4(199,197,0,0),21 },
+       { IPv4(199,197,8,0),22 },
+       { IPv4(199,198,129,0),24 },
+       { IPv4(199,199,0,0),16 },
+       { IPv4(199,199,32,0),19 },
+       { IPv4(199,199,70,0),24 },
+       { IPv4(199,199,118,0),23 },
+       { IPv4(199,199,120,0),21 },
+       { IPv4(199,199,220,0),24 },
+       { IPv4(199,200,9,0),24 },
+       { IPv4(199,200,128,0),24 },
+       { IPv4(199,201,0,0),16 },
+       { IPv4(199,201,6,0),24 },
+       { IPv4(199,201,16,0),22 },
+       { IPv4(199,201,18,0),24 },
+       { IPv4(199,201,20,0),24 },
+       { IPv4(199,201,27,0),24 },
+       { IPv4(199,201,128,0),24 },
+       { IPv4(199,201,129,0),24 },
+       { IPv4(199,201,140,0),24 },
+       { IPv4(199,201,143,0),24 },
+       { IPv4(199,201,145,0),24 },
+       { IPv4(199,201,147,0),24 },
+       { IPv4(199,201,153,0),24 },
+       { IPv4(199,201,154,0),24 },
+       { IPv4(199,201,156,0),24 },
+       { IPv4(199,201,157,0),24 },
+       { IPv4(199,201,158,0),24 },
+       { IPv4(199,201,175,0),24 },
+       { IPv4(199,201,181,0),24 },
+       { IPv4(199,201,192,0),24 },
+       { IPv4(199,201,213,0),24 },
+       { IPv4(199,201,223,0),24 },
+       { IPv4(199,201,231,0),24 },
+       { IPv4(199,201,232,0),22 },
+       { IPv4(199,201,236,0),24 },
+       { IPv4(199,201,237,0),24 },
+       { IPv4(199,201,248,0),24 },
+       { IPv4(199,201,248,0),23 },
+       { IPv4(199,201,249,0),24 },
+       { IPv4(199,202,64,0),24 },
+       { IPv4(199,205,1,0),24 },
+       { IPv4(199,208,0,0),20 },
+       { IPv4(199,208,1,0),24 },
+       { IPv4(199,208,4,0),24 },
+       { IPv4(199,208,5,0),24 },
+       { IPv4(199,208,6,0),24 },
+       { IPv4(199,208,7,0),24 },
+       { IPv4(199,208,16,0),24 },
+       { IPv4(199,208,19,0),24 },
+       { IPv4(199,208,20,0),24 },
+       { IPv4(199,208,21,0),24 },
+       { IPv4(199,208,22,0),24 },
+       { IPv4(199,208,23,0),24 },
+       { IPv4(199,208,25,0),24 },
+       { IPv4(199,208,64,0),18 },
+       { IPv4(199,208,88,0),23 },
+       { IPv4(199,208,91,0),24 },
+       { IPv4(199,208,92,0),24 },
+       { IPv4(199,208,110,0),24 },
+       { IPv4(199,208,128,0),18 },
+       { IPv4(199,208,157,0),24 },
+       { IPv4(199,208,158,0),24 },
+       { IPv4(199,208,172,0),24 },
+       { IPv4(199,208,189,0),24 },
+       { IPv4(199,208,193,0),24 },
+       { IPv4(199,208,194,0),23 },
+       { IPv4(199,208,197,0),24 },
+       { IPv4(199,208,200,0),22 },
+       { IPv4(199,208,213,0),24 },
+       { IPv4(199,208,224,0),19 },
+       { IPv4(199,208,242,0),24 },
+       { IPv4(199,208,247,0),24 },
+       { IPv4(199,208,248,0),24 },
+       { IPv4(199,208,249,0),24 },
+       { IPv4(199,209,0,0),23 },
+       { IPv4(199,209,8,0),22 },
+       { IPv4(199,209,12,0),24 },
+       { IPv4(199,209,16,0),22 },
+       { IPv4(199,209,20,0),23 },
+       { IPv4(199,209,22,0),24 },
+       { IPv4(199,209,32,0),19 },
+       { IPv4(199,209,33,0),24 },
+       { IPv4(199,209,38,0),24 },
+       { IPv4(199,209,39,0),24 },
+       { IPv4(199,209,64,0),19 },
+       { IPv4(199,209,96,0),24 },
+       { IPv4(199,209,98,0),24 },
+       { IPv4(199,209,99,0),24 },
+       { IPv4(199,209,128,0),17 },
+       { IPv4(199,210,0,0),16 },
+       { IPv4(199,211,39,0),24 },
+       { IPv4(199,211,64,0),18 },
+       { IPv4(199,211,65,0),24 },
+       { IPv4(199,211,81,0),24 },
+       { IPv4(199,211,89,0),24 },
+       { IPv4(199,211,100,0),24 },
+       { IPv4(199,211,117,0),24 },
+       { IPv4(199,211,118,0),23 },
+       { IPv4(199,211,120,0),24 },
+       { IPv4(199,211,121,0),24 },
+       { IPv4(199,211,122,0),24 },
+       { IPv4(199,211,128,0),17 },
+       { IPv4(199,211,128,0),23 },
+       { IPv4(199,211,131,0),24 },
+       { IPv4(199,211,134,0),24 },
+       { IPv4(199,211,150,0),24 },
+       { IPv4(199,211,157,0),24 },
+       { IPv4(199,211,158,0),23 },
+       { IPv4(199,211,160,0),24 },
+       { IPv4(199,211,162,0),24 },
+       { IPv4(199,211,163,0),24 },
+       { IPv4(199,211,172,0),22 },
+       { IPv4(199,211,180,0),24 },
+       { IPv4(199,211,181,0),24 },
+       { IPv4(199,211,182,0),24 },
+       { IPv4(199,211,183,0),24 },
+       { IPv4(199,211,192,0),23 },
+       { IPv4(199,211,197,0),24 },
+       { IPv4(199,211,198,0),24 },
+       { IPv4(199,211,199,0),24 },
+       { IPv4(199,211,211,0),24 },
+       { IPv4(199,211,214,0),24 },
+       { IPv4(199,211,219,0),24 },
+       { IPv4(199,211,220,0),24 },
+       { IPv4(199,211,225,0),24 },
+       { IPv4(199,211,226,0),24 },
+       { IPv4(199,211,228,0),24 },
+       { IPv4(199,211,253,0),24 },
+       { IPv4(199,212,18,0),24 },
+       { IPv4(199,212,45,0),24 },
+       { IPv4(199,212,53,0),24 },
+       { IPv4(199,212,63,0),24 },
+       { IPv4(199,212,120,0),22 },
+       { IPv4(199,212,129,0),24 },
+       { IPv4(199,212,132,0),24 },
+       { IPv4(199,212,134,0),24 },
+       { IPv4(199,212,135,0),24 },
+       { IPv4(199,212,144,0),24 },
+       { IPv4(199,212,150,0),24 },
+       { IPv4(199,212,200,0),24 },
+       { IPv4(199,212,215,0),24 },
+       { IPv4(199,212,232,0),24 },
+       { IPv4(199,216,250,0),23 },
+       { IPv4(199,217,8,0),21 },
+       { IPv4(199,217,128,0),17 },
+       { IPv4(199,217,157,0),24 },
+       { IPv4(199,217,214,0),23 },
+       { IPv4(199,217,217,0),24 },
+       { IPv4(199,217,220,0),24 },
+       { IPv4(199,219,5,0),24 },
+       { IPv4(199,219,128,0),18 },
+       { IPv4(199,219,192,0),20 },
+       { IPv4(199,219,208,0),21 },
+       { IPv4(199,219,216,0),24 },
+       { IPv4(199,221,65,0),24 },
+       { IPv4(199,221,224,0),19 },
+       { IPv4(199,222,4,0),24 },
+       { IPv4(199,222,33,0),24 },
+       { IPv4(199,222,100,0),24 },
+       { IPv4(199,222,128,0),20 },
+       { IPv4(199,222,141,0),24 },
+       { IPv4(199,222,160,0),20 },
+       { IPv4(199,223,139,0),24 },
+       { IPv4(199,223,145,0),24 },
+       { IPv4(199,223,148,0),24 },
+       { IPv4(199,223,149,0),24 },
+       { IPv4(199,223,155,0),24 },
+       { IPv4(199,223,156,0),24 },
+       { IPv4(199,223,178,0),24 },
+       { IPv4(199,223,180,0),24 },
+       { IPv4(199,224,0,0),20 },
+       { IPv4(199,224,64,0),18 },
+       { IPv4(199,225,0,0),20 },
+       { IPv4(199,226,4,0),22 },
+       { IPv4(199,226,8,0),21 },
+       { IPv4(199,226,16,0),21 },
+       { IPv4(199,226,64,0),19 },
+       { IPv4(199,226,84,0),22 },
+       { IPv4(199,226,96,0),20 },
+       { IPv4(199,226,112,0),21 },
+       { IPv4(199,226,120,0),22 },
+       { IPv4(199,226,124,0),22 },
+       { IPv4(199,226,156,0),24 },
+       { IPv4(199,227,0,0),24 },
+       { IPv4(199,227,0,0),16 },
+       { IPv4(199,227,52,0),23 },
+       { IPv4(199,227,56,0),23 },
+       { IPv4(199,227,72,0),24 },
+       { IPv4(199,227,100,0),23 },
+       { IPv4(199,227,103,0),24 },
+       { IPv4(199,227,115,0),24 },
+       { IPv4(199,227,118,0),23 },
+       { IPv4(199,227,120,0),23 },
+       { IPv4(199,227,124,0),24 },
+       { IPv4(199,227,127,0),24 },
+       { IPv4(199,227,141,0),24 },
+       { IPv4(199,227,153,0),24 },
+       { IPv4(199,227,154,0),24 },
+       { IPv4(199,227,158,0),24 },
+       { IPv4(199,227,190,0),23 },
+       { IPv4(199,227,208,0),23 },
+       { IPv4(199,228,181,0),24 },
+       { IPv4(199,229,1,0),24 },
+       { IPv4(199,229,9,0),24 },
+       { IPv4(199,229,10,0),24 },
+       { IPv4(199,229,12,0),24 },
+       { IPv4(199,229,13,0),24 },
+       { IPv4(199,229,14,0),24 },
+       { IPv4(199,229,20,0),24 },
+       { IPv4(199,229,69,0),24 },
+       { IPv4(199,229,97,0),24 },
+       { IPv4(199,229,103,0),24 },
+       { IPv4(199,229,115,0),24 },
+       { IPv4(199,230,16,0),21 },
+       { IPv4(199,230,26,0),24 },
+       { IPv4(199,230,29,0),24 },
+       { IPv4(199,230,128,0),24 },
+       { IPv4(199,230,129,0),24 },
+       { IPv4(199,230,130,0),24 },
+       { IPv4(199,230,249,0),24 },
+       { IPv4(199,231,48,0),24 },
+       { IPv4(199,231,49,0),24 },
+       { IPv4(199,231,50,0),24 },
+       { IPv4(199,231,51,0),24 },
+       { IPv4(199,232,0,0),16 },
+       { IPv4(199,232,2,0),24 },
+       { IPv4(199,232,56,0),21 },
+       { IPv4(199,232,74,104),30 },
+       { IPv4(199,232,92,0),22 },
+       { IPv4(199,232,131,0),24 },
+       { IPv4(199,232,132,0),23 },
+       { IPv4(199,233,74,0),24 },
+       { IPv4(199,233,77,0),24 },
+       { IPv4(199,233,81,0),24 },
+       { IPv4(199,233,82,0),24 },
+       { IPv4(199,233,87,0),24 },
+       { IPv4(199,233,92,0),24 },
+       { IPv4(199,233,97,0),24 },
+       { IPv4(199,233,98,0),24 },
+       { IPv4(199,233,99,0),24 },
+       { IPv4(199,233,111,0),24 },
+       { IPv4(199,233,130,0),24 },
+       { IPv4(199,233,134,0),24 },
+       { IPv4(199,233,135,0),24 },
+       { IPv4(199,233,154,0),24 },
+       { IPv4(199,233,155,0),24 },
+       { IPv4(199,233,182,0),24 },
+       { IPv4(199,233,185,0),24 },
+       { IPv4(199,234,0,0),16 },
+       { IPv4(199,234,146,0),24 },
+       { IPv4(199,234,225,0),24 },
+       { IPv4(199,234,227,0),24 },
+       { IPv4(199,236,0,0),14 },
+       { IPv4(199,236,212,0),23 },
+       { IPv4(199,237,32,0),23 },
+       { IPv4(199,240,0,0),16 },
+       { IPv4(199,240,130,0),23 },
+       { IPv4(199,240,134,0),24 },
+       { IPv4(199,240,142,0),23 },
+       { IPv4(199,240,170,0),23 },
+       { IPv4(199,240,175,0),24 },
+       { IPv4(199,240,176,0),23 },
+       { IPv4(199,240,226,0),24 },
+       { IPv4(199,242,7,0),24 },
+       { IPv4(199,242,138,0),24 },
+       { IPv4(199,242,141,0),24 },
+       { IPv4(199,242,169,0),24 },
+       { IPv4(199,242,201,0),24 },
+       { IPv4(199,242,206,0),24 },
+       { IPv4(199,242,211,0),24 },
+       { IPv4(199,242,223,0),24 },
+       { IPv4(199,242,244,0),24 },
+       { IPv4(199,244,33,0),24 },
+       { IPv4(199,244,35,0),24 },
+       { IPv4(199,244,192,0),22 },
+       { IPv4(199,244,196,0),23 },
+       { IPv4(199,244,223,0),24 },
+       { IPv4(199,245,16,0),20 },
+       { IPv4(199,245,32,0),24 },
+       { IPv4(199,245,64,0),24 },
+       { IPv4(199,245,81,0),24 },
+       { IPv4(199,245,82,0),24 },
+       { IPv4(199,245,86,0),24 },
+       { IPv4(199,245,87,0),24 },
+       { IPv4(199,245,89,0),24 },
+       { IPv4(199,245,95,0),24 },
+       { IPv4(199,245,96,0),24 },
+       { IPv4(199,245,97,0),24 },
+       { IPv4(199,245,103,0),24 },
+       { IPv4(199,245,110,0),24 },
+       { IPv4(199,245,114,0),23 },
+       { IPv4(199,245,118,0),23 },
+       { IPv4(199,245,120,0),23 },
+       { IPv4(199,245,131,0),24 },
+       { IPv4(199,245,134,0),23 },
+       { IPv4(199,245,140,0),24 },
+       { IPv4(199,245,155,0),24 },
+       { IPv4(199,245,156,0),24 },
+       { IPv4(199,245,157,0),24 },
+       { IPv4(199,245,172,0),24 },
+       { IPv4(199,245,173,0),24 },
+       { IPv4(199,245,177,0),24 },
+       { IPv4(199,245,206,0),24 },
+       { IPv4(199,245,225,0),24 },
+       { IPv4(199,245,242,0),24 },
+       { IPv4(199,245,244,0),23 },
+       { IPv4(199,246,2,0),24 },
+       { IPv4(199,246,3,0),24 },
+       { IPv4(199,246,7,0),24 },
+       { IPv4(199,246,26,0),24 },
+       { IPv4(199,246,36,0),24 },
+       { IPv4(199,246,37,0),24 },
+       { IPv4(199,246,52,0),22 },
+       { IPv4(199,246,56,0),23 },
+       { IPv4(199,246,67,0),24 },
+       { IPv4(199,246,68,0),24 },
+       { IPv4(199,246,107,0),24 },
+       { IPv4(199,246,138,0),24 },
+       { IPv4(199,246,218,0),23 },
+       { IPv4(199,246,220,0),23 },
+       { IPv4(199,246,230,0),24 },
+       { IPv4(199,246,231,0),24 },
+       { IPv4(199,246,232,0),24 },
+       { IPv4(199,246,233,0),24 },
+       { IPv4(199,246,234,0),24 },
+       { IPv4(199,246,235,0),24 },
+       { IPv4(199,246,236,0),24 },
+       { IPv4(199,246,237,0),24 },
+       { IPv4(199,246,238,0),24 },
+       { IPv4(199,246,239,0),24 },
+       { IPv4(199,246,240,0),24 },
+       { IPv4(199,246,241,0),24 },
+       { IPv4(199,246,242,0),24 },
+       { IPv4(199,246,243,0),24 },
+       { IPv4(199,246,244,0),24 },
+       { IPv4(199,246,245,0),24 },
+       { IPv4(199,246,246,0),24 },
+       { IPv4(199,246,247,0),24 },
+       { IPv4(199,246,248,0),24 },
+       { IPv4(199,246,249,0),24 },
+       { IPv4(199,246,250,0),24 },
+       { IPv4(199,246,251,0),24 },
+       { IPv4(199,246,252,0),24 },
+       { IPv4(199,246,253,0),24 },
+       { IPv4(199,247,254,0),24 },
+       { IPv4(199,248,148,0),22 },
+       { IPv4(199,248,170,0),24 },
+       { IPv4(199,248,180,0),24 },
+       { IPv4(199,248,185,0),24 },
+       { IPv4(199,248,201,0),24 },
+       { IPv4(199,248,228,0),24 },
+       { IPv4(199,248,238,0),24 },
+       { IPv4(199,248,240,0),24 },
+       { IPv4(199,248,245,0),24 },
+       { IPv4(199,248,255,0),24 },
+       { IPv4(199,249,136,0),24 },
+       { IPv4(199,249,137,0),24 },
+       { IPv4(199,249,138,0),24 },
+       { IPv4(199,249,150,0),24 },
+       { IPv4(199,249,163,0),24 },
+       { IPv4(199,249,169,0),24 },
+       { IPv4(199,249,180,0),24 },
+       { IPv4(199,249,185,0),24 },
+       { IPv4(199,249,191,0),24 },
+       { IPv4(199,249,198,0),24 },
+       { IPv4(199,249,199,0),24 },
+       { IPv4(199,249,200,0),23 },
+       { IPv4(199,249,223,0),24 },
+       { IPv4(199,249,229,0),24 },
+       { IPv4(199,250,8,0),24 },
+       { IPv4(199,250,13,0),24 },
+       { IPv4(199,250,70,0),23 },
+       { IPv4(199,250,136,0),24 },
+       { IPv4(199,250,137,0),24 },
+       { IPv4(199,250,138,0),24 },
+       { IPv4(199,250,139,0),24 },
+       { IPv4(199,250,140,0),24 },
+       { IPv4(199,250,141,0),24 },
+       { IPv4(199,250,142,0),24 },
+       { IPv4(199,250,143,0),24 },
+       { IPv4(199,250,181,0),24 },
+       { IPv4(199,251,0,0),16 },
+       { IPv4(199,251,27,0),24 },
+       { IPv4(199,251,88,0),24 },
+       { IPv4(199,251,89,0),24 },
+       { IPv4(199,251,187,0),24 },
+       { IPv4(199,251,188,0),24 },
+       { IPv4(199,251,189,0),24 },
+       { IPv4(199,251,217,0),24 },
+       { IPv4(199,251,218,0),24 },
+       { IPv4(199,251,219,0),24 },
+       { IPv4(199,252,8,0),22 },
+       { IPv4(199,252,12,0),24 },
+       { IPv4(199,252,16,0),21 },
+       { IPv4(199,252,24,0),24 },
+       { IPv4(199,252,32,0),20 },
+       { IPv4(199,252,48,0),22 },
+       { IPv4(199,252,128,0),24 },
+       { IPv4(199,252,128,0),18 },
+       { IPv4(199,252,131,0),24 },
+       { IPv4(199,252,132,0),24 },
+       { IPv4(199,252,137,0),24 },
+       { IPv4(199,252,138,0),24 },
+       { IPv4(199,252,139,0),24 },
+       { IPv4(199,252,142,0),24 },
+       { IPv4(199,252,153,0),24 },
+       { IPv4(199,252,155,0),24 },
+       { IPv4(199,252,156,0),24 },
+       { IPv4(199,252,180,0),24 },
+       { IPv4(199,253,32,0),20 },
+       { IPv4(199,253,48,0),21 },
+       { IPv4(199,253,174,0),24 },
+       { IPv4(199,253,200,0),22 },
+       { IPv4(199,253,200,0),21 },
+       { IPv4(199,253,246,0),24 },
+       { IPv4(199,253,248,0),24 },
+       { IPv4(199,254,8,0),21 },
+       { IPv4(199,254,138,0),24 },
+       { IPv4(199,254,154,0),24 },
+       { IPv4(199,254,168,0),24 },
+       { IPv4(199,254,169,0),24 },
+       { IPv4(199,254,179,0),24 },
+       { IPv4(199,254,188,0),24 },
+       { IPv4(199,254,199,0),24 },
+       { IPv4(199,254,202,0),24 },
+       { IPv4(199,254,213,0),24 },
+       { IPv4(199,254,225,0),24 },
+       { IPv4(199,254,229,0),24 },
+       { IPv4(200,0,103,0),24 },
+       { IPv4(200,0,104,0),24 },
+       { IPv4(200,0,105,0),24 },
+       { IPv4(200,0,106,0),24 },
+       { IPv4(200,0,111,0),24 },
+       { IPv4(200,0,112,0),24 },
+       { IPv4(200,0,113,0),24 },
+       { IPv4(200,0,147,0),24 },
+       { IPv4(200,0,155,0),24 },
+       { IPv4(200,0,156,0),24 },
+       { IPv4(200,0,157,0),24 },
+       { IPv4(200,0,158,0),24 },
+       { IPv4(200,0,159,0),24 },
+       { IPv4(200,0,181,0),24 },
+       { IPv4(200,0,182,0),24 },
+       { IPv4(200,0,182,0),23 },
+       { IPv4(200,0,183,0),24 },
+       { IPv4(200,0,187,0),24 },
+       { IPv4(200,0,193,0),24 },
+       { IPv4(200,0,216,0),21 },
+       { IPv4(200,0,224,0),19 },
+       { IPv4(200,1,32,0),19 },
+       { IPv4(200,1,128,0),24 },
+       { IPv4(200,1,143,0),24 },
+       { IPv4(200,1,152,0),24 },
+       { IPv4(200,1,156,0),22 },
+       { IPv4(200,1,161,0),24 },
+       { IPv4(200,2,91,0),24 },
+       { IPv4(200,3,32,0),22 },
+       { IPv4(200,3,60,0),22 },
+       { IPv4(200,3,67,0),24 },
+       { IPv4(200,3,94,0),24 },
+       { IPv4(200,3,95,0),24 },
+       { IPv4(200,3,120,0),21 },
+       { IPv4(200,3,240,0),24 },
+       { IPv4(200,4,0,0),21 },
+       { IPv4(200,4,8,0),24 },
+       { IPv4(200,4,14,0),24 },
+       { IPv4(200,4,15,0),24 },
+       { IPv4(200,4,48,0),22 },
+       { IPv4(200,4,57,0),24 },
+       { IPv4(200,4,96,0),19 },
+       { IPv4(200,4,128,0),20 },
+       { IPv4(200,4,144,0),24 },
+       { IPv4(200,4,146,0),24 },
+       { IPv4(200,4,147,0),24 },
+       { IPv4(200,4,148,0),24 },
+       { IPv4(200,4,149,0),24 },
+       { IPv4(200,4,150,0),24 },
+       { IPv4(200,4,151,0),24 },
+       { IPv4(200,4,152,0),24 },
+       { IPv4(200,4,153,0),24 },
+       { IPv4(200,4,154,0),24 },
+       { IPv4(200,4,155,0),24 },
+       { IPv4(200,4,156,0),24 },
+       { IPv4(200,4,157,0),24 },
+       { IPv4(200,4,159,0),24 },
+       { IPv4(200,5,32,0),21 },
+       { IPv4(200,5,64,0),18 },
+       { IPv4(200,5,192,0),18 },
+       { IPv4(200,6,65,0),24 },
+       { IPv4(200,6,66,0),24 },
+       { IPv4(200,6,73,0),24 },
+       { IPv4(200,6,74,0),24 },
+       { IPv4(200,6,77,0),24 },
+       { IPv4(200,9,145,0),24 },
+       { IPv4(200,9,146,0),24 },
+       { IPv4(200,9,212,0),23 },
+       { IPv4(200,9,219,0),24 },
+       { IPv4(200,9,235,0),24 },
+       { IPv4(200,9,237,0),24 },
+       { IPv4(200,10,96,0),22 },
+       { IPv4(200,10,122,0),24 },
+       { IPv4(200,10,128,0),24 },
+       { IPv4(200,10,143,0),24 },
+       { IPv4(200,10,186,0),24 },
+       { IPv4(200,10,207,0),24 },
+       { IPv4(200,10,233,0),24 },
+       { IPv4(200,10,241,0),24 },
+       { IPv4(200,10,243,0),24 },
+       { IPv4(200,11,34,0),24 },
+       { IPv4(200,12,25,0),24 },
+       { IPv4(200,12,32,0),20 },
+       { IPv4(200,12,56,0),21 },
+       { IPv4(200,12,64,0),24 },
+       { IPv4(200,12,65,0),24 },
+       { IPv4(200,12,66,0),24 },
+       { IPv4(200,12,67,0),24 },
+       { IPv4(200,12,69,0),24 },
+       { IPv4(200,12,71,0),24 },
+       { IPv4(200,12,78,0),24 },
+       { IPv4(200,12,84,0),24 },
+       { IPv4(200,12,88,0),24 },
+       { IPv4(200,12,95,0),24 },
+       { IPv4(200,12,126,0),24 },
+       { IPv4(200,12,127,0),24 },
+       { IPv4(200,12,158,0),23 },
+       { IPv4(200,12,164,0),24 },
+       { IPv4(200,12,166,0),24 },
+       { IPv4(200,12,193,0),24 },
+       { IPv4(200,12,224,0),20 },
+       { IPv4(200,13,52,0),24 },
+       { IPv4(200,13,53,0),24 },
+       { IPv4(200,13,54,0),23 },
+       { IPv4(200,13,88,0),24 },
+       { IPv4(200,13,110,0),24 },
+       { IPv4(200,13,111,0),24 },
+       { IPv4(200,13,113,0),24 },
+       { IPv4(200,13,116,0),24 },
+       { IPv4(200,14,114,0),24 },
+       { IPv4(200,14,115,0),24 },
+       { IPv4(200,14,192,0),24 },
+       { IPv4(200,14,205,0),24 },
+       { IPv4(200,14,206,0),24 },
+       { IPv4(200,14,207,0),24 },
+       { IPv4(200,14,208,0),24 },
+       { IPv4(200,14,209,0),24 },
+       { IPv4(200,14,210,0),24 },
+       { IPv4(200,14,211,0),24 },
+       { IPv4(200,14,215,0),24 },
+       { IPv4(200,14,221,0),24 },
+       { IPv4(200,14,231,0),24 },
+       { IPv4(200,14,232,0),24 },
+       { IPv4(200,14,233,0),24 },
+       { IPv4(200,14,234,0),24 },
+       { IPv4(200,14,236,0),24 },
+       { IPv4(200,14,237,0),24 },
+       { IPv4(200,14,238,0),24 },
+       { IPv4(200,14,239,0),24 },
+       { IPv4(200,14,241,0),24 },
+       { IPv4(200,14,242,0),23 },
+       { IPv4(200,14,244,0),24 },
+       { IPv4(200,14,253,0),24 },
+       { IPv4(200,15,0,0),16 },
+       { IPv4(200,15,28,0),24 },
+       { IPv4(200,16,32,0),20 },
+       { IPv4(200,16,35,0),24 },
+       { IPv4(200,16,48,0),20 },
+       { IPv4(200,16,86,0),24 },
+       { IPv4(200,16,96,0),20 },
+       { IPv4(200,16,162,0),23 },
+       { IPv4(200,16,170,0),24 },
+       { IPv4(200,16,195,0),24 },
+       { IPv4(200,16,199,0),24 },
+       { IPv4(200,16,206,0),23 },
+       { IPv4(200,16,209,0),24 },
+       { IPv4(200,16,210,0),23 },
+       { IPv4(200,16,246,0),24 },
+       { IPv4(200,23,1,0),24 },
+       { IPv4(200,23,3,0),24 },
+       { IPv4(200,23,5,0),24 },
+       { IPv4(200,23,16,0),24 },
+       { IPv4(200,23,17,0),24 },
+       { IPv4(200,23,21,0),24 },
+       { IPv4(200,23,22,0),24 },
+       { IPv4(200,23,23,0),24 },
+       { IPv4(200,23,35,0),24 },
+       { IPv4(200,23,36,0),23 },
+       { IPv4(200,23,40,0),24 },
+       { IPv4(200,23,41,0),24 },
+       { IPv4(200,23,42,0),24 },
+       { IPv4(200,23,43,0),24 },
+       { IPv4(200,23,63,0),24 },
+       { IPv4(200,23,64,0),24 },
+       { IPv4(200,23,66,0),24 },
+       { IPv4(200,23,74,0),24 },
+       { IPv4(200,23,76,0),24 },
+       { IPv4(200,23,77,0),24 },
+       { IPv4(200,23,78,0),24 },
+       { IPv4(200,23,79,0),24 },
+       { IPv4(200,23,80,0),24 },
+       { IPv4(200,23,83,0),24 },
+       { IPv4(200,23,84,0),24 },
+       { IPv4(200,23,87,0),24 },
+       { IPv4(200,23,91,0),24 },
+       { IPv4(200,23,96,0),24 },
+       { IPv4(200,23,100,0),24 },
+       { IPv4(200,23,101,0),24 },
+       { IPv4(200,23,103,0),24 },
+       { IPv4(200,23,105,0),24 },
+       { IPv4(200,23,108,0),24 },
+       { IPv4(200,23,109,0),24 },
+       { IPv4(200,23,110,0),24 },
+       { IPv4(200,23,120,0),24 },
+       { IPv4(200,23,128,0),24 },
+       { IPv4(200,23,129,0),24 },
+       { IPv4(200,23,130,0),24 },
+       { IPv4(200,23,132,0),24 },
+       { IPv4(200,23,134,0),24 },
+       { IPv4(200,23,135,0),24 },
+       { IPv4(200,23,136,0),24 },
+       { IPv4(200,23,140,0),24 },
+       { IPv4(200,23,144,0),24 },
+       { IPv4(200,23,145,0),24 },
+       { IPv4(200,23,146,0),24 },
+       { IPv4(200,23,148,0),24 },
+       { IPv4(200,23,149,0),24 },
+       { IPv4(200,23,150,0),24 },
+       { IPv4(200,23,156,0),24 },
+       { IPv4(200,23,159,0),24 },
+       { IPv4(200,23,160,0),22 },
+       { IPv4(200,23,164,0),24 },
+       { IPv4(200,23,166,0),24 },
+       { IPv4(200,23,172,0),24 },
+       { IPv4(200,23,176,0),24 },
+       { IPv4(200,23,177,0),24 },
+       { IPv4(200,23,178,0),24 },
+       { IPv4(200,23,192,0),22 },
+       { IPv4(200,23,196,0),24 },
+       { IPv4(200,23,197,0),24 },
+       { IPv4(200,23,208,0),24 },
+       { IPv4(200,23,209,0),24 },
+       { IPv4(200,23,210,0),24 },
+       { IPv4(200,23,210,0),23 },
+       { IPv4(200,23,211,0),24 },
+       { IPv4(200,23,214,0),24 },
+       { IPv4(200,23,217,0),24 },
+       { IPv4(200,23,219,0),24 },
+       { IPv4(200,23,240,0),24 },
+       { IPv4(200,23,240,0),21 },
+       { IPv4(200,23,241,0),24 },
+       { IPv4(200,23,242,0),24 },
+       { IPv4(200,23,243,0),24 },
+       { IPv4(200,23,245,0),24 },
+       { IPv4(200,23,247,0),24 },
+       { IPv4(200,23,248,0),24 },
+       { IPv4(200,23,249,0),24 },
+       { IPv4(200,23,250,0),24 },
+       { IPv4(200,23,251,0),24 },
+       { IPv4(200,23,252,0),24 },
+       { IPv4(200,23,253,0),24 },
+       { IPv4(200,24,160,0),20 },
+       { IPv4(200,24,176,0),24 },
+       { IPv4(200,24,177,0),24 },
+       { IPv4(200,24,178,0),23 },
+       { IPv4(200,24,180,0),22 },
+       { IPv4(200,24,184,0),21 },
+       { IPv4(200,24,208,0),20 },
+       { IPv4(200,26,26,0),24 },
+       { IPv4(200,26,33,0),24 },
+       { IPv4(200,26,48,0),20 },
+       { IPv4(200,26,64,0),18 },
+       { IPv4(200,27,0,0),16 },
+       { IPv4(200,27,2,0),24 },
+       { IPv4(200,27,9,0),24 },
+       { IPv4(200,27,60,0),24 },
+       { IPv4(200,27,61,0),24 },
+       { IPv4(200,27,63,0),24 },
+       { IPv4(200,27,64,0),24 },
+       { IPv4(200,27,64,0),19 },
+       { IPv4(200,27,66,0),24 },
+       { IPv4(200,27,67,0),24 },
+       { IPv4(200,27,73,0),24 },
+       { IPv4(200,27,96,0),19 },
+       { IPv4(200,27,99,0),24 },
+       { IPv4(200,27,109,0),24 },
+       { IPv4(200,27,160,0),19 },
+       { IPv4(200,27,192,0),19 },
+       { IPv4(200,27,198,0),24 },
+       { IPv4(200,27,200,0),24 },
+       { IPv4(200,27,201,0),24 },
+       { IPv4(200,27,202,0),24 },
+       { IPv4(200,27,203,0),24 },
+       { IPv4(200,27,204,0),24 },
+       { IPv4(200,27,205,0),24 },
+       { IPv4(200,27,224,0),19 },
+       { IPv4(200,28,152,0),24 },
+       { IPv4(200,30,128,0),24 },
+       { IPv4(200,30,129,0),24 },
+       { IPv4(200,30,192,0),24 },
+       { IPv4(200,30,193,0),24 },
+       { IPv4(200,30,194,0),24 },
+       { IPv4(200,30,195,0),24 },
+       { IPv4(200,30,196,0),24 },
+       { IPv4(200,30,197,0),24 },
+       { IPv4(200,30,198,0),24 },
+       { IPv4(200,30,199,0),24 },
+       { IPv4(200,30,200,0),24 },
+       { IPv4(200,30,201,0),24 },
+       { IPv4(200,30,202,0),24 },
+       { IPv4(200,30,203,0),24 },
+       { IPv4(200,30,204,0),24 },
+       { IPv4(200,30,205,0),24 },
+       { IPv4(200,30,206,0),24 },
+       { IPv4(200,30,207,0),24 },
+       { IPv4(200,30,208,0),24 },
+       { IPv4(200,30,209,0),24 },
+       { IPv4(200,30,210,0),24 },
+       { IPv4(200,30,211,0),24 },
+       { IPv4(200,30,212,0),24 },
+       { IPv4(200,30,213,0),24 },
+       { IPv4(200,30,214,0),24 },
+       { IPv4(200,30,215,0),24 },
+       { IPv4(200,30,216,0),24 },
+       { IPv4(200,30,217,0),24 },
+       { IPv4(200,30,218,0),24 },
+       { IPv4(200,30,219,0),24 },
+       { IPv4(200,30,220,0),24 },
+       { IPv4(200,30,221,0),24 },
+       { IPv4(200,30,222,0),24 },
+       { IPv4(200,30,223,0),24 },
+       { IPv4(200,30,224,0),24 },
+       { IPv4(200,30,225,0),24 },
+       { IPv4(200,30,226,0),24 },
+       { IPv4(200,30,227,0),24 },
+       { IPv4(200,30,228,0),24 },
+       { IPv4(200,30,229,0),24 },
+       { IPv4(200,30,230,0),24 },
+       { IPv4(200,30,231,0),24 },
+       { IPv4(200,30,232,0),24 },
+       { IPv4(200,30,233,0),24 },
+       { IPv4(200,30,234,0),24 },
+       { IPv4(200,30,235,0),24 },
+       { IPv4(200,30,236,0),24 },
+       { IPv4(200,30,237,0),24 },
+       { IPv4(200,30,238,0),24 },
+       { IPv4(200,30,239,0),24 },
+       { IPv4(200,30,240,0),24 },
+       { IPv4(200,30,241,0),24 },
+       { IPv4(200,30,242,0),24 },
+       { IPv4(200,30,243,0),24 },
+       { IPv4(200,30,244,0),24 },
+       { IPv4(200,30,245,0),24 },
+       { IPv4(200,30,246,0),24 },
+       { IPv4(200,30,247,0),24 },
+       { IPv4(200,30,248,0),24 },
+       { IPv4(200,30,249,0),24 },
+       { IPv4(200,30,250,0),24 },
+       { IPv4(200,30,251,0),24 },
+       { IPv4(200,30,252,0),24 },
+       { IPv4(200,30,253,0),24 },
+       { IPv4(200,30,254,0),24 },
+       { IPv4(200,30,255,0),24 },
+       { IPv4(200,32,0,0),19 },
+       { IPv4(200,32,0,0),21 },
+       { IPv4(200,32,8,0),21 },
+       { IPv4(200,32,16,0),21 },
+       { IPv4(200,32,24,0),21 },
+       { IPv4(200,32,32,0),19 },
+       { IPv4(200,33,1,0),24 },
+       { IPv4(200,33,3,0),24 },
+       { IPv4(200,33,7,0),24 },
+       { IPv4(200,33,8,0),24 },
+       { IPv4(200,33,15,0),24 },
+       { IPv4(200,33,16,0),24 },
+       { IPv4(200,33,20,0),24 },
+       { IPv4(200,33,21,0),24 },
+       { IPv4(200,33,25,0),24 },
+       { IPv4(200,33,30,0),24 },
+       { IPv4(200,33,31,0),24 },
+       { IPv4(200,33,32,0),24 },
+       { IPv4(200,33,34,0),24 },
+       { IPv4(200,33,36,0),24 },
+       { IPv4(200,33,53,0),24 },
+       { IPv4(200,33,56,0),24 },
+       { IPv4(200,33,60,0),24 },
+       { IPv4(200,33,61,0),24 },
+       { IPv4(200,33,62,0),23 },
+       { IPv4(200,33,68,0),24 },
+       { IPv4(200,33,71,0),24 },
+       { IPv4(200,33,72,0),24 },
+       { IPv4(200,33,74,0),24 },
+       { IPv4(200,33,79,0),24 },
+       { IPv4(200,33,97,0),24 },
+       { IPv4(200,33,99,0),24 },
+       { IPv4(200,33,100,0),22 },
+       { IPv4(200,33,104,0),23 },
+       { IPv4(200,33,111,0),24 },
+       { IPv4(200,33,112,0),24 },
+       { IPv4(200,33,116,0),24 },
+       { IPv4(200,33,117,0),24 },
+       { IPv4(200,33,121,0),24 },
+       { IPv4(200,33,122,0),24 },
+       { IPv4(200,33,136,0),21 },
+       { IPv4(200,33,137,0),24 },
+       { IPv4(200,33,140,0),24 },
+       { IPv4(200,33,142,0),24 },
+       { IPv4(200,33,143,0),24 },
+       { IPv4(200,33,144,0),21 },
+       { IPv4(200,33,151,0),24 },
+       { IPv4(200,33,164,0),22 },
+       { IPv4(200,33,169,0),24 },
+       { IPv4(200,33,170,0),24 },
+       { IPv4(200,33,181,0),24 },
+       { IPv4(200,33,188,0),24 },
+       { IPv4(200,33,189,0),24 },
+       { IPv4(200,33,190,0),24 },
+       { IPv4(200,33,191,0),24 },
+       { IPv4(200,33,206,0),24 },
+       { IPv4(200,33,240,0),22 },
+       { IPv4(200,33,244,0),24 },
+       { IPv4(200,33,245,0),24 },
+       { IPv4(200,33,252,0),24 },
+       { IPv4(200,34,32,0),20 },
+       { IPv4(200,34,48,0),21 },
+       { IPv4(200,34,56,0),22 },
+       { IPv4(200,34,96,0),24 },
+       { IPv4(200,34,97,0),24 },
+       { IPv4(200,34,98,0),24 },
+       { IPv4(200,34,99,0),24 },
+       { IPv4(200,34,100,0),24 },
+       { IPv4(200,34,101,0),24 },
+       { IPv4(200,34,102,0),24 },
+       { IPv4(200,34,103,0),24 },
+       { IPv4(200,34,104,0),24 },
+       { IPv4(200,34,105,0),24 },
+       { IPv4(200,34,106,0),24 },
+       { IPv4(200,34,107,0),24 },
+       { IPv4(200,34,108,0),24 },
+       { IPv4(200,34,109,0),24 },
+       { IPv4(200,34,110,0),24 },
+       { IPv4(200,34,112,0),20 },
+       { IPv4(200,34,128,0),24 },
+       { IPv4(200,34,139,0),24 },
+       { IPv4(200,34,140,0),24 },
+       { IPv4(200,34,142,0),24 },
+       { IPv4(200,34,149,0),24 },
+       { IPv4(200,34,150,0),24 },
+       { IPv4(200,34,152,0),24 },
+       { IPv4(200,34,153,0),24 },
+       { IPv4(200,34,154,0),24 },
+       { IPv4(200,34,155,0),24 },
+       { IPv4(200,34,157,0),24 },
+       { IPv4(200,34,159,0),24 },
+       { IPv4(200,34,160,0),22 },
+       { IPv4(200,34,164,0),22 },
+       { IPv4(200,34,168,0),24 },
+       { IPv4(200,34,169,0),24 },
+       { IPv4(200,34,171,0),24 },
+       { IPv4(200,34,175,0),24 },
+       { IPv4(200,34,176,0),24 },
+       { IPv4(200,34,177,0),24 },
+       { IPv4(200,34,178,0),24 },
+       { IPv4(200,34,179,0),24 },
+       { IPv4(200,34,181,0),24 },
+       { IPv4(200,34,182,0),24 },
+       { IPv4(200,34,183,0),24 },
+       { IPv4(200,34,185,0),24 },
+       { IPv4(200,34,186,0),24 },
+       { IPv4(200,34,187,0),24 },
+       { IPv4(200,34,188,0),24 },
+       { IPv4(200,34,189,0),24 },
+       { IPv4(200,34,190,0),24 },
+       { IPv4(200,34,191,0),24 },
+       { IPv4(200,34,201,0),24 },
+       { IPv4(200,34,204,0),24 },
+       { IPv4(200,34,205,0),24 },
+       { IPv4(200,34,206,0),24 },
+       { IPv4(200,34,221,0),24 },
+       { IPv4(200,34,222,0),24 },
+       { IPv4(200,34,223,0),24 },
+       { IPv4(200,36,0,0),19 },
+       { IPv4(200,36,12,0),24 },
+       { IPv4(200,36,27,0),24 },
+       { IPv4(200,36,28,0),22 },
+       { IPv4(200,36,32,0),19 },
+       { IPv4(200,36,64,0),19 },
+       { IPv4(200,36,128,0),19 },
+       { IPv4(200,36,176,0),20 },
+       { IPv4(200,36,224,0),20 },
+       { IPv4(200,36,229,0),24 },
+       { IPv4(200,36,240,0),21 },
+       { IPv4(200,36,248,0),21 },
+       { IPv4(200,38,0,0),19 },
+       { IPv4(200,38,32,0),19 },
+       { IPv4(200,38,96,0),20 },
+       { IPv4(200,38,112,0),21 },
+       { IPv4(200,38,120,0),24 },
+       { IPv4(200,38,121,0),24 },
+       { IPv4(200,38,122,0),24 },
+       { IPv4(200,38,126,0),24 },
+       { IPv4(200,38,128,0),19 },
+       { IPv4(200,38,152,0),24 },
+       { IPv4(200,38,192,0),19 },
+       { IPv4(200,38,215,0),24 },
+       { IPv4(200,38,241,0),24 },
+       { IPv4(200,39,0,0),20 },
+       { IPv4(200,39,16,0),20 },
+       { IPv4(200,39,32,0),19 },
+       { IPv4(200,39,64,0),19 },
+       { IPv4(200,39,160,0),19 },
+       { IPv4(200,39,192,0),24 },
+       { IPv4(200,39,212,0),22 },
+       { IPv4(200,39,216,0),23 },
+       { IPv4(200,39,219,0),24 },
+       { IPv4(200,41,9,0),24 },
+       { IPv4(200,41,54,0),23 },
+       { IPv4(200,41,94,0),23 },
+       { IPv4(200,41,129,0),24 },
+       { IPv4(200,41,146,0),23 },
+       { IPv4(200,41,176,0),24 },
+       { IPv4(200,41,199,0),24 },
+       { IPv4(200,41,210,0),23 },
+       { IPv4(200,41,224,0),20 },
+       { IPv4(200,41,246,0),23 },
+       { IPv4(200,42,0,0),19 },
+       { IPv4(200,42,32,0),24 },
+       { IPv4(200,42,33,0),24 },
+       { IPv4(200,42,34,0),24 },
+       { IPv4(200,42,64,0),19 },
+       { IPv4(200,42,96,0),21 },
+       { IPv4(200,42,96,0),19 },
+       { IPv4(200,42,112,0),21 },
+       { IPv4(200,42,128,0),19 },
+       { IPv4(200,42,144,0),21 },
+       { IPv4(200,47,152,0),24 },
+       { IPv4(200,47,154,0),24 },
+       { IPv4(200,47,157,0),24 },
+       { IPv4(200,47,159,0),24 },
+       { IPv4(200,47,177,0),24 },
+       { IPv4(200,47,179,0),24 },
+       { IPv4(200,47,184,0),24 },
+       { IPv4(200,47,185,0),24 },
+       { IPv4(200,47,186,0),24 },
+       { IPv4(200,47,187,0),24 },
+       { IPv4(200,47,188,0),24 },
+       { IPv4(200,49,0,0),24 },
+       { IPv4(200,49,90,0),24 },
+       { IPv4(200,50,67,0),24 },
+       { IPv4(200,51,0,0),23 },
+       { IPv4(200,51,14,0),24 },
+       { IPv4(200,51,19,0),24 },
+       { IPv4(200,51,27,0),24 },
+       { IPv4(200,51,40,0),21 },
+       { IPv4(200,51,58,0),24 },
+       { IPv4(200,51,64,0),24 },
+       { IPv4(200,51,65,0),24 },
+       { IPv4(200,51,70,0),24 },
+       { IPv4(200,51,80,0),20 },
+       { IPv4(200,51,96,0),21 },
+       { IPv4(200,51,144,0),22 },
+       { IPv4(200,51,148,0),22 },
+       { IPv4(200,51,152,0),22 },
+       { IPv4(200,51,167,0),24 },
+       { IPv4(200,51,170,0),24 },
+       { IPv4(200,51,188,0),23 },
+       { IPv4(200,51,192,0),20 },
+       { IPv4(200,51,238,0),24 },
+       { IPv4(200,51,242,0),23 },
+       { IPv4(200,51,252,0),22 },
+       { IPv4(200,52,16,0),21 },
+       { IPv4(200,52,24,0),21 },
+       { IPv4(200,52,103,0),24 },
+       { IPv4(200,52,112,0),24 },
+       { IPv4(200,52,112,0),20 },
+       { IPv4(200,52,114,0),24 },
+       { IPv4(200,52,115,0),24 },
+       { IPv4(200,52,116,0),24 },
+       { IPv4(200,52,117,0),24 },
+       { IPv4(200,52,118,0),24 },
+       { IPv4(200,52,119,0),24 },
+       { IPv4(200,52,120,0),24 },
+       { IPv4(200,52,121,0),24 },
+       { IPv4(200,52,122,0),24 },
+       { IPv4(200,52,123,0),24 },
+       { IPv4(200,52,124,0),24 },
+       { IPv4(200,52,125,0),24 },
+       { IPv4(200,52,126,0),24 },
+       { IPv4(200,52,127,0),24 },
+       { IPv4(200,52,144,0),20 },
+       { IPv4(200,52,161,0),24 },
+       { IPv4(200,52,163,0),24 },
+       { IPv4(200,52,164,0),24 },
+       { IPv4(200,52,165,0),24 },
+       { IPv4(200,52,166,0),24 },
+       { IPv4(200,52,174,0),24 },
+       { IPv4(200,52,175,0),24 },
+       { IPv4(200,52,190,0),24 },
+       { IPv4(200,52,191,0),24 },
+       { IPv4(200,52,240,0),20 },
+       { IPv4(200,53,224,0),20 },
+       { IPv4(200,55,42,0),24 },
+       { IPv4(200,56,64,0),20 },
+       { IPv4(200,56,80,0),20 },
+       { IPv4(200,56,85,0),24 },
+       { IPv4(200,56,90,0),24 },
+       { IPv4(200,56,112,0),24 },
+       { IPv4(200,56,112,0),20 },
+       { IPv4(200,56,113,0),24 },
+       { IPv4(200,56,114,0),24 },
+       { IPv4(200,56,115,0),24 },
+       { IPv4(200,56,116,0),24 },
+       { IPv4(200,56,123,0),24 },
+       { IPv4(200,56,124,0),24 },
+       { IPv4(200,56,126,0),24 },
+       { IPv4(200,56,127,0),24 },
+       { IPv4(200,56,192,0),24 },
+       { IPv4(200,56,193,0),24 },
+       { IPv4(200,56,194,0),24 },
+       { IPv4(200,56,195,0),24 },
+       { IPv4(200,56,196,0),24 },
+       { IPv4(200,56,197,0),24 },
+       { IPv4(200,56,198,0),24 },
+       { IPv4(200,56,199,0),24 },
+       { IPv4(200,56,200,0),24 },
+       { IPv4(200,56,224,0),20 },
+       { IPv4(200,57,32,0),20 },
+       { IPv4(200,57,48,0),20 },
+       { IPv4(200,57,80,0),20 },
+       { IPv4(200,57,128,0),20 },
+       { IPv4(200,57,144,0),20 },
+       { IPv4(200,61,32,0),20 },
+       { IPv4(200,61,32,0),23 },
+       { IPv4(200,61,34,0),23 },
+       { IPv4(200,61,36,0),23 },
+       { IPv4(200,61,38,0),23 },
+       { IPv4(200,61,38,0),24 },
+       { IPv4(200,61,40,0),24 },
+       { IPv4(200,61,40,0),23 },
+       { IPv4(200,61,42,0),23 },
+       { IPv4(200,61,44,0),23 },
+       { IPv4(200,61,46,0),23 },
+       { IPv4(200,61,48,0),23 },
+       { IPv4(200,61,50,0),23 },
+       { IPv4(200,61,52,0),23 },
+       { IPv4(200,61,54,0),23 },
+       { IPv4(200,61,56,0),23 },
+       { IPv4(200,61,58,0),23 },
+       { IPv4(200,61,60,0),23 },
+       { IPv4(200,61,62,0),23 },
+       { IPv4(200,61,64,0),24 },
+       { IPv4(200,61,128,0),19 },
+       { IPv4(200,62,7,0),24 },
+       { IPv4(200,62,16,0),20 },
+       { IPv4(200,62,128,0),21 },
+       { IPv4(200,62,128,0),24 },
+       { IPv4(200,62,130,0),24 },
+       { IPv4(200,62,136,0),21 },
+       { IPv4(200,62,144,0),21 },
+       { IPv4(200,62,192,0),21 },
+       { IPv4(200,62,200,0),21 },
+       { IPv4(200,64,0,0),16 },
+       { IPv4(200,64,0,0),18 },
+       { IPv4(200,64,18,0),24 },
+       { IPv4(200,64,64,0),18 },
+       { IPv4(200,64,128,0),19 },
+       { IPv4(200,64,160,0),19 },
+       { IPv4(200,64,192,0),19 },
+       { IPv4(200,64,224,0),19 },
+       { IPv4(200,65,0,0),18 },
+       { IPv4(200,65,0,0),16 },
+       { IPv4(200,65,7,0),24 },
+       { IPv4(200,65,8,0),24 },
+       { IPv4(200,65,22,0),24 },
+       { IPv4(200,65,24,0),24 },
+       { IPv4(200,65,25,0),24 },
+       { IPv4(200,65,32,0),24 },
+       { IPv4(200,65,64,0),18 },
+       { IPv4(200,65,113,0),24 },
+       { IPv4(200,65,114,0),24 },
+       { IPv4(200,65,128,0),19 },
+       { IPv4(200,65,160,0),19 },
+       { IPv4(200,65,175,0),24 },
+       { IPv4(200,65,192,0),19 },
+       { IPv4(200,65,224,0),19 },
+       { IPv4(200,66,112,0),24 },
+       { IPv4(200,66,112,0),20 },
+       { IPv4(200,66,113,0),24 },
+       { IPv4(200,66,114,0),24 },
+       { IPv4(200,66,115,0),24 },
+       { IPv4(200,66,119,0),24 },
+       { IPv4(200,66,123,0),24 },
+       { IPv4(200,66,125,0),24 },
+       { IPv4(200,66,128,0),19 },
+       { IPv4(200,66,128,0),17 },
+       { IPv4(200,66,160,0),19 },
+       { IPv4(200,66,192,0),20 },
+       { IPv4(200,66,208,0),20 },
+       { IPv4(200,66,224,0),20 },
+       { IPv4(200,66,240,0),20 },
+       { IPv4(200,67,0,0),16 },
+       { IPv4(200,67,0,0),17 },
+       { IPv4(200,67,128,0),17 },
+       { IPv4(200,69,0,0),22 },
+       { IPv4(200,69,4,0),22 },
+       { IPv4(200,69,8,0),22 },
+       { IPv4(200,69,12,0),22 },
+       { IPv4(200,74,1,0),24 },
+       { IPv4(200,74,29,0),24 },
+       { IPv4(200,74,30,0),24 },
+       { IPv4(200,74,31,0),24 },
+       { IPv4(200,74,137,0),24 },
+       { IPv4(200,74,160,0),24 },
+       { IPv4(200,74,161,0),24 },
+       { IPv4(200,74,162,0),24 },
+       { IPv4(200,74,163,0),24 },
+       { IPv4(200,74,164,0),24 },
+       { IPv4(200,74,165,0),24 },
+       { IPv4(200,74,166,0),24 },
+       { IPv4(200,74,167,0),24 },
+       { IPv4(200,74,168,0),24 },
+       { IPv4(200,74,169,0),24 },
+       { IPv4(200,74,170,0),24 },
+       { IPv4(200,74,171,0),24 },
+       { IPv4(200,74,172,0),24 },
+       { IPv4(200,74,173,0),24 },
+       { IPv4(200,74,174,0),24 },
+       { IPv4(200,74,175,0),24 },
+       { IPv4(200,76,192,0),20 },
+       { IPv4(200,76,208,0),20 },
+       { IPv4(200,80,128,0),23 },
+       { IPv4(200,80,130,0),23 },
+       { IPv4(200,80,132,0),23 },
+       { IPv4(200,80,136,0),23 },
+       { IPv4(200,155,0,0),21 },
+       { IPv4(200,160,32,0),20 },
+       { IPv4(200,169,80,0),20 },
+       { IPv4(200,170,64,0),20 },
+       { IPv4(200,173,0,0),16 },
+       { IPv4(200,175,64,0),18 },
+       { IPv4(200,175,128,0),18 },
+       { IPv4(200,187,96,0),20 },
+       { IPv4(200,187,112,0),22 },
+       { IPv4(200,187,116,0),24 },
+       { IPv4(200,187,160,0),20 },
+       { IPv4(200,189,0,0),22 },
+       { IPv4(200,189,4,0),22 },
+       { IPv4(200,189,160,0),19 },
+       { IPv4(200,190,0,0),16 },
+       { IPv4(200,192,0,0),18 },
+       { IPv4(200,192,80,0),20 },
+       { IPv4(200,192,128,0),20 },
+       { IPv4(200,192,160,0),20 },
+       { IPv4(200,192,224,0),21 },
+       { IPv4(200,194,112,0),20 },
+       { IPv4(200,195,128,0),19 },
+       { IPv4(200,195,160,0),20 },
+       { IPv4(200,195,176,0),20 },
+       { IPv4(200,195,192,0),20 },
+       { IPv4(200,195,208,0),20 },
+       { IPv4(200,198,176,0),20 },
+       { IPv4(200,198,184,0),23 },
+       { IPv4(200,198,188,0),24 },
+       { IPv4(200,201,192,0),18 },
+       { IPv4(200,202,5,0),24 },
+       { IPv4(200,202,6,0),24 },
+       { IPv4(200,202,7,0),24 },
+       { IPv4(200,202,13,0),24 },
+       { IPv4(200,202,14,0),24 },
+       { IPv4(200,218,0,0),19 },
+       { IPv4(200,225,80,0),20 },
+       { IPv4(200,225,144,0),20 },
+       { IPv4(200,225,192,0),21 },
+       { IPv4(200,225,192,0),18 },
+       { IPv4(200,225,200,0),21 },
+       { IPv4(200,225,208,0),21 },
+       { IPv4(200,225,216,0),21 },
+       { IPv4(200,225,224,0),21 },
+       { IPv4(200,225,232,0),21 },
+       { IPv4(200,225,240,0),21 },
+       { IPv4(200,225,248,0),21 },
+       { IPv4(200,226,0,0),19 },
+       { IPv4(200,226,64,0),20 },
+       { IPv4(200,226,80,0),20 },
+       { IPv4(200,226,96,0),20 },
+       { IPv4(200,226,128,0),17 },
+       { IPv4(200,229,0,0),20 },
+       { IPv4(200,229,16,0),20 },
+       { IPv4(200,229,32,0),20 },
+       { IPv4(200,229,112,0),24 },
+       { IPv4(200,229,113,0),24 },
+       { IPv4(200,229,114,0),24 },
+       { IPv4(200,229,115,0),24 },
+       { IPv4(200,229,117,0),24 },
+       { IPv4(200,229,118,0),24 },
+       { IPv4(200,229,120,0),24 },
+       { IPv4(200,229,123,0),24 },
+       { IPv4(200,229,128,0),20 },
+       { IPv4(200,229,224,0),20 },
+       { IPv4(200,229,240,0),21 },
+       { IPv4(201,115,100,0),24 },
+       { IPv4(202,0,16,0),20 },
+       { IPv4(202,0,65,0),24 },
+       { IPv4(202,0,71,0),24 },
+       { IPv4(202,0,117,0),24 },
+       { IPv4(202,0,118,0),24 },
+       { IPv4(202,0,126,0),24 },
+       { IPv4(202,0,127,0),24 },
+       { IPv4(202,0,149,0),24 },
+       { IPv4(202,0,155,0),24 },
+       { IPv4(202,1,3,0),24 },
+       { IPv4(202,1,6,0),24 },
+       { IPv4(202,1,7,0),24 },
+       { IPv4(202,1,224,0),21 },
+       { IPv4(202,1,233,0),24 },
+       { IPv4(202,2,8,0),21 },
+       { IPv4(202,2,52,0),22 },
+       { IPv4(202,3,8,0),22 },
+       { IPv4(202,3,12,0),24 },
+       { IPv4(202,4,0,0),21 },
+       { IPv4(202,4,8,0),24 },
+       { IPv4(202,4,185,0),24 },
+       { IPv4(202,4,186,0),24 },
+       { IPv4(202,4,187,0),24 },
+       { IPv4(202,4,189,0),24 },
+       { IPv4(202,4,252,0),22 },
+       { IPv4(202,5,32,0),19 },
+       { IPv4(202,5,64,0),20 },
+       { IPv4(202,5,160,0),24 },
+       { IPv4(202,5,166,0),24 },
+       { IPv4(202,5,170,0),24 },
+       { IPv4(202,5,172,0),24 },
+       { IPv4(202,5,192,0),19 },
+       { IPv4(202,6,100,0),23 },
+       { IPv4(202,6,107,0),24 },
+       { IPv4(202,6,124,0),22 },
+       { IPv4(202,6,192,0),20 },
+       { IPv4(202,7,34,0),24 },
+       { IPv4(202,7,64,0),19 },
+       { IPv4(202,7,80,0),21 },
+       { IPv4(202,7,99,0),24 },
+       { IPv4(202,7,100,0),24 },
+       { IPv4(202,7,101,0),24 },
+       { IPv4(202,7,102,0),24 },
+       { IPv4(202,7,103,0),24 },
+       { IPv4(202,7,168,0),24 },
+       { IPv4(202,7,174,0),24 },
+       { IPv4(202,7,179,0),24 },
+       { IPv4(202,7,182,0),24 },
+       { IPv4(202,7,187,0),24 },
+       { IPv4(202,7,188,0),24 },
+       { IPv4(202,7,189,0),24 },
+       { IPv4(202,7,196,0),24 },
+       { IPv4(202,7,198,0),24 },
+       { IPv4(202,7,199,0),24 },
+       { IPv4(202,7,215,0),24 },
+       { IPv4(202,7,219,0),24 },
+       { IPv4(202,8,1,0),24 },
+       { IPv4(202,8,224,0),24 },
+       { IPv4(202,8,225,0),24 },
+       { IPv4(202,8,226,0),24 },
+       { IPv4(202,8,227,0),24 },
+       { IPv4(202,8,236,0),24 },
+       { IPv4(202,8,237,0),24 },
+       { IPv4(202,8,243,0),24 },
+       { IPv4(202,8,245,0),24 },
+       { IPv4(202,8,246,0),24 },
+       { IPv4(202,8,247,0),24 },
+       { IPv4(202,8,248,0),24 },
+       { IPv4(202,8,249,0),24 },
+       { IPv4(202,8,251,0),24 },
+       { IPv4(202,9,64,0),19 },
+       { IPv4(202,9,144,0),24 },
+       { IPv4(202,9,147,0),24 },
+       { IPv4(202,9,148,0),24 },
+       { IPv4(202,9,149,0),24 },
+       { IPv4(202,9,151,0),24 },
+       { IPv4(202,9,160,0),22 },
+       { IPv4(202,9,174,0),24 },
+       { IPv4(202,9,176,0),24 },
+       { IPv4(202,9,179,0),24 },
+       { IPv4(202,9,180,0),24 },
+       { IPv4(202,9,181,0),24 },
+       { IPv4(202,9,183,0),24 },
+       { IPv4(202,9,187,0),24 },
+       { IPv4(202,9,188,0),22 },
+       { IPv4(202,9,191,0),24 },
+       { IPv4(202,9,255,0),24 },
+       { IPv4(202,10,32,0),21 },
+       { IPv4(202,11,160,0),22 },
+       { IPv4(202,12,8,0),24 },
+       { IPv4(202,12,19,0),24 },
+       { IPv4(202,12,26,0),24 },
+       { IPv4(202,12,27,0),24 },
+       { IPv4(202,12,28,0),24 },
+       { IPv4(202,12,62,0),24 },
+       { IPv4(202,12,87,0),24 },
+       { IPv4(202,12,94,0),23 },
+       { IPv4(202,12,97,0),24 },
+       { IPv4(202,12,112,0),24 },
+       { IPv4(202,12,113,0),24 },
+       { IPv4(202,13,236,0),22 },
+       { IPv4(202,14,19,0),24 },
+       { IPv4(202,14,20,0),22 },
+       { IPv4(202,14,32,0),19 },
+       { IPv4(202,14,82,0),24 },
+       { IPv4(202,14,89,0),24 },
+       { IPv4(202,14,95,0),24 },
+       { IPv4(202,14,99,0),24 },
+       { IPv4(202,14,102,0),24 },
+       { IPv4(202,14,134,0),24 },
+       { IPv4(202,14,141,0),24 },
+       { IPv4(202,14,145,0),24 },
+       { IPv4(202,14,164,0),24 },
+       { IPv4(202,14,229,0),24 },
+       { IPv4(202,15,64,0),19 },
+       { IPv4(202,15,128,0),18 },
+       { IPv4(202,16,104,0),24 },
+       { IPv4(202,16,192,0),21 },
+       { IPv4(202,16,225,0),24 },
+       { IPv4(202,17,16,0),20 },
+       { IPv4(202,17,128,0),19 },
+       { IPv4(202,17,172,0),22 },
+       { IPv4(202,17,180,0),24 },
+       { IPv4(202,17,184,0),22 },
+       { IPv4(202,17,192,0),23 },
+       { IPv4(202,17,208,0),22 },
+       { IPv4(202,17,212,0),22 },
+       { IPv4(202,17,242,0),23 },
+       { IPv4(202,17,242,0),24 },
+       { IPv4(202,18,248,0),23 },
+       { IPv4(202,18,250,0),24 },
+       { IPv4(202,19,0,0),20 },
+       { IPv4(202,19,32,0),24 },
+       { IPv4(202,19,100,0),22 },
+       { IPv4(202,19,112,0),24 },
+       { IPv4(202,19,120,0),24 },
+       { IPv4(202,19,125,0),24 },
+       { IPv4(202,19,214,0),23 },
+       { IPv4(202,19,237,0),24 },
+       { IPv4(202,20,16,0),20 },
+       { IPv4(202,20,64,0),24 },
+       { IPv4(202,20,65,0),24 },
+       { IPv4(202,20,67,0),24 },
+       { IPv4(202,20,68,0),24 },
+       { IPv4(202,20,81,0),24 },
+       { IPv4(202,20,84,0),23 },
+       { IPv4(202,20,92,0),24 },
+       { IPv4(202,20,99,0),24 },
+       { IPv4(202,20,105,0),24 },
+       { IPv4(202,20,106,0),23 },
+       { IPv4(202,20,119,0),24 },
+       { IPv4(202,21,0,0),21 },
+       { IPv4(202,21,8,0),21 },
+       { IPv4(202,21,32,0),19 },
+       { IPv4(202,21,140,0),24 },
+       { IPv4(202,21,144,0),24 },
+       { IPv4(202,21,149,0),24 },
+       { IPv4(202,21,157,0),24 },
+       { IPv4(202,22,8,0),21 },
+       { IPv4(202,22,32,0),19 },
+       { IPv4(202,22,163,0),24 },
+       { IPv4(202,22,166,0),24 },
+       { IPv4(202,22,167,0),24 },
+       { IPv4(202,22,252,0),24 },
+       { IPv4(202,22,255,0),24 },
+       { IPv4(202,23,72,0),21 },
+       { IPv4(202,23,88,0),24 },
+       { IPv4(202,23,93,0),24 },
+       { IPv4(202,23,124,0),24 },
+       { IPv4(202,24,40,0),22 },
+       { IPv4(202,24,192,0),24 },
+       { IPv4(202,25,80,0),20 },
+       { IPv4(202,25,99,0),24 },
+       { IPv4(202,25,115,0),24 },
+       { IPv4(202,25,116,0),22 },
+       { IPv4(202,25,162,0),23 },
+       { IPv4(202,25,192,0),20 },
+       { IPv4(202,26,92,0),24 },
+       { IPv4(202,26,94,0),23 },
+       { IPv4(202,26,187,0),24 },
+       { IPv4(202,26,240,0),21 },
+       { IPv4(202,27,0,0),16 },
+       { IPv4(202,27,16,0),20 },
+       { IPv4(202,27,17,0),24 },
+       { IPv4(202,27,48,0),21 },
+       { IPv4(202,27,56,0),22 },
+       { IPv4(202,27,76,0),24 },
+       { IPv4(202,27,77,0),24 },
+       { IPv4(202,27,83,0),24 },
+       { IPv4(202,27,100,0),22 },
+       { IPv4(202,27,110,0),24 },
+       { IPv4(202,27,140,0),22 },
+       { IPv4(202,27,156,0),22 },
+       { IPv4(202,27,184,0),23 },
+       { IPv4(202,27,192,0),21 },
+       { IPv4(202,27,200,0),22 },
+       { IPv4(202,27,204,0),24 },
+       { IPv4(202,27,209,0),24 },
+       { IPv4(202,27,210,0),24 },
+       { IPv4(202,27,211,0),24 },
+       { IPv4(202,27,212,0),22 },
+       { IPv4(202,27,216,0),22 },
+       { IPv4(202,27,217,0),24 },
+       { IPv4(202,27,222,0),24 },
+       { IPv4(202,27,236,0),24 },
+       { IPv4(202,27,247,0),24 },
+       { IPv4(202,27,250,0),24 },
+       { IPv4(202,27,251,0),24 },
+       { IPv4(202,28,17,0),24 },
+       { IPv4(202,28,24,0),22 },
+       { IPv4(202,28,24,0),24 },
+       { IPv4(202,28,25,0),24 },
+       { IPv4(202,28,26,0),24 },
+       { IPv4(202,28,27,0),24 },
+       { IPv4(202,28,32,0),22 },
+       { IPv4(202,28,68,0),24 },
+       { IPv4(202,28,69,0),24 },
+       { IPv4(202,28,70,0),24 },
+       { IPv4(202,28,71,0),24 },
+       { IPv4(202,28,92,0),24 },
+       { IPv4(202,28,92,0),22 },
+       { IPv4(202,28,116,0),22 },
+       { IPv4(202,28,128,0),21 },
+       { IPv4(202,28,136,0),21 },
+       { IPv4(202,28,144,0),21 },
+       { IPv4(202,28,152,0),21 },
+       { IPv4(202,28,160,0),21 },
+       { IPv4(202,28,168,0),21 },
+       { IPv4(202,28,176,0),21 },
+       { IPv4(202,28,184,0),21 },
+       { IPv4(202,30,0,0),19 },
+       { IPv4(202,30,14,0),23 },
+       { IPv4(202,30,32,0),23 },
+       { IPv4(202,30,34,0),24 },
+       { IPv4(202,30,35,0),24 },
+       { IPv4(202,30,36,0),23 },
+       { IPv4(202,30,38,0),24 },
+       { IPv4(202,30,40,0),22 },
+       { IPv4(202,30,44,0),24 },
+       { IPv4(202,30,46,0),23 },
+       { IPv4(202,30,49,0),24 },
+       { IPv4(202,30,50,0),23 },
+       { IPv4(202,30,52,0),23 },
+       { IPv4(202,30,54,0),24 },
+       { IPv4(202,30,55,0),24 },
+       { IPv4(202,30,56,0),23 },
+       { IPv4(202,30,58,0),24 },
+       { IPv4(202,30,60,0),22 },
+       { IPv4(202,30,64,0),19 },
+       { IPv4(202,30,89,0),24 },
+       { IPv4(202,30,94,0),24 },
+       { IPv4(202,30,96,0),20 },
+       { IPv4(202,30,112,0),21 },
+       { IPv4(202,30,128,0),17 },
+       { IPv4(202,30,128,0),18 },
+       { IPv4(202,30,183,0),24 },
+       { IPv4(202,30,184,0),24 },
+       { IPv4(202,30,190,0),24 },
+       { IPv4(202,30,192,0),24 },
+       { IPv4(202,30,192,0),18 },
+       { IPv4(202,30,192,0),19 },
+       { IPv4(202,30,193,0),24 },
+       { IPv4(202,30,201,0),24 },
+       { IPv4(202,30,224,0),19 },
+       { IPv4(202,30,224,0),24 },
+       { IPv4(202,30,231,0),24 },
+       { IPv4(202,30,232,0),24 },
+       { IPv4(202,30,233,0),24 },
+       { IPv4(202,30,234,0),24 },
+       { IPv4(202,30,235,0),24 },
+       { IPv4(202,30,236,0),24 },
+       { IPv4(202,30,237,0),24 },
+       { IPv4(202,30,238,0),24 },
+       { IPv4(202,31,23,0),24 },
+       { IPv4(202,31,24,0),22 },
+       { IPv4(202,31,29,0),24 },
+       { IPv4(202,31,30,0),23 },
+       { IPv4(202,31,32,0),20 },
+       { IPv4(202,31,48,0),22 },
+       { IPv4(202,31,52,0),23 },
+       { IPv4(202,31,54,0),24 },
+       { IPv4(202,31,56,0),21 },
+       { IPv4(202,31,64,0),21 },
+       { IPv4(202,31,72,0),23 },
+       { IPv4(202,31,75,0),24 },
+       { IPv4(202,31,76,0),22 },
+       { IPv4(202,31,80,0),21 },
+       { IPv4(202,31,88,0),22 },
+       { IPv4(202,31,92,0),23 },
+       { IPv4(202,31,128,0),20 },
+       { IPv4(202,31,144,0),21 },
+       { IPv4(202,31,152,0),24 },
+       { IPv4(202,31,153,0),24 },
+       { IPv4(202,31,154,0),24 },
+       { IPv4(202,31,156,0),22 },
+       { IPv4(202,31,160,0),20 },
+       { IPv4(202,31,180,0),24 },
+       { IPv4(202,31,181,0),24 },
+       { IPv4(202,31,184,0),21 },
+       { IPv4(202,31,192,0),20 },
+       { IPv4(202,31,208,0),22 },
+       { IPv4(202,31,222,0),24 },
+       { IPv4(202,31,224,0),19 },
+       { IPv4(202,33,0,0),16 },
+       { IPv4(202,34,32,0),24 },
+       { IPv4(202,35,72,0),22 },
+       { IPv4(202,35,230,0),24 },
+       { IPv4(202,36,0,0),16 },
+       { IPv4(202,36,43,0),24 },
+       { IPv4(202,36,45,0),24 },
+       { IPv4(202,36,46,0),24 },
+       { IPv4(202,36,75,0),24 },
+       { IPv4(202,36,80,0),24 },
+       { IPv4(202,36,114,0),24 },
+       { IPv4(202,36,121,0),24 },
+       { IPv4(202,36,147,0),24 },
+       { IPv4(202,36,148,0),24 },
+       { IPv4(202,36,154,0),24 },
+       { IPv4(202,36,164,0),22 },
+       { IPv4(202,36,164,0),23 },
+       { IPv4(202,36,166,0),23 },
+       { IPv4(202,36,174,0),24 },
+       { IPv4(202,36,195,0),24 },
+       { IPv4(202,36,202,0),24 },
+       { IPv4(202,36,204,0),23 },
+       { IPv4(202,36,226,0),24 },
+       { IPv4(202,36,227,0),24 },
+       { IPv4(202,36,235,0),24 },
+       { IPv4(202,37,0,0),16 },
+       { IPv4(202,37,0,0),20 },
+       { IPv4(202,37,64,0),23 },
+       { IPv4(202,37,70,0),24 },
+       { IPv4(202,37,71,0),24 },
+       { IPv4(202,37,75,0),24 },
+       { IPv4(202,37,86,0),23 },
+       { IPv4(202,37,88,0),24 },
+       { IPv4(202,37,93,0),24 },
+       { IPv4(202,37,106,0),24 },
+       { IPv4(202,37,107,0),24 },
+       { IPv4(202,37,117,0),24 },
+       { IPv4(202,37,118,0),24 },
+       { IPv4(202,37,120,0),24 },
+       { IPv4(202,37,124,0),23 },
+       { IPv4(202,37,127,0),24 },
+       { IPv4(202,37,129,0),24 },
+       { IPv4(202,37,160,0),24 },
+       { IPv4(202,37,168,0),24 },
+       { IPv4(202,37,220,0),24 },
+       { IPv4(202,37,240,0),23 },
+       { IPv4(202,37,254,0),24 },
+       { IPv4(202,38,8,0),21 },
+       { IPv4(202,38,45,0),24 },
+       { IPv4(202,38,132,0),22 },
+       { IPv4(202,38,161,0),24 },
+       { IPv4(202,38,164,0),22 },
+       { IPv4(202,39,0,0),18 },
+       { IPv4(202,39,64,0),20 },
+       { IPv4(202,39,112,0),20 },
+       { IPv4(202,39,128,0),17 },
+       { IPv4(202,40,16,0),20 },
+       { IPv4(202,40,224,0),21 },
+       { IPv4(202,40,224,0),19 },
+       { IPv4(202,41,106,0),24 },
+       { IPv4(202,43,64,0),19 },
+       { IPv4(202,43,96,0),19 },
+       { IPv4(202,43,248,0),21 },
+       { IPv4(202,44,8,0),21 },
+       { IPv4(202,44,64,0),24 },
+       { IPv4(202,44,68,0),22 },
+       { IPv4(202,44,140,0),23 },
+       { IPv4(202,44,142,0),23 },
+       { IPv4(202,44,144,0),24 },
+       { IPv4(202,44,148,0),22 },
+       { IPv4(202,44,192,0),18 },
+       { IPv4(202,44,216,0),24 },
+       { IPv4(202,46,0,0),20 },
+       { IPv4(202,46,24,0),22 },
+       { IPv4(202,46,28,0),24 },
+       { IPv4(202,46,31,0),24 },
+       { IPv4(202,46,64,0),20 },
+       { IPv4(202,46,80,0),22 },
+       { IPv4(202,46,84,0),24 },
+       { IPv4(202,46,108,0),22 },
+       { IPv4(202,46,130,0),23 },
+       { IPv4(202,46,240,0),20 },
+       { IPv4(202,47,1,0),24 },
+       { IPv4(202,47,56,0),24 },
+       { IPv4(202,47,64,0),20 },
+       { IPv4(202,47,125,0),24 },
+       { IPv4(202,47,132,0),23 },
+       { IPv4(202,47,140,0),24 },
+       { IPv4(202,47,160,0),19 },
+       { IPv4(202,47,224,0),20 },
+       { IPv4(202,47,240,0),21 },
+       { IPv4(202,47,248,0),23 },
+       { IPv4(202,47,250,0),24 },
+       { IPv4(202,47,251,0),24 },
+       { IPv4(202,47,252,0),24 },
+       { IPv4(202,47,252,0),23 },
+       { IPv4(202,47,253,0),24 },
+       { IPv4(202,47,254,0),24 },
+       { IPv4(202,48,8,0),21 },
+       { IPv4(202,48,48,0),20 },
+       { IPv4(202,48,96,0),22 },
+       { IPv4(202,48,106,0),23 },
+       { IPv4(202,48,160,0),22 },
+       { IPv4(202,48,192,0),24 },
+       { IPv4(202,48,208,0),24 },
+       { IPv4(202,49,0,0),21 },
+       { IPv4(202,49,16,0),20 },
+       { IPv4(202,49,62,0),24 },
+       { IPv4(202,49,64,0),21 },
+       { IPv4(202,49,80,0),23 },
+       { IPv4(202,49,84,0),24 },
+       { IPv4(202,49,141,0),24 },
+       { IPv4(202,49,152,0),21 },
+       { IPv4(202,49,172,0),22 },
+       { IPv4(202,49,183,0),24 },
+       { IPv4(202,49,189,0),24 },
+       { IPv4(202,49,224,0),20 },
+       { IPv4(202,49,233,0),24 },
+       { IPv4(202,49,250,0),24 },
+       { IPv4(202,50,0,0),16 },
+       { IPv4(202,50,49,0),24 },
+       { IPv4(202,50,52,0),22 },
+       { IPv4(202,50,60,0),22 },
+       { IPv4(202,50,64,0),21 },
+       { IPv4(202,50,72,0),24 },
+       { IPv4(202,50,94,0),24 },
+       { IPv4(202,50,95,0),24 },
+       { IPv4(202,50,112,0),24 },
+       { IPv4(202,50,137,0),24 },
+       { IPv4(202,50,143,0),24 },
+       { IPv4(202,50,164,0),24 },
+       { IPv4(202,50,170,0),24 },
+       { IPv4(202,50,177,0),24 },
+       { IPv4(202,50,196,0),22 },
+       { IPv4(202,50,200,0),21 },
+       { IPv4(202,50,208,0),20 },
+       { IPv4(202,51,64,0),24 },
+       { IPv4(202,51,65,0),24 },
+       { IPv4(202,51,71,0),24 },
+       { IPv4(202,51,75,0),24 },
+       { IPv4(202,51,76,0),24 },
+       { IPv4(202,51,77,0),24 },
+       { IPv4(202,51,88,0),24 },
+       { IPv4(202,51,93,0),24 },
+       { IPv4(202,51,94,0),24 },
+       { IPv4(202,51,95,0),24 },
+       { IPv4(202,51,96,0),19 },
+       { IPv4(202,51,136,0),24 },
+       { IPv4(202,51,137,0),24 },
+       { IPv4(202,51,138,0),24 },
+       { IPv4(202,51,151,0),24 },
+       { IPv4(202,51,159,0),24 },
+       { IPv4(202,51,192,0),20 },
+       { IPv4(202,51,192,0),19 },
+       { IPv4(202,51,208,0),20 },
+       { IPv4(202,52,32,0),22 },
+       { IPv4(202,52,64,0),18 },
+       { IPv4(202,52,128,0),19 },
+       { IPv4(202,52,224,0),19 },
+       { IPv4(202,53,90,0),23 },
+       { IPv4(202,53,96,0),20 },
+       { IPv4(202,53,224,0),22 },
+       { IPv4(202,53,226,0),23 },
+       { IPv4(202,53,228,0),23 },
+       { IPv4(202,53,230,0),24 },
+       { IPv4(202,53,232,0),22 },
+       { IPv4(202,53,240,0),21 },
+       { IPv4(202,53,248,0),21 },
+       { IPv4(202,55,143,0),24 },
+       { IPv4(202,56,32,0),23 },
+       { IPv4(202,56,48,0),20 },
+       { IPv4(202,56,152,0),24 },
+       { IPv4(202,56,153,0),24 },
+       { IPv4(202,56,156,0),23 },
+       { IPv4(202,56,158,0),23 },
+       { IPv4(202,56,158,0),24 },
+       { IPv4(202,56,159,0),24 },
+       { IPv4(202,56,192,0),24 },
+       { IPv4(202,56,193,0),24 },
+       { IPv4(202,56,194,0),24 },
+       { IPv4(202,56,195,0),24 },
+       { IPv4(202,56,196,0),24 },
+       { IPv4(202,56,196,0),23 },
+       { IPv4(202,56,196,0),22 },
+       { IPv4(202,56,197,0),24 },
+       { IPv4(202,56,198,0),24 },
+       { IPv4(202,56,198,0),23 },
+       { IPv4(202,56,199,0),24 },
+       { IPv4(202,56,200,0),22 },
+       { IPv4(202,56,204,0),22 },
+       { IPv4(202,56,204,0),24 },
+       { IPv4(202,56,205,0),24 },
+       { IPv4(202,56,206,0),24 },
+       { IPv4(202,56,207,0),24 },
+       { IPv4(202,56,212,0),22 },
+       { IPv4(202,56,216,0),22 },
+       { IPv4(202,56,220,0),22 },
+       { IPv4(202,56,221,0),24 },
+       { IPv4(202,56,222,0),24 },
+       { IPv4(202,56,223,0),24 },
+       { IPv4(202,56,224,0),22 },
+       { IPv4(202,56,224,0),24 },
+       { IPv4(202,56,224,0),20 },
+       { IPv4(202,56,225,0),24 },
+       { IPv4(202,56,229,0),24 },
+       { IPv4(202,56,230,0),24 },
+       { IPv4(202,56,231,0),24 },
+       { IPv4(202,56,232,0),22 },
+       { IPv4(202,56,236,0),22 },
+       { IPv4(202,56,237,0),24 },
+       { IPv4(202,56,239,0),24 },
+       { IPv4(202,56,241,0),24 },
+       { IPv4(202,56,245,0),24 },
+       { IPv4(202,56,248,0),21 },
+       { IPv4(202,56,248,0),24 },
+       { IPv4(202,56,249,0),24 },
+       { IPv4(202,56,250,0),24 },
+       { IPv4(202,56,251,0),24 },
+       { IPv4(202,56,252,0),24 },
+       { IPv4(202,56,252,0),22 },
+       { IPv4(202,56,253,0),24 },
+       { IPv4(202,56,254,0),24 },
+       { IPv4(202,56,255,0),24 },
+       { IPv4(202,57,0,0),24 },
+       { IPv4(202,57,1,0),24 },
+       { IPv4(202,57,2,0),24 },
+       { IPv4(202,57,3,0),24 },
+       { IPv4(202,57,32,0),24 },
+       { IPv4(202,57,33,0),24 },
+       { IPv4(202,57,34,0),24 },
+       { IPv4(202,57,35,0),24 },
+       { IPv4(202,57,39,0),24 },
+       { IPv4(202,57,49,0),24 },
+       { IPv4(202,57,96,0),19 },
+       { IPv4(202,57,128,0),18 },
+       { IPv4(202,58,64,0),20 },
+       { IPv4(202,58,96,0),19 },
+       { IPv4(202,58,100,0),24 },
+       { IPv4(202,58,104,0),24 },
+       { IPv4(202,58,113,0),24 },
+       { IPv4(202,58,115,0),24 },
+       { IPv4(202,58,116,0),24 },
+       { IPv4(202,58,117,0),24 },
+       { IPv4(202,58,118,0),24 },
+       { IPv4(202,58,122,0),24 },
+       { IPv4(202,58,192,0),22 },
+       { IPv4(202,58,196,0),23 },
+       { IPv4(202,59,70,0),24 },
+       { IPv4(202,59,71,0),24 },
+       { IPv4(202,59,72,0),24 },
+       { IPv4(202,59,73,0),24 },
+       { IPv4(202,59,224,0),19 },
+       { IPv4(202,60,192,0),24 },
+       { IPv4(202,60,192,0),21 },
+       { IPv4(202,61,64,0),19 },
+       { IPv4(202,61,64,0),22 },
+       { IPv4(202,61,68,0),22 },
+       { IPv4(202,61,72,0),22 },
+       { IPv4(202,61,77,0),24 },
+       { IPv4(202,61,84,0),23 },
+       { IPv4(202,61,199,0),24 },
+       { IPv4(202,61,236,0),24 },
+       { IPv4(202,61,236,0),22 },
+       { IPv4(202,61,237,0),24 },
+       { IPv4(202,61,238,0),24 },
+       { IPv4(202,61,239,0),24 },
+       { IPv4(202,62,68,0),22 },
+       { IPv4(202,62,72,0),22 },
+       { IPv4(202,62,83,0),24 },
+       { IPv4(202,62,85,0),24 },
+       { IPv4(202,62,94,0),24 },
+       { IPv4(202,62,95,0),24 },
+       { IPv4(202,62,128,0),19 },
+       { IPv4(202,62,192,0),19 },
+       { IPv4(202,63,192,0),19 },
+       { IPv4(202,63,218,0),24 },
+       { IPv4(202,63,219,0),24 },
+       { IPv4(202,65,69,0),24 },
+       { IPv4(202,66,24,0),24 },
+       { IPv4(202,67,32,0),20 },
+       { IPv4(202,68,143,0),24 },
+       { IPv4(202,68,144,0),24 },
+       { IPv4(202,68,147,0),24 },
+       { IPv4(202,68,158,0),24 },
+       { IPv4(202,70,32,0),21 },
+       { IPv4(202,71,141,0),24 },
+       { IPv4(202,71,150,0),24 },
+       { IPv4(202,71,151,0),24 },
+       { IPv4(202,71,152,0),24 },
+       { IPv4(202,71,153,0),24 },
+       { IPv4(202,71,154,0),24 },
+       { IPv4(202,71,155,0),24 },
+       { IPv4(202,71,158,0),24 },
+       { IPv4(202,71,159,0),24 },
+       { IPv4(202,72,32,0),20 },
+       { IPv4(202,72,64,0),21 },
+       { IPv4(202,72,64,0),24 },
+       { IPv4(202,72,65,0),24 },
+       { IPv4(202,72,66,0),24 },
+       { IPv4(202,72,67,0),24 },
+       { IPv4(202,72,68,0),24 },
+       { IPv4(202,72,69,0),24 },
+       { IPv4(202,72,70,0),24 },
+       { IPv4(202,72,71,0),24 },
+       { IPv4(202,72,72,0),24 },
+       { IPv4(202,72,72,0),21 },
+       { IPv4(202,72,73,0),24 },
+       { IPv4(202,72,74,0),24 },
+       { IPv4(202,72,75,0),24 },
+       { IPv4(202,72,76,0),24 },
+       { IPv4(202,72,77,0),24 },
+       { IPv4(202,72,78,0),24 },
+       { IPv4(202,72,79,0),24 },
+       { IPv4(202,72,128,0),19 },
+       { IPv4(202,74,32,0),19 },
+       { IPv4(202,74,96,0),20 },
+       { IPv4(202,74,102,0),24 },
+       { IPv4(202,74,112,0),20 },
+       { IPv4(202,75,32,0),20 },
+       { IPv4(202,75,96,0),20 },
+       { IPv4(202,75,128,0),18 },
+       { IPv4(202,77,96,0),22 },
+       { IPv4(202,77,100,0),22 },
+       { IPv4(202,77,104,0),22 },
+       { IPv4(202,77,108,0),22 },
+       { IPv4(202,77,112,0),22 },
+       { IPv4(202,77,116,0),24 },
+       { IPv4(202,77,116,0),22 },
+       { IPv4(202,77,117,0),24 },
+       { IPv4(202,77,120,0),21 },
+       { IPv4(202,78,128,0),21 },
+       { IPv4(202,78,128,0),19 },
+       { IPv4(202,78,137,0),24 },
+       { IPv4(202,78,156,0),22 },
+       { IPv4(202,78,156,0),24 },
+       { IPv4(202,78,157,0),24 },
+       { IPv4(202,78,158,0),24 },
+       { IPv4(202,78,159,0),24 },
+       { IPv4(202,79,32,0),24 },
+       { IPv4(202,79,33,0),24 },
+       { IPv4(202,79,34,0),24 },
+       { IPv4(202,79,35,0),24 },
+       { IPv4(202,79,36,0),24 },
+       { IPv4(202,79,37,0),24 },
+       { IPv4(202,79,38,0),24 },
+       { IPv4(202,79,39,0),24 },
+       { IPv4(202,79,40,0),24 },
+       { IPv4(202,79,41,0),24 },
+       { IPv4(202,79,42,0),24 },
+       { IPv4(202,79,43,0),24 },
+       { IPv4(202,79,44,0),24 },
+       { IPv4(202,79,45,0),24 },
+       { IPv4(202,79,46,0),24 },
+       { IPv4(202,79,47,0),24 },
+       { IPv4(202,79,48,0),24 },
+       { IPv4(202,79,49,0),24 },
+       { IPv4(202,79,50,0),24 },
+       { IPv4(202,79,51,0),24 },
+       { IPv4(202,79,52,0),24 },
+       { IPv4(202,79,53,0),24 },
+       { IPv4(202,79,54,0),24 },
+       { IPv4(202,79,55,0),24 },
+       { IPv4(202,79,56,0),24 },
+       { IPv4(202,79,57,0),24 },
+       { IPv4(202,79,58,0),24 },
+       { IPv4(202,79,59,0),24 },
+       { IPv4(202,79,128,0),19 },
+       { IPv4(202,79,192,0),19 },
+       { IPv4(202,80,224,0),19 },
+       { IPv4(202,81,96,0),24 },
+       { IPv4(202,81,97,0),24 },
+       { IPv4(202,81,98,0),24 },
+       { IPv4(202,81,99,0),24 },
+       { IPv4(202,81,100,0),24 },
+       { IPv4(202,81,101,0),24 },
+       { IPv4(202,81,102,0),24 },
+       { IPv4(202,81,108,0),24 },
+       { IPv4(202,81,120,0),24 },
+       { IPv4(202,81,121,0),24 },
+       { IPv4(202,81,127,0),24 },
+       { IPv4(202,83,32,0),19 },
+       { IPv4(202,84,10,0),23 },
+       { IPv4(202,84,16,0),24 },
+       { IPv4(202,84,17,0),24 },
+       { IPv4(202,84,146,0),23 },
+       { IPv4(202,85,160,0),22 },
+       { IPv4(202,88,135,0),24 },
+       { IPv4(202,88,136,0),24 },
+       { IPv4(202,88,149,0),24 },
+       { IPv4(202,88,152,0),24 },
+       { IPv4(202,88,160,0),24 },
+       { IPv4(202,88,224,0),21 },
+       { IPv4(202,88,232,0),21 },
+       { IPv4(202,89,64,0),21 },
+       { IPv4(202,89,96,0),19 },
+       { IPv4(202,89,128,0),19 },
+       { IPv4(202,89,196,0),24 },
+       { IPv4(202,89,196,0),22 },
+       { IPv4(202,89,197,0),24 },
+       { IPv4(202,89,199,0),24 },
+       { IPv4(202,89,200,0),24 },
+       { IPv4(202,89,201,0),24 },
+       { IPv4(202,90,192,0),24 },
+       { IPv4(202,90,193,0),24 },
+       { IPv4(202,90,194,0),24 },
+       { IPv4(202,91,128,0),23 },
+       { IPv4(202,91,130,0),23 },
+       { IPv4(202,92,0,0),22 },
+       { IPv4(202,92,32,0),23 },
+       { IPv4(202,92,47,0),24 },
+       { IPv4(202,92,64,0),19 },
+       { IPv4(202,92,88,0),21 },
+       { IPv4(202,92,96,0),19 },
+       { IPv4(202,92,100,0),24 },
+       { IPv4(202,93,0,0),20 },
+       { IPv4(202,93,64,0),19 },
+       { IPv4(202,93,128,0),19 },
+       { IPv4(202,93,252,0),22 },
+       { IPv4(202,94,0,0),19 },
+       { IPv4(202,94,160,0),20 },
+       { IPv4(202,95,0,0),22 },
+       { IPv4(202,95,128,0),19 },
+       { IPv4(202,95,144,0),23 },
+       { IPv4(202,95,152,0),22 },
+       { IPv4(202,95,156,0),22 },
+       { IPv4(202,96,0,0),18 },
+       { IPv4(202,96,64,0),19 },
+       { IPv4(202,96,96,0),19 },
+       { IPv4(202,96,128,0),18 },
+       { IPv4(202,96,192,0),19 },
+       { IPv4(202,96,192,0),18 },
+       { IPv4(202,96,224,0),19 },
+       { IPv4(202,97,0,0),19 },
+       { IPv4(202,97,32,0),19 },
+       { IPv4(202,97,96,0),20 },
+       { IPv4(202,97,96,0),19 },
+       { IPv4(202,97,128,0),19 },
+       { IPv4(202,97,160,0),19 },
+       { IPv4(202,98,0,0),19 },
+       { IPv4(202,98,32,0),19 },
+       { IPv4(202,98,64,0),19 },
+       { IPv4(202,98,96,0),19 },
+       { IPv4(202,98,128,0),19 },
+       { IPv4(202,98,160,0),19 },
+       { IPv4(202,98,192,0),19 },
+       { IPv4(202,98,224,0),19 },
+       { IPv4(202,99,0,0),18 },
+       { IPv4(202,99,64,0),18 },
+       { IPv4(202,99,128,0),19 },
+       { IPv4(202,99,160,0),19 },
+       { IPv4(202,100,64,0),19 },
+       { IPv4(202,100,96,0),19 },
+       { IPv4(202,100,128,0),19 },
+       { IPv4(202,100,160,0),19 },
+       { IPv4(202,100,192,0),19 },
+       { IPv4(202,100,224,0),19 },
+       { IPv4(202,101,0,0),16 },
+       { IPv4(202,101,0,0),18 },
+       { IPv4(202,101,64,0),19 },
+       { IPv4(202,101,96,0),19 },
+       { IPv4(202,101,128,0),19 },
+       { IPv4(202,101,160,0),19 },
+       { IPv4(202,101,192,0),18 },
+       { IPv4(202,102,0,0),17 },
+       { IPv4(202,102,128,0),18 },
+       { IPv4(202,102,192,0),19 },
+       { IPv4(202,102,224,0),19 },
+       { IPv4(202,103,0,0),18 },
+       { IPv4(202,103,64,0),19 },
+       { IPv4(202,103,96,0),19 },
+       { IPv4(202,103,128,0),18 },
+       { IPv4(202,103,192,0),19 },
+       { IPv4(202,103,224,0),19 },
+       { IPv4(202,104,0,0),16 },
+       { IPv4(202,105,0,0),16 },
+       { IPv4(202,106,0,0),19 },
+       { IPv4(202,106,32,0),20 },
+       { IPv4(202,106,48,0),20 },
+       { IPv4(202,106,64,0),18 },
+       { IPv4(202,106,128,0),18 },
+       { IPv4(202,106,192,0),19 },
+       { IPv4(202,106,224,0),19 },
+       { IPv4(202,107,0,0),17 },
+       { IPv4(202,107,128,0),18 },
+       { IPv4(202,107,192,0),18 },
+       { IPv4(202,108,0,0),17 },
+       { IPv4(202,108,128,0),17 },
+       { IPv4(202,109,0,0),17 },
+       { IPv4(202,109,128,0),18 },
+       { IPv4(202,109,192,0),18 },
+       { IPv4(202,110,0,0),18 },
+       { IPv4(202,110,64,0),18 },
+       { IPv4(202,110,128,0),18 },
+       { IPv4(202,110,192,0),18 },
+       { IPv4(202,111,0,0),17 },
+       { IPv4(202,111,128,0),19 },
+       { IPv4(202,111,160,0),19 },
+       { IPv4(202,111,192,0),19 },
+       { IPv4(202,111,224,0),19 },
+       { IPv4(202,112,248,0),24 },
+       { IPv4(202,122,1,0),24 },
+       { IPv4(202,122,7,0),24 },
+       { IPv4(202,122,128,0),24 },
+       { IPv4(202,125,80,0),20 },
+       { IPv4(202,127,0,0),23 },
+       { IPv4(202,127,12,0),22 },
+       { IPv4(202,127,16,0),20 },
+       { IPv4(202,127,40,0),21 },
+       { IPv4(202,127,48,0),23 },
+       { IPv4(202,127,144,0),23 },
+       { IPv4(202,127,157,0),24 },
+       { IPv4(202,127,159,0),24 },
+       { IPv4(202,127,160,0),21 },
+       { IPv4(202,127,192,0),23 },
+       { IPv4(202,127,200,0),21 },
+       { IPv4(202,128,128,0),19 },
+       { IPv4(202,128,132,0),24 },
+       { IPv4(202,129,0,0),19 },
+       { IPv4(202,129,192,0),19 },
+       { IPv4(202,130,0,0),19 },
+       { IPv4(202,130,70,0),23 },
+       { IPv4(202,130,72,0),22 },
+       { IPv4(202,130,76,0),23 },
+       { IPv4(202,130,79,0),24 },
+       { IPv4(202,130,80,0),23 },
+       { IPv4(202,130,82,0),23 },
+       { IPv4(202,130,96,0),19 },
+       { IPv4(202,130,96,0),23 },
+       { IPv4(202,130,104,0),23 },
+       { IPv4(202,130,106,0),24 },
+       { IPv4(202,130,224,0),20 },
+       { IPv4(202,130,240,0),21 },
+       { IPv4(202,130,248,0),21 },
+       { IPv4(202,131,0,0),21 },
+       { IPv4(202,131,114,0),24 },
+       { IPv4(202,131,119,0),24 },
+       { IPv4(202,131,144,0),20 },
+       { IPv4(202,131,224,0),19 },
+       { IPv4(202,133,3,0),24 },
+       { IPv4(202,133,75,0),24 },
+       { IPv4(202,133,79,0),24 },
+       { IPv4(202,133,128,0),20 },
+       { IPv4(202,133,144,0),20 },
+       { IPv4(202,133,160,0),20 },
+       { IPv4(202,133,224,0),19 },
+       { IPv4(202,134,192,0),22 },
+       { IPv4(202,134,196,0),22 },
+       { IPv4(202,134,200,0),22 },
+       { IPv4(202,134,204,0),22 },
+       { IPv4(202,134,224,0),19 },
+       { IPv4(202,135,0,0),16 },
+       { IPv4(202,136,254,0),23 },
+       { IPv4(202,137,0,0),21 },
+       { IPv4(202,137,8,0),22 },
+       { IPv4(202,137,8,0),21 },
+       { IPv4(202,137,64,0),19 },
+       { IPv4(202,137,128,0),19 },
+       { IPv4(202,138,14,0),24 },
+       { IPv4(202,138,48,0),21 },
+       { IPv4(202,138,63,0),24 },
+       { IPv4(202,138,128,0),18 },
+       { IPv4(202,138,160,0),23 },
+       { IPv4(202,138,202,0),23 },
+       { IPv4(202,139,59,0),24 },
+       { IPv4(202,139,173,0),24 },
+       { IPv4(202,139,174,0),24 },
+       { IPv4(202,140,0,0),19 },
+       { IPv4(202,140,128,0),21 },
+       { IPv4(202,140,144,0),24 },
+       { IPv4(202,140,145,0),24 },
+       { IPv4(202,140,146,0),24 },
+       { IPv4(202,140,147,0),24 },
+       { IPv4(202,140,150,0),23 },
+       { IPv4(202,141,81,0),24 },
+       { IPv4(202,141,216,0),21 },
+       { IPv4(202,142,64,0),21 },
+       { IPv4(202,142,88,0),21 },
+       { IPv4(202,142,96,0),21 },
+       { IPv4(202,143,48,0),21 },
+       { IPv4(202,143,56,0),21 },
+       { IPv4(202,143,128,0),19 },
+       { IPv4(202,143,224,0),21 },
+       { IPv4(202,144,8,0),24 },
+       { IPv4(202,144,13,0),24 },
+       { IPv4(202,144,14,0),24 },
+       { IPv4(202,144,20,0),24 },
+       { IPv4(202,144,22,0),24 },
+       { IPv4(202,144,27,0),24 },
+       { IPv4(202,144,28,0),24 },
+       { IPv4(202,144,34,0),24 },
+       { IPv4(202,144,35,0),24 },
+       { IPv4(202,144,44,0),24 },
+       { IPv4(202,144,48,0),20 },
+       { IPv4(202,144,54,0),24 },
+       { IPv4(202,144,55,0),24 },
+       { IPv4(202,144,64,0),24 },
+       { IPv4(202,144,65,0),24 },
+       { IPv4(202,144,74,0),24 },
+       { IPv4(202,144,75,0),24 },
+       { IPv4(202,144,76,0),24 },
+       { IPv4(202,144,77,0),24 },
+       { IPv4(202,144,79,0),24 },
+       { IPv4(202,144,83,0),24 },
+       { IPv4(202,144,86,0),24 },
+       { IPv4(202,144,91,0),24 },
+       { IPv4(202,144,95,0),24 },
+       { IPv4(202,144,96,0),24 },
+       { IPv4(202,144,98,0),24 },
+       { IPv4(202,144,99,0),24 },
+       { IPv4(202,144,105,0),24 },
+       { IPv4(202,144,109,0),24 },
+       { IPv4(202,144,110,0),24 },
+       { IPv4(202,144,119,0),24 },
+       { IPv4(202,144,120,0),24 },
+       { IPv4(202,144,125,0),24 },
+       { IPv4(202,144,128,0),19 },
+       { IPv4(202,145,0,0),22 },
+       { IPv4(202,146,0,0),22 },
+       { IPv4(202,146,4,0),23 },
+       { IPv4(202,146,32,0),19 },
+       { IPv4(202,146,144,0),24 },
+       { IPv4(202,146,224,0),19 },
+       { IPv4(202,146,226,0),24 },
+       { IPv4(202,146,227,0),24 },
+       { IPv4(202,146,228,0),23 },
+       { IPv4(202,146,230,0),24 },
+       { IPv4(202,146,231,0),24 },
+       { IPv4(202,146,232,0),24 },
+       { IPv4(202,146,236,0),24 },
+       { IPv4(202,146,237,0),24 },
+       { IPv4(202,146,239,0),24 },
+       { IPv4(202,146,244,0),22 },
+       { IPv4(202,146,253,0),24 },
+       { IPv4(202,146,254,0),24 },
+       { IPv4(202,146,255,0),24 },
+       { IPv4(202,147,0,0),24 },
+       { IPv4(202,147,128,0),19 },
+       { IPv4(202,147,192,0),23 },
+       { IPv4(202,147,194,0),23 },
+       { IPv4(202,147,240,0),20 },
+       { IPv4(202,148,0,0),22 },
+       { IPv4(202,148,4,0),24 },
+       { IPv4(202,148,5,0),24 },
+       { IPv4(202,148,6,0),24 },
+       { IPv4(202,148,7,0),24 },
+       { IPv4(202,148,8,0),21 },
+       { IPv4(202,148,11,0),24 },
+       { IPv4(202,148,16,0),24 },
+       { IPv4(202,148,17,0),24 },
+       { IPv4(202,148,20,0),24 },
+       { IPv4(202,149,79,0),24 },
+       { IPv4(202,149,80,0),23 },
+       { IPv4(202,149,82,0),24 },
+       { IPv4(202,149,128,0),20 },
+       { IPv4(202,149,128,0),19 },
+       { IPv4(202,149,144,0),22 },
+       { IPv4(202,149,148,0),22 },
+       { IPv4(202,149,152,0),24 },
+       { IPv4(202,149,208,0),21 },
+       { IPv4(202,149,216,0),21 },
+       { IPv4(202,149,216,0),24 },
+       { IPv4(202,149,240,0),21 },
+       { IPv4(202,149,248,0),21 },
+       { IPv4(202,150,0,0),21 },
+       { IPv4(202,150,8,0),21 },
+       { IPv4(202,150,32,0),20 },
+       { IPv4(202,150,46,0),24 },
+       { IPv4(202,150,47,0),24 },
+       { IPv4(202,150,64,0),19 },
+       { IPv4(202,150,224,0),19 },
+       { IPv4(202,151,32,0),24 },
+       { IPv4(202,151,192,0),18 },
+       { IPv4(202,152,0,0),19 },
+       { IPv4(202,152,0,0),22 },
+       { IPv4(202,152,4,0),22 },
+       { IPv4(202,152,12,0),22 },
+       { IPv4(202,152,16,0),22 },
+       { IPv4(202,152,20,0),22 },
+       { IPv4(202,152,24,0),22 },
+       { IPv4(202,152,28,0),22 },
+       { IPv4(202,152,32,0),20 },
+       { IPv4(202,152,156,0),24 },
+       { IPv4(202,152,224,0),19 },
+       { IPv4(202,153,32,0),22 },
+       { IPv4(202,153,42,0),23 },
+       { IPv4(202,153,128,0),21 },
+       { IPv4(202,153,224,0),23 },
+       { IPv4(202,153,224,0),20 },
+       { IPv4(202,153,240,0),20 },
+       { IPv4(202,153,248,0),21 },
+       { IPv4(202,154,0,0),22 },
+       { IPv4(202,154,4,0),22 },
+       { IPv4(202,154,4,0),24 },
+       { IPv4(202,154,8,0),22 },
+       { IPv4(202,154,12,0),22 },
+       { IPv4(202,154,16,0),22 },
+       { IPv4(202,154,16,0),20 },
+       { IPv4(202,154,20,0),22 },
+       { IPv4(202,154,24,0),22 },
+       { IPv4(202,154,24,0),24 },
+       { IPv4(202,154,28,0),22 },
+       { IPv4(202,154,29,0),24 },
+       { IPv4(202,154,32,0),20 },
+       { IPv4(202,154,32,0),22 },
+       { IPv4(202,154,36,0),22 },
+       { IPv4(202,154,40,0),22 },
+       { IPv4(202,154,42,0),24 },
+       { IPv4(202,154,43,0),24 },
+       { IPv4(202,154,44,0),22 },
+       { IPv4(202,154,48,0),22 },
+       { IPv4(202,154,48,0),20 },
+       { IPv4(202,154,52,0),22 },
+       { IPv4(202,154,56,0),22 },
+       { IPv4(202,154,60,0),22 },
+       { IPv4(202,154,64,0),20 },
+       { IPv4(202,154,128,0),19 },
+       { IPv4(202,154,192,0),19 },
+       { IPv4(202,155,0,0),22 },
+       { IPv4(202,155,3,0),24 },
+       { IPv4(202,155,4,0),23 },
+       { IPv4(202,155,6,0),24 },
+       { IPv4(202,155,7,0),24 },
+       { IPv4(202,155,8,0),24 },
+       { IPv4(202,155,9,0),24 },
+       { IPv4(202,155,10,0),23 },
+       { IPv4(202,155,12,0),22 },
+       { IPv4(202,155,16,0),22 },
+       { IPv4(202,155,20,0),22 },
+       { IPv4(202,155,24,0),23 },
+       { IPv4(202,155,26,0),23 },
+       { IPv4(202,155,28,0),23 },
+       { IPv4(202,155,30,0),23 },
+       { IPv4(202,155,32,0),23 },
+       { IPv4(202,155,34,0),23 },
+       { IPv4(202,155,36,0),23 },
+       { IPv4(202,155,38,0),23 },
+       { IPv4(202,155,40,0),22 },
+       { IPv4(202,155,44,0),22 },
+       { IPv4(202,155,48,0),24 },
+       { IPv4(202,155,49,0),24 },
+       { IPv4(202,155,50,0),23 },
+       { IPv4(202,155,52,0),23 },
+       { IPv4(202,155,54,0),23 },
+       { IPv4(202,155,56,0),24 },
+       { IPv4(202,155,57,0),24 },
+       { IPv4(202,155,58,0),24 },
+       { IPv4(202,155,59,0),24 },
+       { IPv4(202,155,60,0),23 },
+       { IPv4(202,155,62,0),23 },
+       { IPv4(202,155,64,0),23 },
+       { IPv4(202,155,66,0),23 },
+       { IPv4(202,155,68,0),23 },
+       { IPv4(202,155,70,0),23 },
+       { IPv4(202,155,72,0),23 },
+       { IPv4(202,155,74,0),23 },
+       { IPv4(202,155,76,0),23 },
+       { IPv4(202,155,78,0),23 },
+       { IPv4(202,155,80,0),23 },
+       { IPv4(202,155,82,0),23 },
+       { IPv4(202,155,84,0),23 },
+       { IPv4(202,155,86,0),24 },
+       { IPv4(202,155,87,0),24 },
+       { IPv4(202,155,88,0),24 },
+       { IPv4(202,155,89,0),24 },
+       { IPv4(202,155,90,0),23 },
+       { IPv4(202,155,92,0),23 },
+       { IPv4(202,155,93,0),24 },
+       { IPv4(202,155,94,0),23 },
+       { IPv4(202,155,96,0),23 },
+       { IPv4(202,155,98,0),23 },
+       { IPv4(202,155,100,0),23 },
+       { IPv4(202,155,102,0),24 },
+       { IPv4(202,155,103,0),24 },
+       { IPv4(202,155,104,0),23 },
+       { IPv4(202,155,106,0),23 },
+       { IPv4(202,155,108,0),23 },
+       { IPv4(202,155,110,0),23 },
+       { IPv4(202,155,112,0),23 },
+       { IPv4(202,155,114,0),23 },
+       { IPv4(202,155,116,0),23 },
+       { IPv4(202,155,118,0),23 },
+       { IPv4(202,155,120,0),23 },
+       { IPv4(202,155,122,0),23 },
+       { IPv4(202,155,124,0),24 },
+       { IPv4(202,155,125,0),24 },
+       { IPv4(202,155,126,0),24 },
+       { IPv4(202,155,127,0),24 },
+       { IPv4(202,156,0,0),16 },
+       { IPv4(202,156,0,0),19 },
+       { IPv4(202,156,32,0),19 },
+       { IPv4(202,156,64,0),19 },
+       { IPv4(202,156,96,0),19 },
+       { IPv4(202,156,128,0),19 },
+       { IPv4(202,156,160,0),19 },
+       { IPv4(202,156,192,0),19 },
+       { IPv4(202,156,224,0),19 },
+       { IPv4(202,157,0,0),23 },
+       { IPv4(202,157,67,0),24 },
+       { IPv4(202,157,128,0),19 },
+       { IPv4(202,157,160,0),21 },
+       { IPv4(202,157,182,0),23 },
+       { IPv4(202,158,0,0),18 },
+       { IPv4(202,158,0,0),19 },
+       { IPv4(202,158,0,0),17 },
+       { IPv4(202,158,24,0),22 },
+       { IPv4(202,158,24,0),21 },
+       { IPv4(202,158,28,0),22 },
+       { IPv4(202,158,31,0),24 },
+       { IPv4(202,158,32,0),21 },
+       { IPv4(202,158,32,0),19 },
+       { IPv4(202,158,36,0),22 },
+       { IPv4(202,158,40,0),21 },
+       { IPv4(202,158,48,0),21 },
+       { IPv4(202,158,48,0),22 },
+       { IPv4(202,158,52,0),22 },
+       { IPv4(202,158,56,0),21 },
+       { IPv4(202,158,64,0),21 },
+       { IPv4(202,158,64,0),19 },
+       { IPv4(202,158,72,0),21 },
+       { IPv4(202,158,80,0),21 },
+       { IPv4(202,158,80,0),22 },
+       { IPv4(202,158,80,0),24 },
+       { IPv4(202,158,80,0),23 },
+       { IPv4(202,158,82,0),23 },
+       { IPv4(202,158,84,0),22 },
+       { IPv4(202,158,88,0),22 },
+       { IPv4(202,158,92,0),22 },
+       { IPv4(202,158,96,0),21 },
+       { IPv4(202,158,96,0),19 },
+       { IPv4(202,158,96,0),20 },
+       { IPv4(202,158,96,0),22 },
+       { IPv4(202,158,100,0),22 },
+       { IPv4(202,158,104,0),22 },
+       { IPv4(202,158,104,0),21 },
+       { IPv4(202,158,108,0),22 },
+       { IPv4(202,158,112,0),20 },
+       { IPv4(202,158,112,0),21 },
+       { IPv4(202,158,112,0),22 },
+       { IPv4(202,158,116,0),22 },
+       { IPv4(202,158,120,0),22 },
+       { IPv4(202,158,120,0),21 },
+       { IPv4(202,158,124,0),22 },
+       { IPv4(202,159,0,0),19 },
+       { IPv4(202,159,32,0),22 },
+       { IPv4(202,159,36,0),24 },
+       { IPv4(202,159,37,0),24 },
+       { IPv4(202,159,38,0),23 },
+       { IPv4(202,159,40,0),22 },
+       { IPv4(202,159,44,0),23 },
+       { IPv4(202,159,46,0),24 },
+       { IPv4(202,159,47,0),24 },
+       { IPv4(202,159,48,0),20 },
+       { IPv4(202,159,64,0),19 },
+       { IPv4(202,159,96,0),19 },
+       { IPv4(202,160,0,0),19 },
+       { IPv4(202,160,64,0),19 },
+       { IPv4(202,160,224,0),19 },
+       { IPv4(202,160,235,0),24 },
+       { IPv4(202,161,0,0),21 },
+       { IPv4(202,161,31,0),24 },
+       { IPv4(202,161,32,0),19 },
+       { IPv4(202,161,128,0),19 },
+       { IPv4(202,161,160,0),20 },
+       { IPv4(202,162,192,0),20 },
+       { IPv4(202,163,96,0),19 },
+       { IPv4(202,163,128,0),24 },
+       { IPv4(202,163,129,0),24 },
+       { IPv4(202,163,130,0),24 },
+       { IPv4(202,163,131,0),24 },
+       { IPv4(202,163,132,0),24 },
+       { IPv4(202,163,224,0),19 },
+       { IPv4(202,163,234,0),24 },
+       { IPv4(202,163,240,0),20 },
+       { IPv4(202,163,248,0),21 },
+       { IPv4(202,163,248,0),24 },
+       { IPv4(202,164,32,0),21 },
+       { IPv4(202,164,96,0),19 },
+       { IPv4(202,164,160,0),19 },
+       { IPv4(202,164,185,0),24 },
+       { IPv4(202,165,0,0),19 },
+       { IPv4(202,165,40,0),21 },
+       { IPv4(202,165,64,0),19 },
+       { IPv4(202,165,64,0),20 },
+       { IPv4(202,165,70,0),23 },
+       { IPv4(202,165,80,0),20 },
+       { IPv4(202,165,225,0),24 },
+       { IPv4(202,165,230,0),24 },
+       { IPv4(202,165,231,0),24 },
+       { IPv4(202,165,246,0),24 },
+       { IPv4(202,166,0,0),17 },
+       { IPv4(202,166,160,0),19 },
+       { IPv4(202,166,192,0),18 },
+       { IPv4(202,167,4,0),24 },
+       { IPv4(202,168,192,0),20 },
+       { IPv4(202,168,254,0),23 },
+       { IPv4(202,169,128,0),18 },
+       { IPv4(202,169,224,0),20 },
+       { IPv4(202,171,64,0),24 },
+       { IPv4(202,171,65,0),24 },
+       { IPv4(202,171,66,0),24 },
+       { IPv4(202,171,67,0),24 },
+       { IPv4(202,171,68,0),24 },
+       { IPv4(202,171,69,0),24 },
+       { IPv4(202,171,70,0),24 },
+       { IPv4(202,171,71,0),24 },
+       { IPv4(202,171,72,0),24 },
+       { IPv4(202,171,73,0),24 },
+       { IPv4(202,171,74,0),24 },
+       { IPv4(202,171,75,0),24 },
+       { IPv4(202,171,76,0),24 },
+       { IPv4(202,171,77,0),24 },
+       { IPv4(202,171,78,0),24 },
+       { IPv4(202,171,192,0),20 },
+       { IPv4(202,172,106,0),24 },
+       { IPv4(202,172,120,0),24 },
+       { IPv4(202,172,121,0),24 },
+       { IPv4(202,172,122,0),24 },
+       { IPv4(202,172,123,0),24 },
+       { IPv4(202,172,124,0),24 },
+       { IPv4(202,172,210,0),24 },
+       { IPv4(202,172,224,0),19 },
+       { IPv4(202,173,32,0),19 },
+       { IPv4(202,173,64,0),22 },
+       { IPv4(202,173,69,0),24 },
+       { IPv4(202,173,70,0),24 },
+       { IPv4(202,174,144,0),24 },
+       { IPv4(202,177,0,0),19 },
+       { IPv4(202,177,128,0),20 },
+       { IPv4(202,177,128,0),19 },
+       { IPv4(202,177,136,0),23 },
+       { IPv4(202,177,138,0),23 },
+       { IPv4(202,177,140,0),22 },
+       { IPv4(202,177,144,0),20 },
+       { IPv4(202,177,150,0),23 },
+       { IPv4(202,177,156,0),22 },
+       { IPv4(202,177,160,0),19 },
+       { IPv4(202,177,160,0),23 },
+       { IPv4(202,177,170,0),23 },
+       { IPv4(202,178,128,0),18 },
+       { IPv4(202,178,128,0),17 },
+       { IPv4(202,178,192,0),19 },
+       { IPv4(202,178,224,0),19 },
+       { IPv4(202,179,0,0),19 },
+       { IPv4(202,179,64,0),23 },
+       { IPv4(202,179,66,0),24 },
+       { IPv4(202,179,137,0),24 },
+       { IPv4(202,179,147,0),24 },
+       { IPv4(202,179,150,0),24 },
+       { IPv4(202,179,154,0),24 },
+       { IPv4(202,179,157,0),24 },
+       { IPv4(202,179,158,0),24 },
+       { IPv4(202,180,0,0),20 },
+       { IPv4(202,180,0,0),24 },
+       { IPv4(202,180,1,0),24 },
+       { IPv4(202,180,10,0),24 },
+       { IPv4(202,180,11,0),24 },
+       { IPv4(202,180,12,0),24 },
+       { IPv4(202,180,13,0),24 },
+       { IPv4(202,180,16,0),21 },
+       { IPv4(202,180,21,0),24 },
+       { IPv4(202,180,24,0),22 },
+       { IPv4(202,180,28,0),22 },
+       { IPv4(202,180,64,0),18 },
+       { IPv4(202,180,64,0),19 },
+       { IPv4(202,180,96,0),19 },
+       { IPv4(202,181,136,0),21 },
+       { IPv4(202,181,144,0),20 },
+       { IPv4(202,181,184,0),21 },
+       { IPv4(202,181,216,0),21 },
+       { IPv4(202,182,0,0),19 },
+       { IPv4(202,182,16,0),20 },
+       { IPv4(202,182,224,0),24 },
+       { IPv4(202,182,225,0),24 },
+       { IPv4(202,183,0,0),19 },
+       { IPv4(202,183,128,0),17 },
+       { IPv4(202,183,188,0),22 },
+       { IPv4(202,183,192,0),18 },
+       { IPv4(202,183,214,0),24 },
+       { IPv4(202,183,233,0),24 },
+       { IPv4(202,183,234,0),24 },
+       { IPv4(202,184,0,0),15 },
+       { IPv4(202,186,0,0),15 },
+       { IPv4(202,188,0,0),17 },
+       { IPv4(202,188,0,0),16 },
+       { IPv4(202,188,128,0),17 },
+       { IPv4(202,189,0,0),18 },
+       { IPv4(202,190,0,0),16 },
+       { IPv4(202,208,160,0),19 },
+       { IPv4(202,208,192,0),19 },
+       { IPv4(202,208,224,0),19 },
+       { IPv4(202,210,11,0),24 },
+       { IPv4(202,210,60,0),22 },
+       { IPv4(202,210,64,0),18 },
+       { IPv4(202,211,128,0),17 },
+       { IPv4(202,213,0,0),22 },
+       { IPv4(202,213,17,0),24 },
+       { IPv4(202,213,160,0),20 },
+       { IPv4(202,215,0,0),16 },
+       { IPv4(202,216,0,0),19 },
+       { IPv4(202,217,128,0),17 },
+       { IPv4(202,220,6,0),23 },
+       { IPv4(202,220,37,0),24 },
+       { IPv4(202,220,40,0),21 },
+       { IPv4(202,220,70,0),23 },
+       { IPv4(202,220,93,0),24 },
+       { IPv4(202,220,124,0),22 },
+       { IPv4(202,220,160,0),19 },
+       { IPv4(202,222,0,0),20 },
+       { IPv4(202,222,192,0),18 },
+       { IPv4(202,224,64,0),19 },
+       { IPv4(202,225,0,0),16 },
+       { IPv4(202,227,0,0),18 },
+       { IPv4(202,227,192,0),18 },
+       { IPv4(202,228,0,0),18 },
+       { IPv4(202,228,128,0),18 },
+       { IPv4(202,231,64,0),18 },
+       { IPv4(202,231,128,0),19 },
+       { IPv4(202,231,160,0),19 },
+       { IPv4(202,235,0,0),18 },
+       { IPv4(202,236,36,0),23 },
+       { IPv4(202,236,144,0),23 },
+       { IPv4(202,236,160,0),23 },
+       { IPv4(202,236,167,0),24 },
+       { IPv4(202,237,0,0),23 },
+       { IPv4(202,237,13,0),24 },
+       { IPv4(202,237,115,0),24 },
+       { IPv4(202,237,147,0),24 },
+       { IPv4(202,237,154,0),24 },
+       { IPv4(202,237,175,0),24 },
+       { IPv4(202,237,176,0),22 },
+       { IPv4(202,237,192,0),19 },
+       { IPv4(202,238,32,0),20 },
+       { IPv4(202,238,128,0),18 },
+       { IPv4(202,239,128,0),18 },
+       { IPv4(202,239,192,0),18 },
+       { IPv4(202,240,112,0),22 },
+       { IPv4(202,240,176,0),23 },
+       { IPv4(202,241,0,0),17 },
+       { IPv4(202,242,5,0),24 },
+       { IPv4(202,242,18,0),23 },
+       { IPv4(202,242,20,0),24 },
+       { IPv4(202,242,57,0),24 },
+       { IPv4(202,242,76,0),23 },
+       { IPv4(202,242,78,0),23 },
+       { IPv4(202,242,132,0),22 },
+       { IPv4(202,242,240,0),23 },
+       { IPv4(202,243,104,0),24 },
+       { IPv4(202,243,105,0),24 },
+       { IPv4(202,243,186,0),24 },
+       { IPv4(202,243,216,0),24 },
+       { IPv4(202,244,4,0),24 },
+       { IPv4(202,244,32,0),21 },
+       { IPv4(202,244,58,0),24 },
+       { IPv4(202,244,70,0),24 },
+       { IPv4(202,244,71,0),24 },
+       { IPv4(202,244,93,0),24 },
+       { IPv4(202,244,95,0),24 },
+       { IPv4(202,244,152,0),24 },
+       { IPv4(202,244,160,0),19 },
+       { IPv4(202,245,131,0),24 },
+       { IPv4(202,245,142,0),24 },
+       { IPv4(202,245,148,0),23 },
+       { IPv4(202,245,153,0),24 },
+       { IPv4(202,245,162,0),24 },
+       { IPv4(202,245,172,0),23 },
+       { IPv4(202,245,174,0),24 },
+       { IPv4(202,245,244,0),24 },
+       { IPv4(202,245,254,0),24 },
+       { IPv4(202,246,4,0),22 },
+       { IPv4(202,246,14,0),24 },
+       { IPv4(202,246,20,0),22 },
+       { IPv4(202,246,54,0),24 },
+       { IPv4(202,246,114,0),24 },
+       { IPv4(202,246,160,0),22 },
+       { IPv4(202,246,164,0),24 },
+       { IPv4(202,246,244,0),22 },
+       { IPv4(202,246,248,0),21 },
+       { IPv4(202,247,0,0),17 },
+       { IPv4(202,249,0,0),17 },
+       { IPv4(202,250,75,0),24 },
+       { IPv4(202,250,219,0),24 },
+       { IPv4(202,250,236,0),24 },
+       { IPv4(202,251,241,0),24 },
+       { IPv4(202,252,96,0),21 },
+       { IPv4(202,252,116,0),22 },
+       { IPv4(202,252,206,0),24 },
+       { IPv4(202,253,104,0),24 },
+       { IPv4(202,253,208,0),24 },
+       { IPv4(202,253,223,0),24 },
+       { IPv4(202,253,243,0),24 },
+       { IPv4(202,254,64,0),23 },
+       { IPv4(202,254,106,0),24 },
+       { IPv4(202,254,111,0),24 },
+       { IPv4(202,255,16,0),21 },
+       { IPv4(202,255,40,0),22 },
+       { IPv4(202,255,44,0),22 },
+       { IPv4(202,255,72,0),22 },
+       { IPv4(202,255,204,0),22 },
+       { IPv4(203,0,12,0),24 },
+       { IPv4(203,0,15,0),24 },
+       { IPv4(203,0,25,0),24 },
+       { IPv4(203,0,27,0),24 },
+       { IPv4(203,0,31,0),24 },
+       { IPv4(203,0,38,0),24 },
+       { IPv4(203,0,41,0),24 },
+       { IPv4(203,0,98,0),24 },
+       { IPv4(203,0,112,0),24 },
+       { IPv4(203,0,124,0),22 },
+       { IPv4(203,0,145,0),24 },
+       { IPv4(203,0,146,0),23 },
+       { IPv4(203,0,148,0),23 },
+       { IPv4(203,0,154,0),24 },
+       { IPv4(203,0,155,0),24 },
+       { IPv4(203,0,225,0),24 },
+       { IPv4(203,1,24,0),24 },
+       { IPv4(203,1,89,0),24 },
+       { IPv4(203,1,109,0),24 },
+       { IPv4(203,1,237,0),24 },
+       { IPv4(203,1,250,0),24 },
+       { IPv4(203,1,251,0),24 },
+       { IPv4(203,1,255,0),24 },
+       { IPv4(203,2,228,0),24 },
+       { IPv4(203,3,44,0),24 },
+       { IPv4(203,3,71,0),24 },
+       { IPv4(203,3,79,0),24 },
+       { IPv4(203,3,101,0),24 },
+       { IPv4(203,3,127,0),24 },
+       { IPv4(203,3,129,0),24 },
+       { IPv4(203,3,134,0),24 },
+       { IPv4(203,3,138,0),24 },
+       { IPv4(203,3,144,0),20 },
+       { IPv4(203,4,148,0),23 },
+       { IPv4(203,4,161,0),24 },
+       { IPv4(203,4,185,0),24 },
+       { IPv4(203,4,190,0),24 },
+       { IPv4(203,4,192,0),21 },
+       { IPv4(203,4,224,0),24 },
+       { IPv4(203,5,6,0),24 },
+       { IPv4(203,5,13,0),24 },
+       { IPv4(203,5,20,0),24 },
+       { IPv4(203,5,23,0),24 },
+       { IPv4(203,5,24,0),24 },
+       { IPv4(203,5,30,0),24 },
+       { IPv4(203,5,31,0),24 },
+       { IPv4(203,5,62,0),24 },
+       { IPv4(203,5,78,0),23 },
+       { IPv4(203,5,127,0),24 },
+       { IPv4(203,5,168,0),22 },
+       { IPv4(203,5,248,0),24 },
+       { IPv4(203,5,249,0),24 },
+       { IPv4(203,5,250,0),24 },
+       { IPv4(203,5,251,0),24 },
+       { IPv4(203,6,135,0),24 },
+       { IPv4(203,6,156,0),24 },
+       { IPv4(203,7,132,0),24 },
+       { IPv4(203,7,133,0),24 },
+       { IPv4(203,7,134,0),24 },
+       { IPv4(203,7,135,0),24 },
+       { IPv4(203,7,137,0),24 },
+       { IPv4(203,7,198,0),24 },
+       { IPv4(203,7,208,0),20 },
+       { IPv4(203,7,255,0),24 },
+       { IPv4(203,8,1,0),24 },
+       { IPv4(203,8,4,0),24 },
+       { IPv4(203,8,4,0),22 },
+       { IPv4(203,8,5,0),24 },
+       { IPv4(203,8,6,0),24 },
+       { IPv4(203,8,7,0),24 },
+       { IPv4(203,8,12,0),22 },
+       { IPv4(203,8,20,0),24 },
+       { IPv4(203,8,71,0),24 },
+       { IPv4(203,8,84,0),24 },
+       { IPv4(203,8,94,0),24 },
+       { IPv4(203,8,113,0),24 },
+       { IPv4(203,8,114,0),24 },
+       { IPv4(203,8,163,0),24 },
+       { IPv4(203,8,164,0),24 },
+       { IPv4(203,8,170,0),24 },
+       { IPv4(203,8,171,0),24 },
+       { IPv4(203,8,174,0),24 },
+       { IPv4(203,8,176,0),21 },
+       { IPv4(203,8,185,0),24 },
+       { IPv4(203,8,194,0),24 },
+       { IPv4(203,8,200,0),24 },
+       { IPv4(203,8,201,0),24 },
+       { IPv4(203,8,202,0),24 },
+       { IPv4(203,9,35,0),24 },
+       { IPv4(203,9,68,0),22 },
+       { IPv4(203,9,84,0),24 },
+       { IPv4(203,9,102,0),24 },
+       { IPv4(203,9,124,0),24 },
+       { IPv4(203,9,125,0),24 },
+       { IPv4(203,9,151,0),24 },
+       { IPv4(203,9,157,0),24 },
+       { IPv4(203,9,190,0),23 },
+       { IPv4(203,10,36,0),24 },
+       { IPv4(203,10,78,0),24 },
+       { IPv4(203,11,75,0),24 },
+       { IPv4(203,11,81,0),24 },
+       { IPv4(203,11,128,0),22 },
+       { IPv4(203,11,132,0),22 },
+       { IPv4(203,11,140,0),24 },
+       { IPv4(203,11,167,0),24 },
+       { IPv4(203,11,177,0),24 },
+       { IPv4(203,11,178,0),23 },
+       { IPv4(203,11,222,0),23 },
+       { IPv4(203,12,30,0),24 },
+       { IPv4(203,12,31,0),24 },
+       { IPv4(203,12,42,0),24 },
+       { IPv4(203,12,48,0),22 },
+       { IPv4(203,12,51,0),24 },
+       { IPv4(203,12,83,0),24 },
+       { IPv4(203,12,97,0),24 },
+       { IPv4(203,12,115,0),24 },
+       { IPv4(203,12,144,0),21 },
+       { IPv4(203,12,163,0),24 },
+       { IPv4(203,12,172,0),22 },
+       { IPv4(203,12,216,0),23 },
+       { IPv4(203,12,235,0),24 },
+       { IPv4(203,12,236,0),24 },
+       { IPv4(203,12,236,0),22 },
+       { IPv4(203,12,237,0),24 },
+       { IPv4(203,12,238,0),24 },
+       { IPv4(203,12,239,0),24 },
+       { IPv4(203,13,23,0),24 },
+       { IPv4(203,13,25,0),24 },
+       { IPv4(203,13,35,0),24 },
+       { IPv4(203,13,74,0),24 },
+       { IPv4(203,13,144,0),24 },
+       { IPv4(203,13,174,0),24 },
+       { IPv4(203,13,220,0),23 },
+       { IPv4(203,14,59,0),24 },
+       { IPv4(203,14,105,0),24 },
+       { IPv4(203,14,111,0),24 },
+       { IPv4(203,14,167,0),24 },
+       { IPv4(203,14,177,0),24 },
+       { IPv4(203,14,180,0),24 },
+       { IPv4(203,14,202,0),24 },
+       { IPv4(203,14,212,0),24 },
+       { IPv4(203,14,223,0),24 },
+       { IPv4(203,15,68,0),24 },
+       { IPv4(203,15,69,0),24 },
+       { IPv4(203,15,95,0),24 },
+       { IPv4(203,15,104,0),24 },
+       { IPv4(203,15,108,0),24 },
+       { IPv4(203,15,120,0),23 },
+       { IPv4(203,15,134,0),23 },
+       { IPv4(203,15,138,0),24 },
+       { IPv4(203,15,141,0),24 },
+       { IPv4(203,15,142,0),24 },
+       { IPv4(203,15,148,0),24 },
+       { IPv4(203,15,152,0),24 },
+       { IPv4(203,15,153,0),24 },
+       { IPv4(203,15,249,0),24 },
+       { IPv4(203,15,251,0),24 },
+       { IPv4(203,15,252,0),24 },
+       { IPv4(203,16,26,0),24 },
+       { IPv4(203,16,33,0),24 },
+       { IPv4(203,16,35,0),24 },
+       { IPv4(203,16,52,0),23 },
+       { IPv4(203,16,54,0),24 },
+       { IPv4(203,16,60,0),24 },
+       { IPv4(203,16,61,0),24 },
+       { IPv4(203,16,139,0),24 },
+       { IPv4(203,16,143,0),24 },
+       { IPv4(203,16,169,0),24 },
+       { IPv4(203,16,170,0),24 },
+       { IPv4(203,16,176,0),24 },
+       { IPv4(203,16,180,0),22 },
+       { IPv4(203,16,192,0),23 },
+       { IPv4(203,16,225,0),24 },
+       { IPv4(203,16,226,0),24 },
+       { IPv4(203,16,232,0),24 },
+       { IPv4(203,16,233,0),24 },
+       { IPv4(203,16,246,0),24 },
+       { IPv4(203,17,19,0),24 },
+       { IPv4(203,17,22,0),24 },
+       { IPv4(203,17,40,0),21 },
+       { IPv4(203,17,43,0),24 },
+       { IPv4(203,17,54,0),24 },
+       { IPv4(203,17,69,0),24 },
+       { IPv4(203,17,71,0),24 },
+       { IPv4(203,17,112,0),24 },
+       { IPv4(203,17,113,0),24 },
+       { IPv4(203,17,123,0),24 },
+       { IPv4(203,17,125,0),24 },
+       { IPv4(203,17,162,0),24 },
+       { IPv4(203,17,165,0),24 },
+       { IPv4(203,17,167,0),24 },
+       { IPv4(203,17,168,0),21 },
+       { IPv4(203,17,183,0),24 },
+       { IPv4(203,17,192,0),24 },
+       { IPv4(203,17,253,0),24 },
+       { IPv4(203,18,0,0),24 },
+       { IPv4(203,18,6,0),24 },
+       { IPv4(203,18,22,0),24 },
+       { IPv4(203,18,28,0),24 },
+       { IPv4(203,18,38,0),24 },
+       { IPv4(203,18,39,0),24 },
+       { IPv4(203,18,143,0),24 },
+       { IPv4(203,18,174,0),24 },
+       { IPv4(203,18,246,0),24 },
+       { IPv4(203,19,0,0),24 },
+       { IPv4(203,19,2,0),24 },
+       { IPv4(203,19,4,0),24 },
+       { IPv4(203,19,12,0),24 },
+       { IPv4(203,19,22,0),24 },
+       { IPv4(203,19,31,0),24 },
+       { IPv4(203,19,47,0),24 },
+       { IPv4(203,19,53,0),24 },
+       { IPv4(203,19,75,0),24 },
+       { IPv4(203,19,80,0),24 },
+       { IPv4(203,19,87,0),24 },
+       { IPv4(203,19,88,0),24 },
+       { IPv4(203,19,120,0),24 },
+       { IPv4(203,19,121,0),24 },
+       { IPv4(203,19,122,0),24 },
+       { IPv4(203,19,124,0),24 },
+       { IPv4(203,19,126,0),24 },
+       { IPv4(203,19,127,0),24 },
+       { IPv4(203,19,132,0),24 },
+       { IPv4(203,19,147,0),24 },
+       { IPv4(203,19,157,0),24 },
+       { IPv4(203,19,243,0),24 },
+       { IPv4(203,19,251,0),24 },
+       { IPv4(203,19,252,0),24 },
+       { IPv4(203,20,25,0),24 },
+       { IPv4(203,20,32,0),24 },
+       { IPv4(203,20,36,0),24 },
+       { IPv4(203,20,44,0),24 },
+       { IPv4(203,20,45,0),24 },
+       { IPv4(203,20,52,0),24 },
+       { IPv4(203,20,53,0),24 },
+       { IPv4(203,20,62,0),24 },
+       { IPv4(203,20,72,0),24 },
+       { IPv4(203,20,80,0),24 },
+       { IPv4(203,20,97,0),24 },
+       { IPv4(203,20,99,0),24 },
+       { IPv4(203,20,102,0),23 },
+       { IPv4(203,20,115,0),24 },
+       { IPv4(203,20,125,0),24 },
+       { IPv4(203,20,234,0),24 },
+       { IPv4(203,20,244,0),24 },
+       { IPv4(203,20,245,0),24 },
+       { IPv4(203,21,20,0),24 },
+       { IPv4(203,21,46,0),24 },
+       { IPv4(203,21,67,0),24 },
+       { IPv4(203,21,122,0),24 },
+       { IPv4(203,21,123,0),24 },
+       { IPv4(203,21,125,0),24 },
+       { IPv4(203,21,127,0),24 },
+       { IPv4(203,21,132,0),24 },
+       { IPv4(203,21,134,0),24 },
+       { IPv4(203,21,150,0),23 },
+       { IPv4(203,21,216,0),24 },
+       { IPv4(203,22,18,0),24 },
+       { IPv4(203,22,19,0),24 },
+       { IPv4(203,22,70,0),24 },
+       { IPv4(203,22,82,0),24 },
+       { IPv4(203,22,110,0),23 },
+       { IPv4(203,22,130,0),24 },
+       { IPv4(203,22,132,0),24 },
+       { IPv4(203,22,192,0),24 },
+       { IPv4(203,22,205,0),24 },
+       { IPv4(203,22,214,0),24 },
+       { IPv4(203,22,229,0),24 },
+       { IPv4(203,22,249,0),24 },
+       { IPv4(203,22,254,0),24 },
+       { IPv4(203,23,3,0),24 },
+       { IPv4(203,23,14,0),24 },
+       { IPv4(203,23,17,0),24 },
+       { IPv4(203,23,29,0),24 },
+       { IPv4(203,23,32,0),22 },
+       { IPv4(203,23,42,0),24 },
+       { IPv4(203,23,43,0),24 },
+       { IPv4(203,23,50,0),24 },
+       { IPv4(203,23,53,0),24 },
+       { IPv4(203,23,77,0),24 },
+       { IPv4(203,23,78,0),23 },
+       { IPv4(203,23,83,0),24 },
+       { IPv4(203,23,87,0),24 },
+       { IPv4(203,23,88,0),23 },
+       { IPv4(203,23,97,0),24 },
+       { IPv4(203,23,111,0),24 },
+       { IPv4(203,23,155,0),24 },
+       { IPv4(203,23,156,0),23 },
+       { IPv4(203,23,164,0),24 },
+       { IPv4(203,23,165,0),24 },
+       { IPv4(203,23,166,0),24 },
+       { IPv4(203,23,175,0),24 },
+       { IPv4(203,23,186,0),24 },
+       { IPv4(203,23,190,0),24 },
+       { IPv4(203,23,200,0),24 },
+       { IPv4(203,23,201,0),24 },
+       { IPv4(203,23,202,0),24 },
+       { IPv4(203,23,203,0),24 },
+       { IPv4(203,23,225,0),24 },
+       { IPv4(203,23,236,0),24 },
+       { IPv4(203,23,237,0),24 },
+       { IPv4(203,23,238,0),24 },
+       { IPv4(203,23,239,0),24 },
+       { IPv4(203,24,19,0),24 },
+       { IPv4(203,24,52,0),24 },
+       { IPv4(203,24,53,0),24 },
+       { IPv4(203,24,62,0),24 },
+       { IPv4(203,24,66,0),24 },
+       { IPv4(203,24,70,0),23 },
+       { IPv4(203,24,75,0),24 },
+       { IPv4(203,24,82,0),23 },
+       { IPv4(203,24,91,0),24 },
+       { IPv4(203,24,95,0),24 },
+       { IPv4(203,24,105,0),24 },
+       { IPv4(203,24,107,0),24 },
+       { IPv4(203,24,110,0),24 },
+       { IPv4(203,24,126,0),24 },
+       { IPv4(203,24,127,0),24 },
+       { IPv4(203,24,134,0),23 },
+       { IPv4(203,24,150,0),24 },
+       { IPv4(203,24,163,0),24 },
+       { IPv4(203,24,214,0),24 },
+       { IPv4(203,24,215,0),24 },
+       { IPv4(203,24,218,0),24 },
+       { IPv4(203,24,241,0),24 },
+       { IPv4(203,24,246,0),24 },
+       { IPv4(203,24,251,0),24 },
+       { IPv4(203,25,25,0),24 },
+       { IPv4(203,25,67,0),24 },
+       { IPv4(203,25,76,0),24 },
+       { IPv4(203,25,79,0),24 },
+       { IPv4(203,25,84,0),23 },
+       { IPv4(203,25,96,0),24 },
+       { IPv4(203,25,110,0),23 },
+       { IPv4(203,25,119,0),24 },
+       { IPv4(203,25,120,0),24 },
+       { IPv4(203,25,129,0),24 },
+       { IPv4(203,25,130,0),24 },
+       { IPv4(203,25,148,0),24 },
+       { IPv4(203,25,159,0),24 },
+       { IPv4(203,25,165,0),24 },
+       { IPv4(203,25,178,0),24 },
+       { IPv4(203,25,183,0),24 },
+       { IPv4(203,25,188,0),24 },
+       { IPv4(203,25,189,0),24 },
+       { IPv4(203,25,192,0),24 },
+       { IPv4(203,25,193,0),24 },
+       { IPv4(203,25,195,0),24 },
+       { IPv4(203,26,8,0),22 },
+       { IPv4(203,26,18,0),24 },
+       { IPv4(203,26,20,0),24 },
+       { IPv4(203,26,21,0),24 },
+       { IPv4(203,26,25,0),24 },
+       { IPv4(203,26,26,0),24 },
+       { IPv4(203,26,28,0),24 },
+       { IPv4(203,26,37,0),24 },
+       { IPv4(203,26,38,0),24 },
+       { IPv4(203,26,39,0),24 },
+       { IPv4(203,26,45,0),24 },
+       { IPv4(203,26,47,0),24 },
+       { IPv4(203,26,54,0),24 },
+       { IPv4(203,26,66,0),23 },
+       { IPv4(203,26,79,0),24 },
+       { IPv4(203,26,82,0),23 },
+       { IPv4(203,26,89,0),24 },
+       { IPv4(203,26,108,0),24 },
+       { IPv4(203,26,112,0),24 },
+       { IPv4(203,26,127,0),24 },
+       { IPv4(203,26,130,0),24 },
+       { IPv4(203,26,141,0),24 },
+       { IPv4(203,26,215,0),24 },
+       { IPv4(203,26,216,0),24 },
+       { IPv4(203,26,225,0),24 },
+       { IPv4(203,26,237,0),24 },
+       { IPv4(203,26,240,0),23 },
+       { IPv4(203,26,247,0),24 },
+       { IPv4(203,27,1,0),24 },
+       { IPv4(203,27,5,0),24 },
+       { IPv4(203,27,9,0),24 },
+       { IPv4(203,27,18,0),24 },
+       { IPv4(203,27,47,0),24 },
+       { IPv4(203,27,49,0),24 },
+       { IPv4(203,27,51,0),24 },
+       { IPv4(203,27,68,0),24 },
+       { IPv4(203,27,69,0),24 },
+       { IPv4(203,27,85,0),24 },
+       { IPv4(203,27,87,0),24 },
+       { IPv4(203,27,90,0),24 },
+       { IPv4(203,27,91,0),24 },
+       { IPv4(203,27,92,0),24 },
+       { IPv4(203,27,100,0),24 },
+       { IPv4(203,27,104,0),23 },
+       { IPv4(203,27,192,0),24 },
+       { IPv4(203,27,203,0),24 },
+       { IPv4(203,27,208,0),24 },
+       { IPv4(203,27,209,0),24 },
+       { IPv4(203,27,210,0),24 },
+       { IPv4(203,27,211,0),24 },
+       { IPv4(203,27,212,0),24 },
+       { IPv4(203,27,213,0),24 },
+       { IPv4(203,27,214,0),24 },
+       { IPv4(203,27,215,0),24 },
+       { IPv4(203,27,222,0),24 },
+       { IPv4(203,27,231,0),24 },
+       { IPv4(203,27,242,0),24 },
+       { IPv4(203,27,248,0),24 },
+       { IPv4(203,28,17,0),24 },
+       { IPv4(203,28,22,0),24 },
+       { IPv4(203,28,32,0),24 },
+       { IPv4(203,28,52,0),23 },
+       { IPv4(203,28,94,0),23 },
+       { IPv4(203,28,95,0),24 },
+       { IPv4(203,28,116,0),22 },
+       { IPv4(203,28,134,0),23 },
+       { IPv4(203,28,147,0),24 },
+       { IPv4(203,28,171,0),24 },
+       { IPv4(203,28,173,0),24 },
+       { IPv4(203,28,193,0),24 },
+       { IPv4(203,28,207,0),24 },
+       { IPv4(203,28,208,0),24 },
+       { IPv4(203,28,209,0),24 },
+       { IPv4(203,28,210,0),24 },
+       { IPv4(203,28,211,0),24 },
+       { IPv4(203,28,232,0),24 },
+       { IPv4(203,28,238,0),24 },
+       { IPv4(203,29,3,0),24 },
+       { IPv4(203,29,19,0),24 },
+       { IPv4(203,29,65,0),24 },
+       { IPv4(203,29,70,0),24 },
+       { IPv4(203,29,74,0),24 },
+       { IPv4(203,29,91,0),24 },
+       { IPv4(203,29,92,0),24 },
+       { IPv4(203,29,93,0),24 },
+       { IPv4(203,29,114,0),23 },
+       { IPv4(203,29,119,0),24 },
+       { IPv4(203,29,125,0),24 },
+       { IPv4(203,29,127,0),24 },
+       { IPv4(203,29,128,0),24 },
+       { IPv4(203,29,130,0),24 },
+       { IPv4(203,29,138,0),24 },
+       { IPv4(203,29,140,0),24 },
+       { IPv4(203,29,141,0),24 },
+       { IPv4(203,29,142,0),24 },
+       { IPv4(203,29,150,0),24 },
+       { IPv4(203,29,151,0),24 },
+       { IPv4(203,29,153,0),24 },
+       { IPv4(203,29,155,0),24 },
+       { IPv4(203,29,156,0),24 },
+       { IPv4(203,29,159,0),24 },
+       { IPv4(203,29,181,0),24 },
+       { IPv4(203,29,184,0),24 },
+       { IPv4(203,29,189,0),24 },
+       { IPv4(203,29,190,0),24 },
+       { IPv4(203,29,218,0),24 },
+       { IPv4(203,29,221,0),24 },
+       { IPv4(203,29,224,0),23 },
+       { IPv4(203,29,230,0),24 },
+       { IPv4(203,29,236,0),24 },
+       { IPv4(203,29,243,0),24 },
+       { IPv4(203,29,250,0),23 },
+       { IPv4(203,30,14,0),24 },
+       { IPv4(203,30,15,0),24 },
+       { IPv4(203,30,19,0),24 },
+       { IPv4(203,30,24,0),24 },
+       { IPv4(203,30,50,0),24 },
+       { IPv4(203,30,62,0),23 },
+       { IPv4(203,30,68,0),24 },
+       { IPv4(203,30,85,0),24 },
+       { IPv4(203,30,88,0),24 },
+       { IPv4(203,30,96,0),24 },
+       { IPv4(203,30,98,0),23 },
+       { IPv4(203,30,105,0),24 },
+       { IPv4(203,30,130,0),24 },
+       { IPv4(203,30,140,0),24 },
+       { IPv4(203,30,141,0),24 },
+       { IPv4(203,30,142,0),24 },
+       { IPv4(203,30,158,0),24 },
+       { IPv4(203,30,171,0),24 },
+       { IPv4(203,30,175,0),24 },
+       { IPv4(203,30,184,0),23 },
+       { IPv4(203,30,194,0),24 },
+       { IPv4(203,30,202,0),24 },
+       { IPv4(203,30,208,0),24 },
+       { IPv4(203,30,210,0),24 },
+       { IPv4(203,30,213,0),24 },
+       { IPv4(203,30,216,0),24 },
+       { IPv4(203,30,228,0),23 },
+       { IPv4(203,30,230,0),24 },
+       { IPv4(203,30,236,0),23 },
+       { IPv4(203,30,247,0),24 },
+       { IPv4(203,30,248,0),24 },
+       { IPv4(203,30,254,0),24 },
+       { IPv4(203,30,255,0),24 },
+       { IPv4(203,31,8,0),24 },
+       { IPv4(203,31,9,0),24 },
+       { IPv4(203,31,22,0),24 },
+       { IPv4(203,31,23,0),24 },
+       { IPv4(203,31,32,0),22 },
+       { IPv4(203,31,44,0),24 },
+       { IPv4(203,31,48,0),24 },
+       { IPv4(203,31,59,0),24 },
+       { IPv4(203,31,66,0),24 },
+       { IPv4(203,31,71,0),24 },
+       { IPv4(203,31,79,0),24 },
+       { IPv4(203,31,86,0),24 },
+       { IPv4(203,31,93,0),24 },
+       { IPv4(203,31,96,0),24 },
+       { IPv4(203,31,121,0),24 },
+       { IPv4(203,31,122,0),24 },
+       { IPv4(203,31,123,0),24 },
+       { IPv4(203,31,164,0),24 },
+       { IPv4(203,31,165,0),24 },
+       { IPv4(203,31,169,0),24 },
+       { IPv4(203,31,173,0),24 },
+       { IPv4(203,31,184,0),24 },
+       { IPv4(203,31,194,0),24 },
+       { IPv4(203,31,240,0),24 },
+       { IPv4(203,32,0,0),23 },
+       { IPv4(203,32,2,0),24 },
+       { IPv4(203,32,3,0),24 },
+       { IPv4(203,32,57,0),24 },
+       { IPv4(203,32,65,0),24 },
+       { IPv4(203,32,72,0),23 },
+       { IPv4(203,32,74,0),24 },
+       { IPv4(203,32,89,0),24 },
+       { IPv4(203,32,94,0),24 },
+       { IPv4(203,32,103,0),24 },
+       { IPv4(203,32,111,0),24 },
+       { IPv4(203,32,135,0),24 },
+       { IPv4(203,32,143,0),24 },
+       { IPv4(203,32,158,0),24 },
+       { IPv4(203,32,176,0),24 },
+       { IPv4(203,32,194,0),24 },
+       { IPv4(203,32,202,0),24 },
+       { IPv4(203,32,208,0),22 },
+       { IPv4(203,32,224,0),19 },
+       { IPv4(203,33,2,0),24 },
+       { IPv4(203,33,6,0),24 },
+       { IPv4(203,33,31,0),24 },
+       { IPv4(203,33,39,0),24 },
+       { IPv4(203,33,77,0),24 },
+       { IPv4(203,33,96,0),22 },
+       { IPv4(203,33,105,0),24 },
+       { IPv4(203,33,133,0),24 },
+       { IPv4(203,33,134,0),24 },
+       { IPv4(203,33,136,0),22 },
+       { IPv4(203,33,168,0),24 },
+       { IPv4(203,33,171,0),24 },
+       { IPv4(203,33,178,0),24 },
+       { IPv4(203,33,182,0),24 },
+       { IPv4(203,33,188,0),24 },
+       { IPv4(203,33,191,0),24 },
+       { IPv4(203,33,237,0),24 },
+       { IPv4(203,33,240,0),24 },
+       { IPv4(203,33,251,0),24 },
+       { IPv4(203,34,24,0),24 },
+       { IPv4(203,34,36,0),24 },
+       { IPv4(203,34,37,0),24 },
+       { IPv4(203,34,40,0),24 },
+       { IPv4(203,34,61,0),24 },
+       { IPv4(203,34,62,0),24 },
+       { IPv4(203,34,69,0),24 },
+       { IPv4(203,34,110,0),24 },
+       { IPv4(203,34,126,0),23 },
+       { IPv4(203,34,137,0),24 },
+       { IPv4(203,34,140,0),24 },
+       { IPv4(203,34,151,0),24 },
+       { IPv4(203,34,156,0),22 },
+       { IPv4(203,34,180,0),24 },
+       { IPv4(203,34,184,0),24 },
+       { IPv4(203,34,202,0),24 },
+       { IPv4(203,34,218,0),24 },
+       { IPv4(203,35,128,0),23 },
+       { IPv4(203,37,38,0),24 },
+       { IPv4(203,37,165,0),24 },
+       { IPv4(203,37,169,0),24 },
+       { IPv4(203,37,185,0),24 },
+       { IPv4(203,38,138,0),24 },
+       { IPv4(203,38,140,0),22 },
+       { IPv4(203,44,170,0),23 },
+       { IPv4(203,52,18,0),24 },
+       { IPv4(203,55,6,0),23 },
+       { IPv4(203,55,20,0),24 },
+       { IPv4(203,55,23,0),24 },
+       { IPv4(203,55,33,0),24 },
+       { IPv4(203,55,46,0),23 },
+       { IPv4(203,55,48,0),23 },
+       { IPv4(203,55,50,0),24 },
+       { IPv4(203,55,51,0),24 },
+       { IPv4(203,55,54,0),24 },
+       { IPv4(203,55,54,0),23 },
+       { IPv4(203,55,55,0),24 },
+       { IPv4(203,55,65,0),24 },
+       { IPv4(203,55,69,0),24 },
+       { IPv4(203,55,83,0),24 },
+       { IPv4(203,55,102,0),24 },
+       { IPv4(203,55,102,0),23 },
+       { IPv4(203,55,103,0),24 },
+       { IPv4(203,55,105,0),24 },
+       { IPv4(203,55,123,0),24 },
+       { IPv4(203,55,138,0),24 },
+       { IPv4(203,55,142,0),24 },
+       { IPv4(203,55,144,0),24 },
+       { IPv4(203,55,145,0),24 },
+       { IPv4(203,55,158,0),24 },
+       { IPv4(203,55,160,0),24 },
+       { IPv4(203,55,161,0),24 },
+       { IPv4(203,55,176,0),24 },
+       { IPv4(203,55,191,0),24 },
+       { IPv4(203,55,204,0),23 },
+       { IPv4(203,55,215,0),24 },
+       { IPv4(203,55,226,0),24 },
+       { IPv4(203,55,227,0),24 },
+       { IPv4(203,55,250,0),24 },
+       { IPv4(203,56,0,0),24 },
+       { IPv4(203,56,2,0),24 },
+       { IPv4(203,56,3,0),24 },
+       { IPv4(203,56,8,0),24 },
+       { IPv4(203,56,18,0),24 },
+       { IPv4(203,56,20,0),24 },
+       { IPv4(203,56,34,0),24 },
+       { IPv4(203,56,37,0),24 },
+       { IPv4(203,56,88,0),24 },
+       { IPv4(203,56,89,0),24 },
+       { IPv4(203,56,94,0),24 },
+       { IPv4(203,56,98,0),24 },
+       { IPv4(203,56,116,0),23 },
+       { IPv4(203,56,136,0),23 },
+       { IPv4(203,56,186,0),24 },
+       { IPv4(203,56,208,0),22 },
+       { IPv4(203,56,213,0),24 },
+       { IPv4(203,56,218,0),24 },
+       { IPv4(203,56,234,0),24 },
+       { IPv4(203,56,241,0),24 },
+       { IPv4(203,56,244,0),24 },
+       { IPv4(203,56,246,0),24 },
+       { IPv4(203,56,247,0),24 },
+       { IPv4(203,56,248,0),24 },
+       { IPv4(203,56,253,0),24 },
+       { IPv4(203,57,10,0),24 },
+       { IPv4(203,57,20,0),24 },
+       { IPv4(203,57,25,0),24 },
+       { IPv4(203,57,36,0),23 },
+       { IPv4(203,57,48,0),24 },
+       { IPv4(203,57,49,0),24 },
+       { IPv4(203,57,52,0),22 },
+       { IPv4(203,57,56,0),23 },
+       { IPv4(203,57,75,0),24 },
+       { IPv4(203,57,91,0),24 },
+       { IPv4(203,57,92,0),24 },
+       { IPv4(203,57,110,0),24 },
+       { IPv4(203,57,118,0),23 },
+       { IPv4(203,57,121,0),24 },
+       { IPv4(203,57,147,0),24 },
+       { IPv4(203,57,149,0),24 },
+       { IPv4(203,57,192,0),23 },
+       { IPv4(203,57,192,0),24 },
+       { IPv4(203,57,194,0),24 },
+       { IPv4(203,57,204,0),24 },
+       { IPv4(203,57,222,0),24 },
+       { IPv4(203,57,252,0),22 },
+       { IPv4(203,58,0,0),24 },
+       { IPv4(203,58,9,0),24 },
+       { IPv4(203,58,13,0),24 },
+       { IPv4(203,58,17,0),24 },
+       { IPv4(203,58,18,0),24 },
+       { IPv4(203,58,21,0),24 },
+       { IPv4(203,58,22,0),24 },
+       { IPv4(203,58,24,0),24 },
+       { IPv4(203,58,31,0),24 },
+       { IPv4(203,58,58,0),24 },
+       { IPv4(203,58,59,0),24 },
+       { IPv4(203,58,117,0),24 },
+       { IPv4(203,58,118,0),24 },
+       { IPv4(203,58,119,0),24 },
+       { IPv4(203,58,134,0),24 },
+       { IPv4(203,60,19,0),24 },
+       { IPv4(203,62,130,0),24 },
+       { IPv4(203,62,136,0),23 },
+       { IPv4(203,62,144,0),24 },
+       { IPv4(203,62,148,0),22 },
+       { IPv4(203,62,150,0),24 },
+       { IPv4(203,62,170,0),24 },
+       { IPv4(203,62,190,0),24 },
+       { IPv4(203,63,0,0),16 },
+       { IPv4(203,63,99,0),24 },
+       { IPv4(203,64,0,0),16 },
+       { IPv4(203,65,0,0),17 },
+       { IPv4(203,65,128,0),19 },
+       { IPv4(203,65,192,0),19 },
+       { IPv4(203,65,224,0),21 },
+       { IPv4(203,65,232,0),22 },
+       { IPv4(203,65,240,0),22 },
+       { IPv4(203,65,244,0),22 },
+       { IPv4(203,65,248,0),21 },
+       { IPv4(203,67,0,0),16 },
+       { IPv4(203,67,175,0),24 },
+       { IPv4(203,68,0,0),16 },
+       { IPv4(203,70,0,0),16 },
+       { IPv4(203,70,62,0),24 },
+       { IPv4(203,71,0,0),16 },
+       { IPv4(203,72,0,0),19 },
+       { IPv4(203,72,32,0),22 },
+       { IPv4(203,72,36,0),23 },
+       { IPv4(203,72,38,0),23 },
+       { IPv4(203,72,40,0),21 },
+       { IPv4(203,72,48,0),20 },
+       { IPv4(203,72,64,0),18 },
+       { IPv4(203,72,128,0),17 },
+       { IPv4(203,73,0,0),16 },
+       { IPv4(203,73,64,0),18 },
+       { IPv4(203,73,192,0),18 },
+       { IPv4(203,73,250,0),24 },
+       { IPv4(203,77,224,0),21 },
+       { IPv4(203,77,232,0),21 },
+       { IPv4(203,77,241,0),24 },
+       { IPv4(203,77,254,0),24 },
+       { IPv4(203,77,255,0),24 },
+       { IPv4(203,78,128,0),24 },
+       { IPv4(203,78,130,0),24 },
+       { IPv4(203,80,64,0),24 },
+       { IPv4(203,80,66,0),23 },
+       { IPv4(203,84,63,0),24 },
+       { IPv4(203,86,96,0),19 },
+       { IPv4(203,86,156,0),24 },
+       { IPv4(203,87,0,0),20 },
+       { IPv4(203,87,16,0),23 },
+       { IPv4(203,87,18,0),24 },
+       { IPv4(203,87,19,0),24 },
+       { IPv4(203,87,20,0),24 },
+       { IPv4(203,87,21,0),24 },
+       { IPv4(203,87,22,0),23 },
+       { IPv4(203,87,25,0),24 },
+       { IPv4(203,87,26,0),24 },
+       { IPv4(203,87,27,0),24 },
+       { IPv4(203,87,32,0),20 },
+       { IPv4(203,87,48,0),23 },
+       { IPv4(203,87,51,0),24 },
+       { IPv4(203,87,53,0),24 },
+       { IPv4(203,87,57,0),24 },
+       { IPv4(203,87,58,0),23 },
+       { IPv4(203,87,60,0),24 },
+       { IPv4(203,87,61,0),24 },
+       { IPv4(203,87,62,0),24 },
+       { IPv4(203,87,63,0),24 },
+       { IPv4(203,87,64,0),24 },
+       { IPv4(203,87,65,0),24 },
+       { IPv4(203,87,66,0),23 },
+       { IPv4(203,87,69,0),24 },
+       { IPv4(203,87,70,0),24 },
+       { IPv4(203,87,71,0),24 },
+       { IPv4(203,87,73,0),24 },
+       { IPv4(203,87,74,0),24 },
+       { IPv4(203,87,75,0),24 },
+       { IPv4(203,87,76,0),24 },
+       { IPv4(203,87,77,0),24 },
+       { IPv4(203,87,78,0),24 },
+       { IPv4(203,87,79,0),24 },
+       { IPv4(203,87,80,0),23 },
+       { IPv4(203,87,82,0),23 },
+       { IPv4(203,87,84,0),22 },
+       { IPv4(203,87,88,0),23 },
+       { IPv4(203,87,90,0),23 },
+       { IPv4(203,87,92,0),22 },
+       { IPv4(203,87,128,0),20 },
+       { IPv4(203,87,132,0),24 },
+       { IPv4(203,88,0,0),22 },
+       { IPv4(203,88,133,0),24 },
+       { IPv4(203,88,134,0),24 },
+       { IPv4(203,88,135,0),24 },
+       { IPv4(203,88,136,0),24 },
+       { IPv4(203,88,137,0),24 },
+       { IPv4(203,88,141,0),24 },
+       { IPv4(203,88,142,0),24 },
+       { IPv4(203,88,143,0),24 },
+       { IPv4(203,88,144,0),24 },
+       { IPv4(203,88,145,0),24 },
+       { IPv4(203,88,146,0),24 },
+       { IPv4(203,88,147,0),24 },
+       { IPv4(203,89,64,0),19 },
+       { IPv4(203,90,0,0),22 },
+       { IPv4(203,90,192,0),20 },
+       { IPv4(203,91,224,0),19 },
+       { IPv4(203,91,224,0),24 },
+       { IPv4(203,91,226,0),24 },
+       { IPv4(203,91,233,0),24 },
+       { IPv4(203,91,235,0),24 },
+       { IPv4(203,91,236,0),24 },
+       { IPv4(203,91,237,0),24 },
+       { IPv4(203,91,249,0),24 },
+       { IPv4(203,91,250,0),24 },
+       { IPv4(203,92,64,0),19 },
+       { IPv4(203,92,128,0),19 },
+       { IPv4(203,93,248,0),21 },
+       { IPv4(203,94,64,0),18 },
+       { IPv4(203,95,128,0),18 },
+       { IPv4(203,95,192,0),18 },
+       { IPv4(203,96,16,0),20 },
+       { IPv4(203,96,48,0),20 },
+       { IPv4(203,96,96,0),19 },
+       { IPv4(203,96,120,0),23 },
+       { IPv4(203,96,128,0),20 },
+       { IPv4(203,97,0,0),17 },
+       { IPv4(203,98,64,0),19 },
+       { IPv4(203,98,64,0),20 },
+       { IPv4(203,98,80,0),20 },
+       { IPv4(203,98,95,0),24 },
+       { IPv4(203,99,65,0),24 },
+       { IPv4(203,99,66,0),24 },
+       { IPv4(203,99,71,0),24 },
+       { IPv4(203,100,224,0),19 },
+       { IPv4(203,100,232,0),24 },
+       { IPv4(203,100,238,0),23 },
+       { IPv4(203,100,247,0),24 },
+       { IPv4(203,101,128,0),19 },
+       { IPv4(203,101,152,0),21 },
+       { IPv4(203,105,128,0),21 },
+       { IPv4(203,105,136,0),22 },
+       { IPv4(203,105,140,0),23 },
+       { IPv4(203,105,142,0),23 },
+       { IPv4(203,105,144,0),24 },
+       { IPv4(203,106,0,0),16 },
+       { IPv4(203,107,0,0),18 },
+       { IPv4(203,107,128,0),17 },
+       { IPv4(203,107,128,0),22 },
+       { IPv4(203,107,144,0),20 },
+       { IPv4(203,109,140,0),24 },
+       { IPv4(203,109,141,0),24 },
+       { IPv4(203,109,160,0),24 },
+       { IPv4(203,109,161,0),24 },
+       { IPv4(203,109,172,0),22 },
+       { IPv4(203,109,202,0),24 },
+       { IPv4(203,109,206,0),24 },
+       { IPv4(203,109,210,0),24 },
+       { IPv4(203,109,212,0),22 },
+       { IPv4(203,109,217,0),24 },
+       { IPv4(203,109,220,0),24 },
+       { IPv4(203,109,224,0),22 },
+       { IPv4(203,109,240,0),21 },
+       { IPv4(203,109,248,0),22 },
+       { IPv4(203,110,159,0),24 },
+       { IPv4(203,111,192,0),20 },
+       { IPv4(203,112,9,0),24 },
+       { IPv4(203,112,224,0),19 },
+       { IPv4(203,113,0,0),19 },
+       { IPv4(203,114,224,0),23 },
+       { IPv4(203,114,226,0),23 },
+       { IPv4(203,114,228,0),23 },
+       { IPv4(203,114,230,0),23 },
+       { IPv4(203,114,232,0),23 },
+       { IPv4(203,114,234,0),23 },
+       { IPv4(203,114,236,0),23 },
+       { IPv4(203,114,238,0),23 },
+       { IPv4(203,114,240,0),23 },
+       { IPv4(203,114,242,0),23 },
+       { IPv4(203,114,244,0),23 },
+       { IPv4(203,114,246,0),23 },
+       { IPv4(203,114,248,0),23 },
+       { IPv4(203,114,250,0),23 },
+       { IPv4(203,114,252,0),23 },
+       { IPv4(203,114,254,0),23 },
+       { IPv4(203,115,0,0),18 },
+       { IPv4(203,115,96,0),22 },
+       { IPv4(203,115,100,0),23 },
+       { IPv4(203,115,116,0),23 },
+       { IPv4(203,115,118,0),23 },
+       { IPv4(203,115,125,0),24 },
+       { IPv4(203,115,126,0),24 },
+       { IPv4(203,116,0,0),16 },
+       { IPv4(203,116,23,0),24 },
+       { IPv4(203,116,61,0),24 },
+       { IPv4(203,116,81,0),24 },
+       { IPv4(203,116,255,0),24 },
+       { IPv4(203,117,0,0),16 },
+       { IPv4(203,117,32,0),19 },
+       { IPv4(203,118,0,0),18 },
+       { IPv4(203,121,0,0),19 },
+       { IPv4(203,121,0,0),18 },
+       { IPv4(203,121,32,0),19 },
+       { IPv4(203,121,64,0),20 },
+       { IPv4(203,121,64,0),19 },
+       { IPv4(203,121,80,0),20 },
+       { IPv4(203,121,96,0),19 },
+       { IPv4(203,121,96,0),20 },
+       { IPv4(203,121,112,0),20 },
+       { IPv4(203,121,128,0),20 },
+       { IPv4(203,121,128,0),24 },
+       { IPv4(203,121,128,0),19 },
+       { IPv4(203,121,130,0),24 },
+       { IPv4(203,121,131,0),24 },
+       { IPv4(203,121,138,0),24 },
+       { IPv4(203,121,144,0),20 },
+       { IPv4(203,122,0,0),18 },
+       { IPv4(203,123,0,0),19 },
+       { IPv4(203,123,95,0),24 },
+       { IPv4(203,123,224,0),19 },
+       { IPv4(203,124,96,0),19 },
+       { IPv4(203,124,128,0),20 },
+       { IPv4(203,124,130,0),23 },
+       { IPv4(203,124,131,0),24 },
+       { IPv4(203,124,132,0),23 },
+       { IPv4(203,124,133,0),24 },
+       { IPv4(203,124,134,0),23 },
+       { IPv4(203,124,136,0),22 },
+       { IPv4(203,124,140,0),23 },
+       { IPv4(203,124,237,0),24 },
+       { IPv4(203,124,248,0),24 },
+       { IPv4(203,124,249,0),24 },
+       { IPv4(203,125,128,0),17 },
+       { IPv4(203,126,0,0),16 },
+       { IPv4(203,126,77,0),24 },
+       { IPv4(203,127,25,0),24 },
+       { IPv4(203,127,100,0),23 },
+       { IPv4(203,127,108,0),24 },
+       { IPv4(203,127,132,0),24 },
+       { IPv4(203,127,225,0),24 },
+       { IPv4(203,129,194,0),24 },
+       { IPv4(203,129,195,0),24 },
+       { IPv4(203,129,202,0),24 },
+       { IPv4(203,129,204,0),24 },
+       { IPv4(203,129,205,0),24 },
+       { IPv4(203,129,207,0),24 },
+       { IPv4(203,129,216,0),24 },
+       { IPv4(203,129,220,0),24 },
+       { IPv4(203,129,221,0),24 },
+       { IPv4(203,129,222,0),23 },
+       { IPv4(203,129,222,0),24 },
+       { IPv4(203,129,223,0),24 },
+       { IPv4(203,129,244,0),22 },
+       { IPv4(203,129,252,0),22 },
+       { IPv4(203,129,254,0),23 },
+       { IPv4(203,130,128,0),19 },
+       { IPv4(203,132,100,0),24 },
+       { IPv4(203,132,107,0),24 },
+       { IPv4(203,132,108,0),24 },
+       { IPv4(203,132,111,0),24 },
+       { IPv4(203,132,125,0),24 },
+       { IPv4(203,132,224,0),19 },
+       { IPv4(203,133,0,0),17 },
+       { IPv4(203,134,0,0),20 },
+       { IPv4(203,134,2,0),24 },
+       { IPv4(203,134,12,0),23 },
+       { IPv4(203,134,13,0),24 },
+       { IPv4(203,134,18,0),23 },
+       { IPv4(203,134,32,0),21 },
+       { IPv4(203,134,56,0),22 },
+       { IPv4(203,134,64,0),19 },
+       { IPv4(203,134,96,0),20 },
+       { IPv4(203,134,116,0),22 },
+       { IPv4(203,134,117,0),24 },
+       { IPv4(203,134,144,0),20 },
+       { IPv4(203,134,148,0),22 },
+       { IPv4(203,134,160,0),21 },
+       { IPv4(203,134,168,0),21 },
+       { IPv4(203,134,176,0),22 },
+       { IPv4(203,134,184,0),21 },
+       { IPv4(203,135,0,0),24 },
+       { IPv4(203,135,7,0),24 },
+       { IPv4(203,135,10,0),24 },
+       { IPv4(203,135,11,0),24 },
+       { IPv4(203,135,12,0),24 },
+       { IPv4(203,135,13,0),24 },
+       { IPv4(203,135,16,0),24 },
+       { IPv4(203,135,17,0),24 },
+       { IPv4(203,135,18,0),24 },
+       { IPv4(203,135,19,0),24 },
+       { IPv4(203,135,20,0),24 },
+       { IPv4(203,135,21,0),24 },
+       { IPv4(203,135,22,0),24 },
+       { IPv4(203,135,23,0),24 },
+       { IPv4(203,135,28,0),24 },
+       { IPv4(203,135,30,0),24 },
+       { IPv4(203,135,33,0),24 },
+       { IPv4(203,135,34,0),24 },
+       { IPv4(203,135,35,0),24 },
+       { IPv4(203,135,36,0),24 },
+       { IPv4(203,135,37,0),24 },
+       { IPv4(203,135,38,0),24 },
+       { IPv4(203,135,40,0),24 },
+       { IPv4(203,135,43,0),24 },
+       { IPv4(203,135,50,0),24 },
+       { IPv4(203,135,60,0),24 },
+       { IPv4(203,135,99,0),24 },
+       { IPv4(203,136,0,0),16 },
+       { IPv4(203,139,0,0),17 },
+       { IPv4(203,139,192,0),19 },
+       { IPv4(203,139,224,0),19 },
+       { IPv4(203,140,32,0),19 },
+       { IPv4(203,140,128,0),19 },
+       { IPv4(203,140,154,0),24 },
+       { IPv4(203,140,176,0),20 },
+       { IPv4(203,141,64,0),19 },
+       { IPv4(203,141,128,0),19 },
+       { IPv4(203,141,160,0),19 },
+       { IPv4(203,141,192,0),19 },
+       { IPv4(203,141,224,0),20 },
+       { IPv4(203,142,128,0),19 },
+       { IPv4(203,142,244,0),24 },
+       { IPv4(203,142,245,0),24 },
+       { IPv4(203,143,128,0),23 },
+       { IPv4(203,143,130,0),24 },
+       { IPv4(203,143,131,0),24 },
+       { IPv4(203,143,224,0),19 },
+       { IPv4(203,144,32,0),20 },
+       { IPv4(203,144,128,0),20 },
+       { IPv4(203,144,144,0),20 },
+       { IPv4(203,144,160,0),20 },
+       { IPv4(203,144,176,0),20 },
+       { IPv4(203,144,192,0),22 },
+       { IPv4(203,144,196,0),22 },
+       { IPv4(203,144,200,0),22 },
+       { IPv4(203,144,204,0),22 },
+       { IPv4(203,144,208,0),22 },
+       { IPv4(203,144,212,0),22 },
+       { IPv4(203,144,216,0),22 },
+       { IPv4(203,144,220,0),22 },
+       { IPv4(203,144,224,0),22 },
+       { IPv4(203,144,228,0),22 },
+       { IPv4(203,144,232,0),22 },
+       { IPv4(203,144,233,0),24 },
+       { IPv4(203,144,236,0),22 },
+       { IPv4(203,144,240,0),22 },
+       { IPv4(203,144,240,0),24 },
+       { IPv4(203,144,244,0),22 },
+       { IPv4(203,144,248,0),22 },
+       { IPv4(203,144,252,0),22 },
+       { IPv4(203,145,128,0),22 },
+       { IPv4(203,145,133,0),24 },
+       { IPv4(203,145,134,0),23 },
+       { IPv4(203,145,135,0),24 },
+       { IPv4(203,145,136,0),22 },
+       { IPv4(203,145,140,0),22 },
+       { IPv4(203,145,144,0),22 },
+       { IPv4(203,145,147,0),24 },
+       { IPv4(203,145,148,0),22 },
+       { IPv4(203,145,152,0),23 },
+       { IPv4(203,145,154,0),24 },
+       { IPv4(203,145,156,0),24 },
+       { IPv4(203,145,157,0),24 },
+       { IPv4(203,145,159,0),24 },
+       { IPv4(203,145,224,0),23 },
+       { IPv4(203,146,18,0),24 },
+       { IPv4(203,146,205,0),24 },
+       { IPv4(203,146,242,0),23 },
+       { IPv4(203,147,0,0),18 },
+       { IPv4(203,147,60,0),24 },
+       { IPv4(203,148,128,0),20 },
+       { IPv4(203,148,141,0),24 },
+       { IPv4(203,148,144,0),20 },
+       { IPv4(203,148,144,0),21 },
+       { IPv4(203,148,160,0),19 },
+       { IPv4(203,148,161,0),24 },
+       { IPv4(203,148,162,0),24 },
+       { IPv4(203,148,192,0),22 },
+       { IPv4(203,148,196,0),22 },
+       { IPv4(203,148,200,0),21 },
+       { IPv4(203,148,208,0),20 },
+       { IPv4(203,148,224,0),19 },
+       { IPv4(203,149,0,0),19 },
+       { IPv4(203,149,32,0),19 },
+       { IPv4(203,149,52,0),24 },
+       { IPv4(203,149,128,0),17 },
+       { IPv4(203,150,121,0),24 },
+       { IPv4(203,151,240,0),20 },
+       { IPv4(203,152,0,0),18 },
+       { IPv4(203,152,0,0),22 },
+       { IPv4(203,152,4,0),22 },
+       { IPv4(203,152,8,0),22 },
+       { IPv4(203,152,12,0),22 },
+       { IPv4(203,152,16,0),22 },
+       { IPv4(203,152,20,0),22 },
+       { IPv4(203,152,28,0),22 },
+       { IPv4(203,152,32,0),22 },
+       { IPv4(203,152,36,0),22 },
+       { IPv4(203,152,40,0),22 },
+       { IPv4(203,152,44,0),22 },
+       { IPv4(203,152,48,0),22 },
+       { IPv4(203,152,52,0),22 },
+       { IPv4(203,152,56,0),22 },
+       { IPv4(203,152,60,0),22 },
+       { IPv4(203,153,128,0),20 },
+       { IPv4(203,154,0,0),16 },
+       { IPv4(203,154,221,0),24 },
+       { IPv4(203,154,222,0),24 },
+       { IPv4(203,155,0,0),16 },
+       { IPv4(203,157,0,0),16 },
+       { IPv4(203,158,0,0),22 },
+       { IPv4(203,158,6,0),23 },
+       { IPv4(203,159,0,0),18 },
+       { IPv4(203,159,64,0),19 },
+       { IPv4(203,160,224,0),21 },
+       { IPv4(203,160,232,0),22 },
+       { IPv4(203,160,236,0),22 },
+       { IPv4(203,160,240,0),20 },
+       { IPv4(203,161,32,0),20 },
+       { IPv4(203,161,39,0),24 },
+       { IPv4(203,161,128,0),21 },
+       { IPv4(203,162,0,0),20 },
+       { IPv4(203,162,16,0),20 },
+       { IPv4(203,163,61,0),24 },
+       { IPv4(203,163,64,0),18 },
+       { IPv4(203,164,0,0),16 },
+       { IPv4(203,166,45,0),24 },
+       { IPv4(203,167,1,0),24 },
+       { IPv4(203,167,2,0),24 },
+       { IPv4(203,167,9,0),24 },
+       { IPv4(203,167,10,0),24 },
+       { IPv4(203,167,26,0),24 },
+       { IPv4(203,167,64,0),19 },
+       { IPv4(203,167,70,0),24 },
+       { IPv4(203,167,71,0),24 },
+       { IPv4(203,167,72,0),24 },
+       { IPv4(203,167,73,0),24 },
+       { IPv4(203,167,74,0),24 },
+       { IPv4(203,167,96,0),19 },
+       { IPv4(203,167,105,0),24 },
+       { IPv4(203,167,106,0),24 },
+       { IPv4(203,167,107,0),24 },
+       { IPv4(203,167,111,0),24 },
+       { IPv4(203,167,112,0),24 },
+       { IPv4(203,167,128,0),17 },
+       { IPv4(203,168,0,0),22 },
+       { IPv4(203,168,0,0),24 },
+       { IPv4(203,168,1,0),24 },
+       { IPv4(203,168,2,0),24 },
+       { IPv4(203,168,3,0),24 },
+       { IPv4(203,168,4,0),22 },
+       { IPv4(203,168,8,0),22 },
+       { IPv4(203,168,12,0),22 },
+       { IPv4(203,168,16,0),20 },
+       { IPv4(203,168,20,0),24 },
+       { IPv4(203,168,21,0),24 },
+       { IPv4(203,168,22,0),24 },
+       { IPv4(203,168,23,0),24 },
+       { IPv4(203,168,74,0),24 },
+       { IPv4(203,168,75,0),24 },
+       { IPv4(203,168,77,0),24 },
+       { IPv4(203,168,78,0),24 },
+       { IPv4(203,168,144,0),20 },
+       { IPv4(203,168,192,0),21 },
+       { IPv4(203,168,200,0),21 },
+       { IPv4(203,168,224,0),19 },
+       { IPv4(203,169,90,0),24 },
+       { IPv4(203,170,1,0),24 },
+       { IPv4(203,170,2,0),24 },
+       { IPv4(203,170,3,0),24 },
+       { IPv4(203,170,4,0),24 },
+       { IPv4(203,170,5,0),24 },
+       { IPv4(203,170,6,0),24 },
+       { IPv4(203,170,7,0),24 },
+       { IPv4(203,170,8,0),24 },
+       { IPv4(203,170,9,0),24 },
+       { IPv4(203,170,10,0),24 },
+       { IPv4(203,170,11,0),24 },
+       { IPv4(203,170,12,0),24 },
+       { IPv4(203,170,13,0),24 },
+       { IPv4(203,170,14,0),24 },
+       { IPv4(203,170,15,0),24 },
+       { IPv4(203,170,128,0),18 },
+       { IPv4(203,170,160,0),19 },
+       { IPv4(203,170,176,0),24 },
+       { IPv4(203,170,177,0),24 },
+       { IPv4(203,170,178,0),24 },
+       { IPv4(203,170,179,0),24 },
+       { IPv4(203,170,190,0),24 },
+       { IPv4(203,170,191,0),24 },
+       { IPv4(203,170,192,0),18 },
+       { IPv4(203,170,224,0),19 },
+       { IPv4(203,172,0,0),19 },
+       { IPv4(203,172,24,0),24 },
+       { IPv4(203,173,128,0),19 },
+       { IPv4(203,173,252,0),24 },
+       { IPv4(203,175,0,0),24 },
+       { IPv4(203,175,1,0),24 },
+       { IPv4(203,175,2,0),24 },
+       { IPv4(203,176,0,0),19 },
+       { IPv4(203,176,5,0),24 },
+       { IPv4(203,176,6,0),24 },
+       { IPv4(203,176,8,0),24 },
+       { IPv4(203,176,23,0),24 },
+       { IPv4(203,176,24,0),22 },
+       { IPv4(203,176,32,0),19 },
+       { IPv4(203,176,35,0),24 },
+       { IPv4(203,176,44,0),24 },
+       { IPv4(203,176,46,0),24 },
+       { IPv4(203,176,47,0),24 },
+       { IPv4(203,176,56,0),22 },
+       { IPv4(203,176,64,0),19 },
+       { IPv4(203,176,65,0),24 },
+       { IPv4(203,176,75,0),24 },
+       { IPv4(203,176,92,0),22 },
+       { IPv4(203,177,3,0),24 },
+       { IPv4(203,177,32,0),19 },
+       { IPv4(203,177,64,0),20 },
+       { IPv4(203,177,252,0),24 },
+       { IPv4(203,177,253,0),24 },
+       { IPv4(203,177,254,0),24 },
+       { IPv4(203,178,32,0),24 },
+       { IPv4(203,178,33,0),24 },
+       { IPv4(203,178,36,0),22 },
+       { IPv4(203,178,64,0),18 },
+       { IPv4(203,178,128,0),19 },
+       { IPv4(203,179,192,0),19 },
+       { IPv4(203,181,96,0),19 },
+       { IPv4(203,181,192,0),18 },
+       { IPv4(203,185,64,0),18 },
+       { IPv4(203,185,129,0),24 },
+       { IPv4(203,185,140,0),22 },
+       { IPv4(203,185,192,0),19 },
+       { IPv4(203,185,224,0),19 },
+       { IPv4(203,185,238,0),24 },
+       { IPv4(203,186,38,0),23 },
+       { IPv4(203,186,66,0),23 },
+       { IPv4(203,186,94,0),24 },
+       { IPv4(203,186,95,0),24 },
+       { IPv4(203,186,192,0),18 },
+       { IPv4(203,187,0,0),17 },
+       { IPv4(203,188,128,0),21 },
+       { IPv4(203,189,254,0),24 },
+       { IPv4(203,190,0,0),24 },
+       { IPv4(203,190,0,0),22 },
+       { IPv4(203,190,1,0),24 },
+       { IPv4(203,190,2,0),24 },
+       { IPv4(203,190,3,0),24 },
+       { IPv4(203,190,254,0),23 },
+       { IPv4(203,194,128,0),20 },
+       { IPv4(203,194,176,0),21 },
+       { IPv4(203,194,184,0),22 },
+       { IPv4(203,194,216,0),21 },
+       { IPv4(203,195,0,0),18 },
+       { IPv4(203,195,0,0),19 },
+       { IPv4(203,195,32,0),19 },
+       { IPv4(203,195,128,0),23 },
+       { IPv4(203,195,129,0),24 },
+       { IPv4(203,195,130,0),23 },
+       { IPv4(203,195,132,0),22 },
+       { IPv4(203,195,136,0),21 },
+       { IPv4(203,195,144,0),23 },
+       { IPv4(203,195,146,0),23 },
+       { IPv4(203,195,148,0),22 },
+       { IPv4(203,195,150,0),24 },
+       { IPv4(203,195,156,0),22 },
+       { IPv4(203,195,160,0),21 },
+       { IPv4(203,195,164,0),24 },
+       { IPv4(203,195,170,0),23 },
+       { IPv4(203,195,172,0),24 },
+       { IPv4(203,195,175,0),24 },
+       { IPv4(203,195,180,0),22 },
+       { IPv4(203,195,184,0),22 },
+       { IPv4(203,195,192,0),22 },
+       { IPv4(203,195,196,0),22 },
+       { IPv4(203,195,200,0),22 },
+       { IPv4(203,195,204,0),22 },
+       { IPv4(203,195,222,0),24 },
+       { IPv4(203,195,223,0),24 },
+       { IPv4(203,196,0,0),22 },
+       { IPv4(203,196,4,0),22 },
+       { IPv4(203,196,4,0),24 },
+       { IPv4(203,196,7,0),24 },
+       { IPv4(203,196,64,0),21 },
+       { IPv4(203,196,72,0),21 },
+       { IPv4(203,196,128,0),21 },
+       { IPv4(203,196,136,0),21 },
+       { IPv4(203,196,142,0),23 },
+       { IPv4(203,196,144,0),22 },
+       { IPv4(203,196,148,0),23 },
+       { IPv4(203,196,150,0),23 },
+       { IPv4(203,203,0,0),16 },
+       { IPv4(203,204,0,0),16 },
+       { IPv4(203,204,0,0),17 },
+       { IPv4(203,204,128,0),17 },
+       { IPv4(203,207,0,0),20 },
+       { IPv4(203,207,4,0),24 },
+       { IPv4(203,207,5,0),24 },
+       { IPv4(203,207,7,0),24 },
+       { IPv4(203,208,0,0),20 },
+       { IPv4(203,208,16,0),24 },
+       { IPv4(203,208,128,0),17 },
+       { IPv4(203,208,128,0),19 },
+       { IPv4(203,208,129,0),24 },
+       { IPv4(203,208,130,0),24 },
+       { IPv4(203,208,131,0),24 },
+       { IPv4(203,208,132,0),24 },
+       { IPv4(203,208,134,0),24 },
+       { IPv4(203,208,135,0),24 },
+       { IPv4(203,208,136,0),24 },
+       { IPv4(203,208,138,0),24 },
+       { IPv4(203,208,139,0),24 },
+       { IPv4(203,208,144,0),24 },
+       { IPv4(203,208,170,0),24 },
+       { IPv4(203,208,224,0),24 },
+       { IPv4(203,208,255,0),24 },
+       { IPv4(203,209,0,0),18 },
+       { IPv4(203,212,64,0),24 },
+       { IPv4(203,212,65,0),24 },
+       { IPv4(203,213,0,0),24 },
+       { IPv4(203,213,2,0),24 },
+       { IPv4(203,213,48,0),24 },
+       { IPv4(203,213,128,0),19 },
+       { IPv4(203,224,0,0),16 },
+       { IPv4(203,225,0,0),16 },
+       { IPv4(203,226,0,0),18 },
+       { IPv4(203,226,128,0),18 },
+       { IPv4(203,226,192,0),18 },
+       { IPv4(203,227,19,0),24 },
+       { IPv4(203,227,164,0),22 },
+       { IPv4(203,227,232,0),24 },
+       { IPv4(203,228,0,0),17 },
+       { IPv4(203,228,128,0),18 },
+       { IPv4(203,228,128,0),17 },
+       { IPv4(203,228,176,0),24 },
+       { IPv4(203,228,177,0),24 },
+       { IPv4(203,228,178,0),24 },
+       { IPv4(203,228,192,0),18 },
+       { IPv4(203,228,208,0),21 },
+       { IPv4(203,228,216,0),21 },
+       { IPv4(203,228,224,0),21 },
+       { IPv4(203,229,0,0),17 },
+       { IPv4(203,229,128,0),17 },
+       { IPv4(203,229,147,0),24 },
+       { IPv4(203,230,0,0),17 },
+       { IPv4(203,230,4,0),23 },
+       { IPv4(203,230,12,0),22 },
+       { IPv4(203,230,64,0),21 },
+       { IPv4(203,230,72,0),23 },
+       { IPv4(203,230,74,0),24 },
+       { IPv4(203,230,76,0),24 },
+       { IPv4(203,230,76,0),22 },
+       { IPv4(203,230,80,0),20 },
+       { IPv4(203,230,96,0),21 },
+       { IPv4(203,230,104,0),22 },
+       { IPv4(203,230,128,0),17 },
+       { IPv4(203,230,152,0),22 },
+       { IPv4(203,230,160,0),19 },
+       { IPv4(203,230,208,0),23 },
+       { IPv4(203,230,236,0),22 },
+       { IPv4(203,232,0,0),16 },
+       { IPv4(203,232,126,0),23 },
+       { IPv4(203,232,128,0),21 },
+       { IPv4(203,232,136,0),22 },
+       { IPv4(203,232,140,0),22 },
+       { IPv4(203,232,161,0),24 },
+       { IPv4(203,232,162,0),23 },
+       { IPv4(203,232,172,0),22 },
+       { IPv4(203,232,176,0),22 },
+       { IPv4(203,232,180,0),23 },
+       { IPv4(203,232,186,0),24 },
+       { IPv4(203,232,224,0),20 },
+       { IPv4(203,233,0,0),17 },
+       { IPv4(203,233,54,0),24 },
+       { IPv4(203,233,55,0),24 },
+       { IPv4(203,233,56,0),24 },
+       { IPv4(203,233,57,0),24 },
+       { IPv4(203,233,82,0),24 },
+       { IPv4(203,233,85,0),24 },
+       { IPv4(203,233,128,0),21 },
+       { IPv4(203,233,136,0),22 },
+       { IPv4(203,233,144,0),20 },
+       { IPv4(203,233,160,0),19 },
+       { IPv4(203,233,192,0),19 },
+       { IPv4(203,233,224,0),20 },
+       { IPv4(203,234,0,0),16 },
+       { IPv4(203,234,8,0),21 },
+       { IPv4(203,234,48,0),20 },
+       { IPv4(203,234,96,0),21 },
+       { IPv4(203,234,104,0),22 },
+       { IPv4(203,234,108,0),23 },
+       { IPv4(203,234,110,0),24 },
+       { IPv4(203,234,132,0),24 },
+       { IPv4(203,234,163,0),24 },
+       { IPv4(203,234,241,0),24 },
+       { IPv4(203,235,8,0),24 },
+       { IPv4(203,235,68,0),24 },
+       { IPv4(203,235,84,0),23 },
+       { IPv4(203,235,128,0),18 },
+       { IPv4(203,235,192,0),21 },
+       { IPv4(203,235,208,0),20 },
+       { IPv4(203,235,224,0),19 },
+       { IPv4(203,236,0,0),19 },
+       { IPv4(203,236,0,0),17 },
+       { IPv4(203,236,32,0),21 },
+       { IPv4(203,236,40,0),22 },
+       { IPv4(203,236,52,0),22 },
+       { IPv4(203,236,56,0),22 },
+       { IPv4(203,236,60,0),24 },
+       { IPv4(203,236,62,0),23 },
+       { IPv4(203,236,64,0),24 },
+       { IPv4(203,236,65,0),24 },
+       { IPv4(203,236,66,0),24 },
+       { IPv4(203,236,67,0),24 },
+       { IPv4(203,236,69,0),24 },
+       { IPv4(203,236,70,0),24 },
+       { IPv4(203,236,72,0),24 },
+       { IPv4(203,236,73,0),24 },
+       { IPv4(203,236,75,0),24 },
+       { IPv4(203,236,76,0),24 },
+       { IPv4(203,236,77,0),24 },
+       { IPv4(203,236,78,0),24 },
+       { IPv4(203,237,0,0),19 },
+       { IPv4(203,237,32,0),19 },
+       { IPv4(203,237,64,0),19 },
+       { IPv4(203,237,96,0),19 },
+       { IPv4(203,237,128,0),17 },
+       { IPv4(203,237,204,0),22 },
+       { IPv4(203,237,208,0),21 },
+       { IPv4(203,238,0,0),24 },
+       { IPv4(203,238,1,0),24 },
+       { IPv4(203,238,7,0),24 },
+       { IPv4(203,238,28,0),24 },
+       { IPv4(203,238,37,0),24 },
+       { IPv4(203,238,67,0),24 },
+       { IPv4(203,238,72,0),21 },
+       { IPv4(203,238,128,0),18 },
+       { IPv4(203,238,192,0),19 },
+       { IPv4(203,238,224,0),19 },
+       { IPv4(203,239,34,0),24 },
+       { IPv4(203,239,56,0),24 },
+       { IPv4(203,239,57,0),24 },
+       { IPv4(203,239,63,0),24 },
+       { IPv4(203,239,192,0),24 },
+       { IPv4(203,239,193,0),24 },
+       { IPv4(203,239,194,0),23 },
+       { IPv4(203,239,196,0),23 },
+       { IPv4(203,239,198,0),24 },
+       { IPv4(203,239,199,0),24 },
+       { IPv4(203,239,200,0),21 },
+       { IPv4(203,239,208,0),24 },
+       { IPv4(203,239,209,0),24 },
+       { IPv4(203,239,210,0),24 },
+       { IPv4(203,239,211,0),24 },
+       { IPv4(203,239,212,0),22 },
+       { IPv4(203,239,216,0),24 },
+       { IPv4(203,239,217,0),24 },
+       { IPv4(203,239,218,0),23 },
+       { IPv4(203,239,220,0),22 },
+       { IPv4(203,239,224,0),19 },
+       { IPv4(203,240,0,0),18 },
+       { IPv4(203,240,64,0),23 },
+       { IPv4(203,240,67,0),24 },
+       { IPv4(203,240,68,0),24 },
+       { IPv4(203,240,128,0),17 },
+       { IPv4(203,240,128,0),18 },
+       { IPv4(203,240,128,0),24 },
+       { IPv4(203,240,192,0),18 },
+       { IPv4(203,240,231,0),24 },
+       { IPv4(203,241,0,0),19 },
+       { IPv4(203,241,52,0),22 },
+       { IPv4(203,241,56,0),23 },
+       { IPv4(203,241,58,0),23 },
+       { IPv4(203,241,60,0),23 },
+       { IPv4(203,241,62,0),23 },
+       { IPv4(203,241,64,0),23 },
+       { IPv4(203,241,64,0),21 },
+       { IPv4(203,241,66,0),23 },
+       { IPv4(203,241,68,0),24 },
+       { IPv4(203,241,69,0),24 },
+       { IPv4(203,241,70,0),23 },
+       { IPv4(203,241,72,0),21 },
+       { IPv4(203,241,80,0),22 },
+       { IPv4(203,241,84,0),22 },
+       { IPv4(203,241,88,0),21 },
+       { IPv4(203,241,96,0),20 },
+       { IPv4(203,241,112,0),22 },
+       { IPv4(203,241,116,0),22 },
+       { IPv4(203,241,120,0),21 },
+       { IPv4(203,241,120,0),22 },
+       { IPv4(203,241,124,0),22 },
+       { IPv4(203,241,128,0),22 },
+       { IPv4(203,241,164,0),23 },
+       { IPv4(203,241,167,0),24 },
+       { IPv4(203,241,168,0),22 },
+       { IPv4(203,241,168,0),24 },
+       { IPv4(203,241,170,0),24 },
+       { IPv4(203,241,172,0),22 },
+       { IPv4(203,241,174,0),23 },
+       { IPv4(203,241,176,0),20 },
+       { IPv4(203,241,192,0),20 },
+       { IPv4(203,241,208,0),23 },
+       { IPv4(203,241,208,0),20 },
+       { IPv4(203,241,212,0),23 },
+       { IPv4(203,241,224,0),19 },
+       { IPv4(203,242,32,0),20 },
+       { IPv4(203,242,48,0),21 },
+       { IPv4(203,242,56,0),22 },
+       { IPv4(203,242,60,0),23 },
+       { IPv4(203,242,62,0),24 },
+       { IPv4(203,242,63,0),24 },
+       { IPv4(203,242,64,0),19 },
+       { IPv4(203,242,112,0),21 },
+       { IPv4(203,242,120,0),21 },
+       { IPv4(203,242,128,0),17 },
+       { IPv4(203,243,0,0),19 },
+       { IPv4(203,243,0,0),18 },
+       { IPv4(203,243,32,0),19 },
+       { IPv4(203,243,64,0),18 },
+       { IPv4(203,243,152,0),22 },
+       { IPv4(203,243,156,0),22 },
+       { IPv4(203,243,160,0),19 },
+       { IPv4(203,243,192,0),20 },
+       { IPv4(203,243,208,0),21 },
+       { IPv4(203,243,253,0),24 },
+       { IPv4(203,244,0,0),19 },
+       { IPv4(203,244,32,0),19 },
+       { IPv4(203,244,64,0),19 },
+       { IPv4(203,244,128,0),18 },
+       { IPv4(203,245,0,0),18 },
+       { IPv4(203,245,64,0),18 },
+       { IPv4(203,245,128,0),17 },
+       { IPv4(203,246,0,0),17 },
+       { IPv4(203,246,64,0),21 },
+       { IPv4(203,246,104,0),21 },
+       { IPv4(203,246,118,0),24 },
+       { IPv4(203,246,119,0),24 },
+       { IPv4(203,246,128,0),19 },
+       { IPv4(203,246,176,0),22 },
+       { IPv4(203,246,180,0),22 },
+       { IPv4(203,246,184,0),24 },
+       { IPv4(203,246,186,0),24 },
+       { IPv4(203,246,187,0),24 },
+       { IPv4(203,246,188,0),24 },
+       { IPv4(203,246,189,0),24 },
+       { IPv4(203,247,0,0),19 },
+       { IPv4(203,247,32,0),19 },
+       { IPv4(203,247,64,0),18 },
+       { IPv4(203,247,66,0),24 },
+       { IPv4(203,247,80,0),24 },
+       { IPv4(203,247,128,0),19 },
+       { IPv4(203,247,160,0),19 },
+       { IPv4(203,247,161,0),24 },
+       { IPv4(203,247,162,0),24 },
+       { IPv4(203,247,166,0),24 },
+       { IPv4(203,247,168,0),23 },
+       { IPv4(203,247,180,0),23 },
+       { IPv4(203,247,192,0),20 },
+       { IPv4(203,247,212,0),22 },
+       { IPv4(203,247,216,0),21 },
+       { IPv4(203,247,220,0),22 },
+       { IPv4(203,247,224,0),19 },
+       { IPv4(203,248,116,0),24 },
+       { IPv4(203,248,117,0),24 },
+       { IPv4(203,248,118,0),24 },
+       { IPv4(203,248,128,0),17 },
+       { IPv4(203,248,188,0),24 },
+       { IPv4(203,249,0,0),19 },
+       { IPv4(203,249,0,0),17 },
+       { IPv4(203,249,11,0),24 },
+       { IPv4(203,249,12,0),22 },
+       { IPv4(203,249,16,0),24 },
+       { IPv4(203,249,18,0),23 },
+       { IPv4(203,249,35,0),24 },
+       { IPv4(203,249,38,0),24 },
+       { IPv4(203,249,42,0),23 },
+       { IPv4(203,249,44,0),23 },
+       { IPv4(203,249,47,0),24 },
+       { IPv4(203,249,48,0),20 },
+       { IPv4(203,249,64,0),19 },
+       { IPv4(203,249,84,0),22 },
+       { IPv4(203,249,88,0),22 },
+       { IPv4(203,249,92,0),23 },
+       { IPv4(203,249,94,0),24 },
+       { IPv4(203,249,95,0),24 },
+       { IPv4(203,249,96,0),20 },
+       { IPv4(203,249,128,0),19 },
+       { IPv4(203,249,160,0),20 },
+       { IPv4(203,249,224,0),19 },
+       { IPv4(203,250,0,0),19 },
+       { IPv4(203,250,32,0),19 },
+       { IPv4(203,250,64,0),19 },
+       { IPv4(203,250,96,0),20 },
+       { IPv4(203,250,112,0),21 },
+       { IPv4(203,250,120,0),21 },
+       { IPv4(203,250,128,0),20 },
+       { IPv4(203,250,144,0),20 },
+       { IPv4(203,250,160,0),20 },
+       { IPv4(203,250,184,0),21 },
+       { IPv4(203,250,188,0),22 },
+       { IPv4(203,250,192,0),18 },
+       { IPv4(203,251,0,0),17 },
+       { IPv4(203,251,128,0),18 },
+       { IPv4(203,251,192,0),19 },
+       { IPv4(203,251,192,0),18 },
+       { IPv4(203,251,224,0),19 },
+       { IPv4(203,251,226,0),23 },
+       { IPv4(203,251,228,0),23 },
+       { IPv4(203,251,230,0),24 },
+       { IPv4(203,251,250,0),24 },
+       { IPv4(203,251,253,0),24 },
+       { IPv4(203,252,0,0),20 },
+       { IPv4(203,252,16,0),22 },
+       { IPv4(203,252,16,0),21 },
+       { IPv4(203,252,20,0),23 },
+       { IPv4(203,252,27,0),24 },
+       { IPv4(203,252,28,0),22 },
+       { IPv4(203,252,32,0),21 },
+       { IPv4(203,252,40,0),24 },
+       { IPv4(203,252,41,0),24 },
+       { IPv4(203,252,42,0),24 },
+       { IPv4(203,252,43,0),24 },
+       { IPv4(203,252,44,0),22 },
+       { IPv4(203,252,48,0),20 },
+       { IPv4(203,252,64,0),19 },
+       { IPv4(203,252,96,0),19 },
+       { IPv4(203,252,128,0),19 },
+       { IPv4(203,252,160,0),24 },
+       { IPv4(203,252,161,0),24 },
+       { IPv4(203,252,162,0),24 },
+       { IPv4(203,252,163,0),24 },
+       { IPv4(203,252,164,0),24 },
+       { IPv4(203,252,165,0),24 },
+       { IPv4(203,252,166,0),24 },
+       { IPv4(203,252,168,0),21 },
+       { IPv4(203,252,176,0),20 },
+       { IPv4(203,252,192,0),20 },
+       { IPv4(203,252,208,0),20 },
+       { IPv4(203,252,224,0),19 },
+       { IPv4(203,253,0,0),19 },
+       { IPv4(203,253,32,0),20 },
+       { IPv4(203,253,64,0),20 },
+       { IPv4(203,253,96,0),20 },
+       { IPv4(203,253,128,0),20 },
+       { IPv4(203,253,144,0),22 },
+       { IPv4(203,253,160,0),20 },
+       { IPv4(203,253,176,0),21 },
+       { IPv4(203,253,184,0),22 },
+       { IPv4(203,253,188,0),23 },
+       { IPv4(203,253,190,0),24 },
+       { IPv4(203,253,192,0),19 },
+       { IPv4(203,253,224,0),22 },
+       { IPv4(203,253,232,0),24 },
+       { IPv4(203,253,237,0),24 },
+       { IPv4(203,253,240,0),21 },
+       { IPv4(203,253,240,0),22 },
+       { IPv4(203,253,248,0),21 },
+       { IPv4(203,253,254,0),24 },
+       { IPv4(203,253,255,0),24 },
+       { IPv4(203,254,0,0),18 },
+       { IPv4(203,254,8,0),23 },
+       { IPv4(203,254,9,0),24 },
+       { IPv4(203,254,10,0),24 },
+       { IPv4(203,254,41,0),24 },
+       { IPv4(203,254,42,0),24 },
+       { IPv4(203,254,43,0),24 },
+       { IPv4(203,254,44,0),24 },
+       { IPv4(203,254,47,0),24 },
+       { IPv4(203,254,64,0),19 },
+       { IPv4(203,254,96,0),22 },
+       { IPv4(203,254,120,0),24 },
+       { IPv4(203,254,128,0),19 },
+       { IPv4(203,254,160,0),21 },
+       { IPv4(203,254,168,0),22 },
+       { IPv4(203,254,172,0),24 },
+       { IPv4(203,254,173,0),24 },
+       { IPv4(203,254,176,0),20 },
+       { IPv4(203,255,0,0),18 },
+       { IPv4(203,255,64,0),19 },
+       { IPv4(203,255,96,0),20 },
+       { IPv4(203,255,120,0),22 },
+       { IPv4(203,255,156,0),24 },
+       { IPv4(203,255,157,0),24 },
+       { IPv4(203,255,158,0),24 },
+       { IPv4(203,255,159,0),24 },
+       { IPv4(203,255,160,0),19 },
+       { IPv4(203,255,192,0),20 },
+       { IPv4(203,255,208,0),24 },
+       { IPv4(203,255,209,0),24 },
+       { IPv4(203,255,210,0),24 },
+       { IPv4(203,255,211,0),24 },
+       { IPv4(203,255,212,0),22 },
+       { IPv4(203,255,216,0),23 },
+       { IPv4(203,255,216,0),22 },
+       { IPv4(203,255,218,0),23 },
+       { IPv4(203,255,220,0),23 },
+       { IPv4(203,255,222,0),23 },
+       { IPv4(203,255,224,0),21 },
+       { IPv4(203,255,232,0),24 },
+       { IPv4(203,255,234,0),24 },
+       { IPv4(203,255,236,0),22 },
+       { IPv4(203,255,240,0),21 },
+       { IPv4(203,255,248,0),21 },
+       { IPv4(204,0,0,0),14 },
+       { IPv4(204,0,43,0),24 },
+       { IPv4(204,0,49,0),24 },
+       { IPv4(204,4,60,0),24 },
+       { IPv4(204,4,86,0),23 },
+       { IPv4(204,4,88,0),24 },
+       { IPv4(204,4,178,0),24 },
+       { IPv4(204,4,179,0),24 },
+       { IPv4(204,4,182,0),24 },
+       { IPv4(204,4,183,0),24 },
+       { IPv4(204,4,185,0),24 },
+       { IPv4(204,4,187,0),24 },
+       { IPv4(204,4,190,0),24 },
+       { IPv4(204,4,191,0),24 },
+       { IPv4(204,6,36,0),24 },
+       { IPv4(204,6,91,0),24 },
+       { IPv4(204,6,205,0),24 },
+       { IPv4(204,6,206,0),24 },
+       { IPv4(204,6,207,0),24 },
+       { IPv4(204,6,208,0),24 },
+       { IPv4(204,11,1,0),24 },
+       { IPv4(204,17,16,0),24 },
+       { IPv4(204,17,16,0),20 },
+       { IPv4(204,17,17,0),24 },
+       { IPv4(204,17,24,0),24 },
+       { IPv4(204,17,26,0),24 },
+       { IPv4(204,17,27,0),24 },
+       { IPv4(204,17,64,0),18 },
+       { IPv4(204,17,128,0),23 },
+       { IPv4(204,17,132,0),24 },
+       { IPv4(204,17,133,0),24 },
+       { IPv4(204,17,139,0),24 },
+       { IPv4(204,17,177,0),24 },
+       { IPv4(204,17,179,0),24 },
+       { IPv4(204,17,185,0),24 },
+       { IPv4(204,17,189,0),24 },
+       { IPv4(204,17,195,0),24 },
+       { IPv4(204,17,201,0),24 },
+       { IPv4(204,17,205,0),24 },
+       { IPv4(204,17,209,0),24 },
+       { IPv4(204,17,221,0),24 },
+       { IPv4(204,17,228,0),24 },
+       { IPv4(204,19,1,0),24 },
+       { IPv4(204,19,16,0),24 },
+       { IPv4(204,19,34,0),24 },
+       { IPv4(204,19,35,0),24 },
+       { IPv4(204,19,116,0),23 },
+       { IPv4(204,19,136,0),24 },
+       { IPv4(204,19,138,0),24 },
+       { IPv4(204,19,164,0),24 },
+       { IPv4(204,19,170,0),24 },
+       { IPv4(204,19,170,0),23 },
+       { IPv4(204,19,172,0),22 },
+       { IPv4(204,19,184,0),23 },
+       { IPv4(204,26,1,0),24 },
+       { IPv4(204,26,5,0),24 },
+       { IPv4(204,26,6,0),24 },
+       { IPv4(204,26,16,0),20 },
+       { IPv4(204,27,64,0),18 },
+       { IPv4(204,27,114,0),24 },
+       { IPv4(204,27,128,0),24 },
+       { IPv4(204,27,133,0),24 },
+       { IPv4(204,27,156,0),24 },
+       { IPv4(204,27,162,0),24 },
+       { IPv4(204,27,165,0),24 },
+       { IPv4(204,27,176,0),24 },
+       { IPv4(204,27,180,0),24 },
+       { IPv4(204,27,188,0),24 },
+       { IPv4(204,27,196,0),24 },
+       { IPv4(204,27,239,0),24 },
+       { IPv4(204,27,250,0),24 },
+       { IPv4(204,27,251,0),24 },
+       { IPv4(204,27,253,0),24 },
+       { IPv4(204,28,3,0),24 },
+       { IPv4(204,28,140,0),24 },
+       { IPv4(204,28,150,0),24 },
+       { IPv4(204,29,134,0),24 },
+       { IPv4(204,29,135,0),24 },
+       { IPv4(204,29,145,0),24 },
+       { IPv4(204,29,154,0),24 },
+       { IPv4(204,29,171,0),24 },
+       { IPv4(204,29,185,0),24 },
+       { IPv4(204,29,186,0),23 },
+       { IPv4(204,29,192,0),24 },
+       { IPv4(204,29,196,0),24 },
+       { IPv4(204,29,197,0),24 },
+       { IPv4(204,29,200,0),24 },
+       { IPv4(204,29,206,0),24 },
+       { IPv4(204,29,207,0),24 },
+       { IPv4(204,29,217,0),24 },
+       { IPv4(204,29,238,0),24 },
+       { IPv4(204,29,239,0),24 },
+       { IPv4(204,29,244,0),22 },
+       { IPv4(204,29,245,0),24 },
+       { IPv4(204,30,0,0),15 },
+       { IPv4(204,30,91,0),24 },
+       { IPv4(204,30,103,0),24 },
+       { IPv4(204,30,120,0),24 },
+       { IPv4(204,31,0,0),24 },
+       { IPv4(204,31,169,0),24 },
+       { IPv4(204,31,181,0),24 },
+       { IPv4(204,31,191,0),24 },
+       { IPv4(204,31,192,0),24 },
+       { IPv4(204,31,193,0),24 },
+       { IPv4(204,31,198,0),24 },
+       { IPv4(204,31,213,0),24 },
+       { IPv4(204,32,0,0),15 },
+       { IPv4(204,32,8,0),24 },
+       { IPv4(204,32,16,0),24 },
+       { IPv4(204,32,18,0),24 },
+       { IPv4(204,32,20,0),24 },
+       { IPv4(204,32,38,0),24 },
+       { IPv4(204,32,92,0),24 },
+       { IPv4(204,32,94,0),24 },
+       { IPv4(204,32,125,0),24 },
+       { IPv4(204,33,56,0),24 },
+       { IPv4(204,33,160,0),22 },
+       { IPv4(204,33,164,0),23 },
+       { IPv4(204,33,181,0),24 },
+       { IPv4(204,33,192,0),23 },
+       { IPv4(204,33,194,0),24 },
+       { IPv4(204,33,211,0),24 },
+       { IPv4(204,33,213,0),24 },
+       { IPv4(204,33,214,0),24 },
+       { IPv4(204,33,215,0),24 },
+       { IPv4(204,33,216,0),24 },
+       { IPv4(204,33,217,0),24 },
+       { IPv4(204,33,218,0),24 },
+       { IPv4(204,33,249,0),24 },
+       { IPv4(204,33,250,0),24 },
+       { IPv4(204,34,0,0),19 },
+       { IPv4(204,34,2,0),24 },
+       { IPv4(204,34,3,0),24 },
+       { IPv4(204,34,4,0),24 },
+       { IPv4(204,34,8,0),24 },
+       { IPv4(204,34,9,0),24 },
+       { IPv4(204,34,10,0),24 },
+       { IPv4(204,34,11,0),24 },
+       { IPv4(204,34,12,0),24 },
+       { IPv4(204,34,13,0),24 },
+       { IPv4(204,34,14,0),24 },
+       { IPv4(204,34,15,0),24 },
+       { IPv4(204,34,64,0),18 },
+       { IPv4(204,34,108,0),24 },
+       { IPv4(204,34,109,0),24 },
+       { IPv4(204,34,128,0),17 },
+       { IPv4(204,34,136,0),24 },
+       { IPv4(204,34,141,0),24 },
+       { IPv4(204,34,153,0),24 },
+       { IPv4(204,34,154,0),24 },
+       { IPv4(204,34,170,0),24 },
+       { IPv4(204,34,177,0),24 },
+       { IPv4(204,34,197,0),24 },
+       { IPv4(204,34,201,0),24 },
+       { IPv4(204,34,204,0),24 },
+       { IPv4(204,34,205,0),24 },
+       { IPv4(204,34,226,0),24 },
+       { IPv4(204,34,229,0),24 },
+       { IPv4(204,34,236,0),24 },
+       { IPv4(204,34,239,0),24 },
+       { IPv4(204,34,244,0),24 },
+       { IPv4(204,34,251,0),24 },
+       { IPv4(204,34,254,0),24 },
+       { IPv4(204,36,0,0),20 },
+       { IPv4(204,36,15,0),24 },
+       { IPv4(204,36,16,0),20 },
+       { IPv4(204,36,32,0),24 },
+       { IPv4(204,36,38,0),24 },
+       { IPv4(204,36,47,0),24 },
+       { IPv4(204,37,8,0),21 },
+       { IPv4(204,37,16,0),24 },
+       { IPv4(204,37,16,0),21 },
+       { IPv4(204,37,17,0),24 },
+       { IPv4(204,37,128,0),24 },
+       { IPv4(204,37,128,0),17 },
+       { IPv4(204,37,129,0),24 },
+       { IPv4(204,37,130,0),24 },
+       { IPv4(204,37,131,0),24 },
+       { IPv4(204,37,132,0),24 },
+       { IPv4(204,37,133,0),24 },
+       { IPv4(204,37,134,0),24 },
+       { IPv4(204,37,136,0),24 },
+       { IPv4(204,37,154,0),24 },
+       { IPv4(204,37,170,0),24 },
+       { IPv4(204,37,182,0),24 },
+       { IPv4(204,37,201,0),24 },
+       { IPv4(204,42,0,0),16 },
+       { IPv4(204,42,48,0),20 },
+       { IPv4(204,43,64,0),18 },
+       { IPv4(204,44,128,0),21 },
+       { IPv4(204,44,136,0),23 },
+       { IPv4(204,44,208,0),20 },
+       { IPv4(204,44,240,0),24 },
+       { IPv4(204,44,241,0),24 },
+       { IPv4(204,44,244,0),24 },
+       { IPv4(204,44,245,0),24 },
+       { IPv4(204,44,246,0),24 },
+       { IPv4(204,44,248,0),24 },
+       { IPv4(204,44,249,0),24 },
+       { IPv4(204,44,250,0),24 },
+       { IPv4(204,44,251,0),24 },
+       { IPv4(204,44,252,0),24 },
+       { IPv4(204,44,253,0),24 },
+       { IPv4(204,44,254,0),24 },
+       { IPv4(204,44,255,0),24 },
+       { IPv4(204,48,8,0),24 },
+       { IPv4(204,48,32,0),19 },
+       { IPv4(204,48,128,0),17 },
+       { IPv4(204,50,76,0),24 },
+       { IPv4(204,50,235,0),24 },
+       { IPv4(204,50,236,0),24 },
+       { IPv4(204,52,135,0),24 },
+       { IPv4(204,52,175,0),24 },
+       { IPv4(204,52,176,0),24 },
+       { IPv4(204,52,177,0),24 },
+       { IPv4(204,52,178,0),24 },
+       { IPv4(204,52,187,0),24 },
+       { IPv4(204,52,188,0),24 },
+       { IPv4(204,52,191,0),24 },
+       { IPv4(204,52,215,0),24 },
+       { IPv4(204,52,223,0),24 },
+       { IPv4(204,52,238,0),24 },
+       { IPv4(204,52,242,0),24 },
+       { IPv4(204,52,244,0),24 },
+       { IPv4(204,52,245,0),24 },
+       { IPv4(204,52,246,0),23 },
+       { IPv4(204,53,0,0),16 },
+       { IPv4(204,54,0,0),16 },
+       { IPv4(204,56,0,0),21 },
+       { IPv4(204,56,64,0),19 },
+       { IPv4(204,56,64,0),18 },
+       { IPv4(204,56,96,0),22 },
+       { IPv4(204,56,100,0),23 },
+       { IPv4(204,56,102,0),24 },
+       { IPv4(204,56,104,0),24 },
+       { IPv4(204,56,105,0),24 },
+       { IPv4(204,56,106,0),24 },
+       { IPv4(204,56,107,0),24 },
+       { IPv4(204,56,108,0),24 },
+       { IPv4(204,56,109,0),24 },
+       { IPv4(204,56,110,0),24 },
+       { IPv4(204,56,111,0),24 },
+       { IPv4(204,56,112,0),21 },
+       { IPv4(204,56,120,0),21 },
+       { IPv4(204,57,32,0),19 },
+       { IPv4(204,57,67,0),24 },
+       { IPv4(204,57,142,0),24 },
+       { IPv4(204,58,30,0),24 },
+       { IPv4(204,58,149,0),24 },
+       { IPv4(204,58,152,0),22 },
+       { IPv4(204,58,224,0),24 },
+       { IPv4(204,58,225,0),24 },
+       { IPv4(204,58,226,0),24 },
+       { IPv4(204,58,227,0),24 },
+       { IPv4(204,58,232,0),22 },
+       { IPv4(204,58,248,0),24 },
+       { IPv4(204,60,0,0),16 },
+       { IPv4(204,60,246,0),24 },
+       { IPv4(204,62,192,0),24 },
+       { IPv4(204,62,200,0),24 },
+       { IPv4(204,62,232,0),24 },
+       { IPv4(204,62,247,0),24 },
+       { IPv4(204,62,248,0),23 },
+       { IPv4(204,62,254,0),24 },
+       { IPv4(204,63,208,0),24 },
+       { IPv4(204,63,209,0),24 },
+       { IPv4(204,63,210,0),24 },
+       { IPv4(204,63,211,0),24 },
+       { IPv4(204,63,212,0),24 },
+       { IPv4(204,64,0,0),14 },
+       { IPv4(204,68,16,0),20 },
+       { IPv4(204,68,25,0),24 },
+       { IPv4(204,68,32,0),19 },
+       { IPv4(204,68,133,0),24 },
+       { IPv4(204,68,140,0),24 },
+       { IPv4(204,68,149,0),24 },
+       { IPv4(204,68,151,0),24 },
+       { IPv4(204,68,152,0),24 },
+       { IPv4(204,68,153,0),24 },
+       { IPv4(204,68,154,0),24 },
+       { IPv4(204,68,168,0),24 },
+       { IPv4(204,68,173,0),24 },
+       { IPv4(204,68,178,0),24 },
+       { IPv4(204,68,186,0),24 },
+       { IPv4(204,68,187,0),24 },
+       { IPv4(204,68,217,0),24 },
+       { IPv4(204,68,227,0),24 },
+       { IPv4(204,68,228,0),24 },
+       { IPv4(204,68,229,0),24 },
+       { IPv4(204,68,230,0),24 },
+       { IPv4(204,68,247,0),24 },
+       { IPv4(204,69,32,0),24 },
+       { IPv4(204,69,128,0),24 },
+       { IPv4(204,69,130,0),24 },
+       { IPv4(204,69,131,0),24 },
+       { IPv4(204,69,132,0),24 },
+       { IPv4(204,69,133,0),24 },
+       { IPv4(204,69,158,0),23 },
+       { IPv4(204,69,160,0),23 },
+       { IPv4(204,69,162,0),24 },
+       { IPv4(204,69,169,0),24 },
+       { IPv4(204,69,177,0),24 },
+       { IPv4(204,69,178,0),24 },
+       { IPv4(204,69,198,0),23 },
+       { IPv4(204,69,200,0),24 },
+       { IPv4(204,69,207,0),24 },
+       { IPv4(204,69,220,0),24 },
+       { IPv4(204,69,222,0),24 },
+       { IPv4(204,69,224,0),22 },
+       { IPv4(204,69,225,0),24 },
+       { IPv4(204,69,226,0),23 },
+       { IPv4(204,69,228,0),24 },
+       { IPv4(204,69,229,0),24 },
+       { IPv4(204,69,230,0),24 },
+       { IPv4(204,69,233,0),24 },
+       { IPv4(204,71,12,0),23 },
+       { IPv4(204,71,21,0),24 },
+       { IPv4(204,71,31,0),24 },
+       { IPv4(204,71,65,0),24 },
+       { IPv4(204,71,102,0),24 },
+       { IPv4(204,71,127,0),24 },
+       { IPv4(204,71,154,0),24 },
+       { IPv4(204,71,212,0),24 },
+       { IPv4(204,71,213,0),24 },
+       { IPv4(204,72,0,0),17 },
+       { IPv4(204,72,0,0),15 },
+       { IPv4(204,72,192,0),19 },
+       { IPv4(204,72,224,0),23 },
+       { IPv4(204,72,226,0),23 },
+       { IPv4(204,72,228,0),22 },
+       { IPv4(204,72,232,0),22 },
+       { IPv4(204,72,246,0),23 },
+       { IPv4(204,72,248,0),21 },
+       { IPv4(204,73,0,0),19 },
+       { IPv4(204,73,36,0),22 },
+       { IPv4(204,73,43,0),24 },
+       { IPv4(204,73,79,0),24 },
+       { IPv4(204,73,80,0),22 },
+       { IPv4(204,73,104,0),22 },
+       { IPv4(204,73,160,0),21 },
+       { IPv4(204,73,192,0),21 },
+       { IPv4(204,74,0,0),21 },
+       { IPv4(204,74,8,0),23 },
+       { IPv4(204,74,100,0),24 },
+       { IPv4(204,74,101,0),24 },
+       { IPv4(204,74,107,0),24 },
+       { IPv4(204,74,108,0),24 },
+       { IPv4(204,75,146,0),24 },
+       { IPv4(204,75,153,0),24 },
+       { IPv4(204,75,154,0),24 },
+       { IPv4(204,75,156,0),24 },
+       { IPv4(204,75,161,0),24 },
+       { IPv4(204,75,162,0),24 },
+       { IPv4(204,75,195,0),24 },
+       { IPv4(204,75,207,0),24 },
+       { IPv4(204,75,209,0),24 },
+       { IPv4(204,75,228,0),24 },
+       { IPv4(204,75,238,0),24 },
+       { IPv4(204,75,249,0),24 },
+       { IPv4(204,75,250,0),23 },
+       { IPv4(204,75,252,0),22 },
+       { IPv4(204,76,0,0),21 },
+       { IPv4(204,76,113,0),24 },
+       { IPv4(204,76,152,0),22 },
+       { IPv4(204,76,156,0),24 },
+       { IPv4(204,76,174,0),23 },
+       { IPv4(204,76,176,0),22 },
+       { IPv4(204,76,180,0),23 },
+       { IPv4(204,76,182,0),24 },
+       { IPv4(204,76,190,0),23 },
+       { IPv4(204,76,192,0),22 },
+       { IPv4(204,77,32,0),19 },
+       { IPv4(204,77,78,0),24 },
+       { IPv4(204,77,134,0),24 },
+       { IPv4(204,77,141,0),24 },
+       { IPv4(204,77,142,0),24 },
+       { IPv4(204,77,145,0),24 },
+       { IPv4(204,77,146,0),24 },
+       { IPv4(204,77,148,0),23 },
+       { IPv4(204,77,156,0),24 },
+       { IPv4(204,77,159,0),24 },
+       { IPv4(204,77,164,0),24 },
+       { IPv4(204,77,166,0),24 },
+       { IPv4(204,77,167,0),24 },
+       { IPv4(204,77,181,0),24 },
+       { IPv4(204,78,32,0),19 },
+       { IPv4(204,79,190,0),24 },
+       { IPv4(204,80,132,0),24 },
+       { IPv4(204,80,136,0),24 },
+       { IPv4(204,80,150,0),24 },
+       { IPv4(204,80,212,0),24 },
+       { IPv4(204,80,213,0),24 },
+       { IPv4(204,80,221,0),24 },
+       { IPv4(204,80,222,0),24 },
+       { IPv4(204,86,120,0),21 },
+       { IPv4(204,86,128,0),23 },
+       { IPv4(204,86,144,0),22 },
+       { IPv4(204,86,144,0),21 },
+       { IPv4(204,87,133,0),24 },
+       { IPv4(204,87,151,0),24 },
+       { IPv4(204,87,158,0),24 },
+       { IPv4(204,87,163,0),24 },
+       { IPv4(204,87,178,0),24 },
+       { IPv4(204,87,183,0),24 },
+       { IPv4(204,87,185,0),24 },
+       { IPv4(204,87,187,0),24 },
+       { IPv4(204,87,220,0),24 },
+       { IPv4(204,87,230,0),24 },
+       { IPv4(204,88,64,0),19 },
+       { IPv4(204,88,128,0),19 },
+       { IPv4(204,88,224,0),19 },
+       { IPv4(204,89,8,0),21 },
+       { IPv4(204,89,49,0),24 },
+       { IPv4(204,89,56,0),24 },
+       { IPv4(204,89,129,0),24 },
+       { IPv4(204,89,132,0),23 },
+       { IPv4(204,89,138,0),24 },
+       { IPv4(204,89,139,0),24 },
+       { IPv4(204,89,140,0),24 },
+       { IPv4(204,89,144,0),24 },
+       { IPv4(204,89,155,0),24 },
+       { IPv4(204,89,163,0),24 },
+       { IPv4(204,89,164,0),22 },
+       { IPv4(204,89,172,0),24 },
+       { IPv4(204,89,181,0),24 },
+       { IPv4(204,89,187,0),24 },
+       { IPv4(204,89,188,0),24 },
+       { IPv4(204,89,197,0),24 },
+       { IPv4(204,89,200,0),24 },
+       { IPv4(204,89,216,0),24 },
+       { IPv4(204,89,219,0),24 },
+       { IPv4(204,89,226,0),24 },
+       { IPv4(204,89,231,0),24 },
+       { IPv4(204,89,244,0),24 },
+       { IPv4(204,89,251,0),24 },
+       { IPv4(204,89,254,0),24 },
+       { IPv4(204,90,69,0),24 },
+       { IPv4(204,90,78,0),24 },
+       { IPv4(204,90,119,0),24 },
+       { IPv4(204,90,120,0),23 },
+       { IPv4(204,90,122,0),24 },
+       { IPv4(204,90,181,0),24 },
+       { IPv4(204,90,182,0),24 },
+       { IPv4(204,91,11,0),24 },
+       { IPv4(204,91,12,0),24 },
+       { IPv4(204,91,139,0),24 },
+       { IPv4(204,91,156,0),24 },
+       { IPv4(204,92,43,0),24 },
+       { IPv4(204,92,73,0),24 },
+       { IPv4(204,92,91,0),24 },
+       { IPv4(204,92,234,0),23 },
+       { IPv4(204,92,254,0),24 },
+       { IPv4(204,94,39,0),24 },
+       { IPv4(204,94,40,0),21 },
+       { IPv4(204,94,64,0),19 },
+       { IPv4(204,94,112,0),21 },
+       { IPv4(204,94,115,0),24 },
+       { IPv4(204,94,118,0),24 },
+       { IPv4(204,94,119,0),24 },
+       { IPv4(204,94,129,0),24 },
+       { IPv4(204,94,144,0),20 },
+       { IPv4(204,94,248,0),23 },
+       { IPv4(204,94,249,0),24 },
+       { IPv4(204,95,160,0),19 },
+       { IPv4(204,95,192,0),19 },
+       { IPv4(204,96,112,0),24 },
+       { IPv4(204,96,224,0),19 },
+       { IPv4(204,97,2,0),24 },
+       { IPv4(204,97,3,0),24 },
+       { IPv4(204,97,32,0),19 },
+       { IPv4(204,97,64,0),21 },
+       { IPv4(204,97,89,0),24 },
+       { IPv4(204,97,104,0),24 },
+       { IPv4(204,98,0,0),16 },
+       { IPv4(204,99,0,0),17 },
+       { IPv4(204,99,128,0),18 },
+       { IPv4(204,99,158,0),24 },
+       { IPv4(204,99,160,0),24 },
+       { IPv4(204,99,179,0),24 },
+       { IPv4(204,99,192,0),19 },
+       { IPv4(204,99,224,0),19 },
+       { IPv4(204,101,30,0),24 },
+       { IPv4(204,101,31,0),24 },
+       { IPv4(204,101,33,0),24 },
+       { IPv4(204,101,34,0),24 },
+       { IPv4(204,101,106,0),24 },
+       { IPv4(204,101,111,0),24 },
+       { IPv4(204,101,113,0),24 },
+       { IPv4(204,101,115,0),24 },
+       { IPv4(204,101,118,0),24 },
+       { IPv4(204,102,0,0),16 },
+       { IPv4(204,102,10,0),23 },
+       { IPv4(204,102,12,0),22 },
+       { IPv4(204,102,16,0),20 },
+       { IPv4(204,102,32,0),20 },
+       { IPv4(204,102,48,0),21 },
+       { IPv4(204,102,56,0),22 },
+       { IPv4(204,102,60,0),23 },
+       { IPv4(204,102,62,0),24 },
+       { IPv4(204,102,115,0),24 },
+       { IPv4(204,102,116,0),22 },
+       { IPv4(204,102,120,0),21 },
+       { IPv4(204,102,128,0),23 },
+       { IPv4(204,102,234,0),23 },
+       { IPv4(204,103,157,0),24 },
+       { IPv4(204,103,158,0),24 },
+       { IPv4(204,103,226,0),24 },
+       { IPv4(204,104,55,0),24 },
+       { IPv4(204,104,132,0),24 },
+       { IPv4(204,104,133,0),24 },
+       { IPv4(204,104,134,0),24 },
+       { IPv4(204,104,135,0),24 },
+       { IPv4(204,104,140,0),24 },
+       { IPv4(204,106,32,0),19 },
+       { IPv4(204,106,62,0),23 },
+       { IPv4(204,107,60,0),22 },
+       { IPv4(204,107,76,0),24 },
+       { IPv4(204,107,77,0),24 },
+       { IPv4(204,107,85,0),24 },
+       { IPv4(204,107,91,0),24 },
+       { IPv4(204,107,104,0),24 },
+       { IPv4(204,107,105,0),24 },
+       { IPv4(204,107,107,0),24 },
+       { IPv4(204,107,109,0),24 },
+       { IPv4(204,107,120,0),24 },
+       { IPv4(204,107,129,0),24 },
+       { IPv4(204,107,130,0),24 },
+       { IPv4(204,107,133,0),24 },
+       { IPv4(204,107,143,0),24 },
+       { IPv4(204,107,154,0),24 },
+       { IPv4(204,107,168,0),24 },
+       { IPv4(204,107,178,0),24 },
+       { IPv4(204,107,183,0),24 },
+       { IPv4(204,107,200,0),24 },
+       { IPv4(204,107,211,0),24 },
+       { IPv4(204,107,232,0),24 },
+       { IPv4(204,107,238,0),24 },
+       { IPv4(204,107,242,0),24 },
+       { IPv4(204,107,249,0),24 },
+       { IPv4(204,107,252,0),24 },
+       { IPv4(204,107,254,0),24 },
+       { IPv4(204,108,0,0),21 },
+       { IPv4(204,108,8,0),24 },
+       { IPv4(204,108,9,0),24 },
+       { IPv4(204,108,10,0),24 },
+       { IPv4(204,108,16,0),24 },
+       { IPv4(204,110,0,0),21 },
+       { IPv4(204,110,135,0),24 },
+       { IPv4(204,110,138,0),24 },
+       { IPv4(204,110,164,0),24 },
+       { IPv4(204,110,167,0),24 },
+       { IPv4(204,110,169,0),24 },
+       { IPv4(204,110,226,0),24 },
+       { IPv4(204,112,48,0),22 },
+       { IPv4(204,112,90,0),24 },
+       { IPv4(204,112,91,0),24 },
+       { IPv4(204,112,103,0),24 },
+       { IPv4(204,112,108,0),24 },
+       { IPv4(204,112,109,0),24 },
+       { IPv4(204,112,122,0),24 },
+       { IPv4(204,112,126,0),24 },
+       { IPv4(204,112,130,0),23 },
+       { IPv4(204,112,132,0),22 },
+       { IPv4(204,112,158,0),24 },
+       { IPv4(204,112,189,0),24 },
+       { IPv4(204,112,235,0),24 },
+       { IPv4(204,112,237,0),24 },
+       { IPv4(204,113,0,0),16 },
+       { IPv4(204,113,91,0),24 },
+       { IPv4(204,113,123,0),24 },
+       { IPv4(204,114,64,0),18 },
+       { IPv4(204,114,253,0),24 },
+       { IPv4(204,115,76,0),24 },
+       { IPv4(204,115,88,0),24 },
+       { IPv4(204,115,89,0),24 },
+       { IPv4(204,115,90,0),24 },
+       { IPv4(204,115,91,0),24 },
+       { IPv4(204,115,92,0),24 },
+       { IPv4(204,115,93,0),24 },
+       { IPv4(204,115,94,0),24 },
+       { IPv4(204,115,95,0),24 },
+       { IPv4(204,115,96,0),24 },
+       { IPv4(204,115,97,0),24 },
+       { IPv4(204,115,98,0),24 },
+       { IPv4(204,115,99,0),24 },
+       { IPv4(204,115,121,0),24 },
+       { IPv4(204,115,164,0),24 },
+       { IPv4(204,115,215,0),24 },
+       { IPv4(204,115,216,0),21 },
+       { IPv4(204,115,224,0),24 },
+       { IPv4(204,115,225,0),24 },
+       { IPv4(204,115,226,0),24 },
+       { IPv4(204,115,227,0),24 },
+       { IPv4(204,115,228,0),24 },
+       { IPv4(204,115,229,0),24 },
+       { IPv4(204,115,230,0),24 },
+       { IPv4(204,116,0,0),16 },
+       { IPv4(204,117,91,0),24 },
+       { IPv4(204,117,224,0),23 },
+       { IPv4(204,117,240,0),20 },
+       { IPv4(204,118,32,0),24 },
+       { IPv4(204,118,52,0),22 },
+       { IPv4(204,118,120,0),23 },
+       { IPv4(204,118,174,0),24 },
+       { IPv4(204,119,0,0),24 },
+       { IPv4(204,119,1,0),24 },
+       { IPv4(204,119,56,0),22 },
+       { IPv4(204,119,64,0),18 },
+       { IPv4(204,119,248,0),21 },
+       { IPv4(204,119,249,0),24 },
+       { IPv4(204,119,255,0),24 },
+       { IPv4(204,120,8,0),21 },
+       { IPv4(204,120,80,0),20 },
+       { IPv4(204,120,138,0),24 },
+       { IPv4(204,120,144,0),20 },
+       { IPv4(204,121,0,0),16 },
+       { IPv4(204,122,0,0),21 },
+       { IPv4(204,123,0,0),16 },
+       { IPv4(204,124,82,0),24 },
+       { IPv4(204,124,85,0),24 },
+       { IPv4(204,124,86,0),24 },
+       { IPv4(204,124,92,0),24 },
+       { IPv4(204,124,93,0),24 },
+       { IPv4(204,124,104,0),24 },
+       { IPv4(204,124,105,0),24 },
+       { IPv4(204,124,106,0),24 },
+       { IPv4(204,124,107,0),24 },
+       { IPv4(204,124,116,0),24 },
+       { IPv4(204,124,120,0),24 },
+       { IPv4(204,124,121,0),24 },
+       { IPv4(204,124,122,0),24 },
+       { IPv4(204,124,123,0),24 },
+       { IPv4(204,124,132,0),24 },
+       { IPv4(204,124,133,0),24 },
+       { IPv4(204,124,134,0),24 },
+       { IPv4(204,124,135,0),24 },
+       { IPv4(204,124,137,0),24 },
+       { IPv4(204,124,160,0),22 },
+       { IPv4(204,124,164,0),23 },
+       { IPv4(204,124,166,0),24 },
+       { IPv4(204,124,197,0),24 },
+       { IPv4(204,124,208,0),23 },
+       { IPv4(204,124,244,0),24 },
+       { IPv4(204,124,245,0),24 },
+       { IPv4(204,124,246,0),24 },
+       { IPv4(204,124,247,0),24 },
+       { IPv4(204,125,142,0),24 },
+       { IPv4(204,126,22,0),24 },
+       { IPv4(204,126,122,0),24 },
+       { IPv4(204,126,123,0),24 },
+       { IPv4(204,126,134,0),24 },
+       { IPv4(204,126,135,0),24 },
+       { IPv4(204,126,172,0),24 },
+       { IPv4(204,126,173,0),24 },
+       { IPv4(204,126,198,0),24 },
+       { IPv4(204,126,199,0),24 },
+       { IPv4(204,126,242,0),24 },
+       { IPv4(204,126,243,0),24 },
+       { IPv4(204,126,250,0),23 },
+       { IPv4(204,126,250,0),24 },
+       { IPv4(204,126,251,0),24 },
+       { IPv4(204,126,254,0),23 },
+       { IPv4(204,127,64,0),20 },
+       { IPv4(204,127,128,0),17 },
+       { IPv4(204,127,192,0),20 },
+       { IPv4(204,128,32,0),20 },
+       { IPv4(204,128,48,0),22 },
+       { IPv4(204,128,147,0),24 },
+       { IPv4(204,128,156,0),24 },
+       { IPv4(204,128,158,0),24 },
+       { IPv4(204,128,167,0),24 },
+       { IPv4(204,128,175,0),24 },
+       { IPv4(204,128,179,0),24 },
+       { IPv4(204,128,180,0),24 },
+       { IPv4(204,128,192,0),24 },
+       { IPv4(204,128,199,0),24 },
+       { IPv4(204,128,213,0),24 },
+       { IPv4(204,128,215,0),24 },
+       { IPv4(204,128,226,0),24 },
+       { IPv4(204,128,227,0),24 },
+       { IPv4(204,128,232,0),22 },
+       { IPv4(204,128,232,0),24 },
+       { IPv4(204,128,236,0),22 },
+       { IPv4(204,130,138,0),24 },
+       { IPv4(204,130,166,0),24 },
+       { IPv4(204,130,176,0),24 },
+       { IPv4(204,130,184,0),24 },
+       { IPv4(204,130,185,0),24 },
+       { IPv4(204,130,191,0),24 },
+       { IPv4(204,130,198,0),24 },
+       { IPv4(204,130,216,0),24 },
+       { IPv4(204,130,226,0),23 },
+       { IPv4(204,130,228,0),22 },
+       { IPv4(204,130,232,0),22 },
+       { IPv4(204,130,236,0),23 },
+       { IPv4(204,130,244,0),24 },
+       { IPv4(204,130,248,0),24 },
+       { IPv4(204,131,0,0),16 },
+       { IPv4(204,131,62,0),24 },
+       { IPv4(204,131,105,0),24 },
+       { IPv4(204,131,176,0),23 },
+       { IPv4(204,131,188,0),24 },
+       { IPv4(204,132,0,0),15 },
+       { IPv4(204,132,148,0),24 },
+       { IPv4(204,132,224,0),20 },
+       { IPv4(204,133,127,0),24 },
+       { IPv4(204,134,131,0),24 },
+       { IPv4(204,134,132,0),24 },
+       { IPv4(204,134,133,0),24 },
+       { IPv4(204,134,135,0),24 },
+       { IPv4(204,134,136,0),24 },
+       { IPv4(204,134,137,0),24 },
+       { IPv4(204,134,142,0),24 },
+       { IPv4(204,134,144,0),24 },
+       { IPv4(204,134,147,0),24 },
+       { IPv4(204,134,150,0),24 },
+       { IPv4(204,134,194,0),23 },
+       { IPv4(204,134,210,0),24 },
+       { IPv4(204,134,217,0),24 },
+       { IPv4(204,134,219,0),24 },
+       { IPv4(204,134,220,0),24 },
+       { IPv4(204,134,240,0),23 },
+       { IPv4(204,134,251,0),24 },
+       { IPv4(204,134,252,0),22 },
+       { IPv4(204,136,23,0),24 },
+       { IPv4(204,136,24,0),23 },
+       { IPv4(204,136,26,0),23 },
+       { IPv4(204,136,28,0),23 },
+       { IPv4(204,137,183,0),24 },
+       { IPv4(204,137,199,0),24 },
+       { IPv4(204,138,27,0),24 },
+       { IPv4(204,138,44,0),22 },
+       { IPv4(204,138,48,0),22 },
+       { IPv4(204,138,52,0),22 },
+       { IPv4(204,138,56,0),22 },
+       { IPv4(204,138,68,0),24 },
+       { IPv4(204,138,71,0),24 },
+       { IPv4(204,138,91,0),24 },
+       { IPv4(204,138,103,0),24 },
+       { IPv4(204,138,108,0),24 },
+       { IPv4(204,138,111,0),24 },
+       { IPv4(204,138,115,0),24 },
+       { IPv4(204,138,128,0),21 },
+       { IPv4(204,138,135,0),24 },
+       { IPv4(204,138,172,0),24 },
+       { IPv4(204,138,236,0),24 },
+       { IPv4(204,138,237,0),24 },
+       { IPv4(204,138,239,0),24 },
+       { IPv4(204,139,45,0),24 },
+       { IPv4(204,139,64,0),18 },
+       { IPv4(204,140,15,0),24 },
+       { IPv4(204,140,32,0),19 },
+       { IPv4(204,140,71,0),24 },
+       { IPv4(204,140,172,0),23 },
+       { IPv4(204,140,245,0),24 },
+       { IPv4(204,141,0,0),16 },
+       { IPv4(204,141,44,0),22 },
+       { IPv4(204,141,64,0),20 },
+       { IPv4(204,141,101,0),24 },
+       { IPv4(204,141,207,0),24 },
+       { IPv4(204,141,235,0),24 },
+       { IPv4(204,142,0,0),15 },
+       { IPv4(204,142,178,0),24 },
+       { IPv4(204,143,35,0),24 },
+       { IPv4(204,143,36,0),24 },
+       { IPv4(204,143,156,0),24 },
+       { IPv4(204,143,170,0),24 },
+       { IPv4(204,144,32,0),20 },
+       { IPv4(204,144,48,0),21 },
+       { IPv4(204,144,56,0),24 },
+       { IPv4(204,144,76,0),24 },
+       { IPv4(204,144,106,0),24 },
+       { IPv4(204,144,128,0),17 },
+       { IPv4(204,144,128,0),24 },
+       { IPv4(204,144,129,0),24 },
+       { IPv4(204,144,130,0),24 },
+       { IPv4(204,144,131,0),24 },
+       { IPv4(204,144,132,0),24 },
+       { IPv4(204,144,133,0),24 },
+       { IPv4(204,144,140,0),24 },
+       { IPv4(204,144,141,0),24 },
+       { IPv4(204,144,168,0),24 },
+       { IPv4(204,144,174,0),24 },
+       { IPv4(204,144,179,0),24 },
+       { IPv4(204,144,182,0),24 },
+       { IPv4(204,144,184,0),24 },
+       { IPv4(204,144,244,0),24 },
+       { IPv4(204,145,119,0),24 },
+       { IPv4(204,145,144,0),24 },
+       { IPv4(204,145,147,0),24 },
+       { IPv4(204,145,148,0),24 },
+       { IPv4(204,145,160,0),24 },
+       { IPv4(204,145,167,0),24 },
+       { IPv4(204,145,171,0),24 },
+       { IPv4(204,145,186,0),24 },
+       { IPv4(204,145,211,0),24 },
+       { IPv4(204,145,215,0),24 },
+       { IPv4(204,145,225,0),24 },
+       { IPv4(204,145,230,0),24 },
+       { IPv4(204,145,255,0),24 },
+       { IPv4(204,146,0,0),16 },
+       { IPv4(204,146,19,0),24 },
+       { IPv4(204,146,20,0),24 },
+       { IPv4(204,146,21,0),24 },
+       { IPv4(204,146,22,0),24 },
+       { IPv4(204,146,23,0),24 },
+       { IPv4(204,146,24,0),22 },
+       { IPv4(204,146,50,0),24 },
+       { IPv4(204,146,60,0),24 },
+       { IPv4(204,146,86,0),24 },
+       { IPv4(204,146,133,0),24 },
+       { IPv4(204,146,134,0),24 },
+       { IPv4(204,146,140,0),24 },
+       { IPv4(204,146,150,0),24 },
+       { IPv4(204,146,157,0),24 },
+       { IPv4(204,146,159,0),24 },
+       { IPv4(204,146,165,0),24 },
+       { IPv4(204,146,167,0),24 },
+       { IPv4(204,146,173,0),24 },
+       { IPv4(204,146,179,0),24 },
+       { IPv4(204,146,189,0),24 },
+       { IPv4(204,146,209,0),24 },
+       { IPv4(204,146,230,0),24 },
+       { IPv4(204,146,231,0),24 },
+       { IPv4(204,146,237,0),24 },
+       { IPv4(204,147,16,0),20 },
+       { IPv4(204,147,55,0),24 },
+       { IPv4(204,148,71,0),24 },
+       { IPv4(204,148,72,0),24 },
+       { IPv4(204,148,80,0),21 },
+       { IPv4(204,148,96,0),21 },
+       { IPv4(204,148,108,0),24 },
+       { IPv4(204,148,144,0),20 },
+       { IPv4(204,148,160,0),20 },
+       { IPv4(204,149,96,0),20 },
+       { IPv4(204,149,112,0),21 },
+       { IPv4(204,149,167,0),24 },
+       { IPv4(204,151,57,0),24 },
+       { IPv4(204,152,12,0),23 },
+       { IPv4(204,152,24,0),24 },
+       { IPv4(204,152,25,0),24 },
+       { IPv4(204,152,42,0),24 },
+       { IPv4(204,152,42,0),23 },
+       { IPv4(204,152,43,0),24 },
+       { IPv4(204,152,46,0),23 },
+       { IPv4(204,152,48,0),24 },
+       { IPv4(204,152,49,0),24 },
+       { IPv4(204,152,56,0),23 },
+       { IPv4(204,152,60,0),24 },
+       { IPv4(204,152,70,0),23 },
+       { IPv4(204,152,80,0),23 },
+       { IPv4(204,152,98,0),24 },
+       { IPv4(204,152,109,0),24 },
+       { IPv4(204,152,114,0),23 },
+       { IPv4(204,152,134,0),23 },
+       { IPv4(204,152,142,0),24 },
+       { IPv4(204,152,143,0),24 },
+       { IPv4(204,152,156,0),22 },
+       { IPv4(204,152,157,0),24 },
+       { IPv4(204,152,159,0),24 },
+       { IPv4(204,152,178,0),24 },
+       { IPv4(204,152,184,0),21 },
+       { IPv4(204,152,186,0),24 },
+       { IPv4(204,152,187,0),24 },
+       { IPv4(204,153,8,0),22 },
+       { IPv4(204,153,49,0),24 },
+       { IPv4(204,153,51,0),24 },
+       { IPv4(204,153,60,0),24 },
+       { IPv4(204,153,61,0),24 },
+       { IPv4(204,153,62,0),24 },
+       { IPv4(204,153,63,0),24 },
+       { IPv4(204,153,68,0),24 },
+       { IPv4(204,153,71,0),24 },
+       { IPv4(204,153,96,0),22 },
+       { IPv4(204,153,134,0),24 },
+       { IPv4(204,153,155,0),24 },
+       { IPv4(204,153,175,0),24 },
+       { IPv4(204,153,198,0),24 },
+       { IPv4(204,153,244,0),22 },
+       { IPv4(204,154,32,0),21 },
+       { IPv4(204,154,192,0),21 },
+       { IPv4(204,154,228,0),24 },
+       { IPv4(204,155,0,0),20 },
+       { IPv4(204,155,16,0),21 },
+       { IPv4(204,155,16,0),24 },
+       { IPv4(204,155,24,0),23 },
+       { IPv4(204,155,56,0),24 },
+       { IPv4(204,155,96,0),20 },
+       { IPv4(204,155,122,0),24 },
+       { IPv4(204,155,141,0),24 },
+       { IPv4(204,155,160,0),20 },
+       { IPv4(204,155,226,0),24 },
+       { IPv4(204,156,0,0),19 },
+       { IPv4(204,156,78,0),24 },
+       { IPv4(204,156,84,0),24 },
+       { IPv4(204,156,96,0),20 },
+       { IPv4(204,156,112,0),21 },
+       { IPv4(204,156,120,0),24 },
+       { IPv4(204,156,128,0),19 },
+       { IPv4(204,157,11,0),24 },
+       { IPv4(204,157,211,0),24 },
+       { IPv4(204,157,238,0),24 },
+       { IPv4(204,159,36,0),22 },
+       { IPv4(204,159,107,0),24 },
+       { IPv4(204,159,108,0),22 },
+       { IPv4(204,162,80,0),21 },
+       { IPv4(204,163,170,0),24 },
+       { IPv4(204,164,98,0),23 },
+       { IPv4(204,164,100,0),23 },
+       { IPv4(204,165,17,0),24 },
+       { IPv4(204,165,18,0),24 },
+       { IPv4(204,168,0,0),16 },
+       { IPv4(204,168,12,0),24 },
+       { IPv4(204,168,16,0),24 },
+       { IPv4(204,168,17,0),24 },
+       { IPv4(204,168,18,0),24 },
+       { IPv4(204,168,19,0),24 },
+       { IPv4(204,168,20,0),24 },
+       { IPv4(204,168,22,0),24 },
+       { IPv4(204,168,23,0),24 },
+       { IPv4(204,168,24,0),24 },
+       { IPv4(204,168,25,0),24 },
+       { IPv4(204,168,26,0),24 },
+       { IPv4(204,168,27,0),24 },
+       { IPv4(204,168,28,0),24 },
+       { IPv4(204,168,29,0),24 },
+       { IPv4(204,168,30,0),24 },
+       { IPv4(204,168,31,0),24 },
+       { IPv4(204,168,51,0),24 },
+       { IPv4(204,168,52,0),22 },
+       { IPv4(204,168,59,0),24 },
+       { IPv4(204,168,62,0),24 },
+       { IPv4(204,168,65,0),24 },
+       { IPv4(204,168,66,0),24 },
+       { IPv4(204,168,67,0),24 },
+       { IPv4(204,168,68,0),24 },
+       { IPv4(204,168,69,0),24 },
+       { IPv4(204,168,70,0),24 },
+       { IPv4(204,168,71,0),24 },
+       { IPv4(204,168,72,0),24 },
+       { IPv4(204,168,75,0),24 },
+       { IPv4(204,168,86,0),24 },
+       { IPv4(204,168,90,0),24 },
+       { IPv4(204,168,91,0),24 },
+       { IPv4(204,168,94,0),24 },
+       { IPv4(204,168,95,0),24 },
+       { IPv4(204,168,112,0),22 },
+       { IPv4(204,168,113,0),24 },
+       { IPv4(204,168,136,0),24 },
+       { IPv4(204,168,150,0),24 },
+       { IPv4(204,168,160,0),19 },
+       { IPv4(204,169,0,0),16 },
+       { IPv4(204,170,0,0),15 },
+       { IPv4(204,170,22,0),24 },
+       { IPv4(204,170,23,0),24 },
+       { IPv4(204,170,37,0),24 },
+       { IPv4(204,170,38,0),24 },
+       { IPv4(204,171,184,0),24 },
+       { IPv4(204,173,234,0),24 },
+       { IPv4(204,174,101,0),24 },
+       { IPv4(204,174,102,0),24 },
+       { IPv4(204,174,112,0),24 },
+       { IPv4(204,174,204,0),23 },
+       { IPv4(204,174,223,0),24 },
+       { IPv4(204,176,148,0),23 },
+       { IPv4(204,176,177,0),24 },
+       { IPv4(204,177,32,0),19 },
+       { IPv4(204,177,80,0),21 },
+       { IPv4(204,177,92,0),24 },
+       { IPv4(204,177,93,0),24 },
+       { IPv4(204,177,154,0),23 },
+       { IPv4(204,178,176,0),23 },
+       { IPv4(204,179,176,0),21 },
+       { IPv4(204,179,240,0),24 },
+       { IPv4(204,180,0,0),20 },
+       { IPv4(204,180,230,0),23 },
+       { IPv4(204,181,37,0),24 },
+       { IPv4(204,181,116,0),24 },
+       { IPv4(204,181,147,0),24 },
+       { IPv4(204,181,149,0),24 },
+       { IPv4(204,182,55,0),24 },
+       { IPv4(204,182,56,0),24 },
+       { IPv4(204,182,64,0),18 },
+       { IPv4(204,182,144,0),24 },
+       { IPv4(204,182,232,0),21 },
+       { IPv4(204,183,80,0),20 },
+       { IPv4(204,183,192,0),20 },
+       { IPv4(204,183,205,0),24 },
+       { IPv4(204,184,0,0),17 },
+       { IPv4(204,184,128,0),18 },
+       { IPv4(204,184,192,0),18 },
+       { IPv4(204,185,0,0),19 },
+       { IPv4(204,185,32,0),19 },
+       { IPv4(204,185,64,0),18 },
+       { IPv4(204,185,128,0),17 },
+       { IPv4(204,186,0,0),16 },
+       { IPv4(204,187,39,0),24 },
+       { IPv4(204,187,48,0),24 },
+       { IPv4(204,187,62,0),23 },
+       { IPv4(204,187,65,0),24 },
+       { IPv4(204,187,78,0),24 },
+       { IPv4(204,187,87,0),24 },
+       { IPv4(204,187,88,0),24 },
+       { IPv4(204,187,89,0),24 },
+       { IPv4(204,187,103,0),24 },
+       { IPv4(204,187,104,0),24 },
+       { IPv4(204,187,105,0),24 },
+       { IPv4(204,187,133,0),24 },
+       { IPv4(204,187,136,0),24 },
+       { IPv4(204,187,138,0),24 },
+       { IPv4(204,187,152,0),24 },
+       { IPv4(204,189,34,0),24 },
+       { IPv4(204,189,82,0),24 },
+       { IPv4(204,189,94,0),23 },
+       { IPv4(204,192,115,0),24 },
+       { IPv4(204,192,127,0),24 },
+       { IPv4(204,193,128,0),19 },
+       { IPv4(204,193,140,0),22 },
+       { IPv4(204,193,152,0),21 },
+       { IPv4(204,194,14,0),24 },
+       { IPv4(204,194,28,0),22 },
+       { IPv4(204,194,64,0),21 },
+       { IPv4(204,194,104,0),23 },
+       { IPv4(204,194,106,0),23 },
+       { IPv4(204,194,108,0),23 },
+       { IPv4(204,194,176,0),21 },
+       { IPv4(204,198,72,0),22 },
+       { IPv4(204,198,76,0),23 },
+       { IPv4(204,198,148,0),23 },
+       { IPv4(204,198,249,0),24 },
+       { IPv4(204,198,250,0),24 },
+       { IPv4(204,200,0,0),14 },
+       { IPv4(204,200,26,0),23 },
+       { IPv4(204,200,103,0),24 },
+       { IPv4(204,200,104,0),24 },
+       { IPv4(204,200,106,0),24 },
+       { IPv4(204,200,108,0),24 },
+       { IPv4(204,200,120,0),24 },
+       { IPv4(204,200,122,0),23 },
+       { IPv4(204,200,130,0),23 },
+       { IPv4(204,200,132,0),23 },
+       { IPv4(204,200,134,0),24 },
+       { IPv4(204,201,25,0),24 },
+       { IPv4(204,201,36,0),22 },
+       { IPv4(204,201,36,0),24 },
+       { IPv4(204,201,37,0),24 },
+       { IPv4(204,201,38,0),23 },
+       { IPv4(204,201,38,0),24 },
+       { IPv4(204,201,39,0),24 },
+       { IPv4(204,201,232,0),21 },
+       { IPv4(204,201,240,0),20 },
+       { IPv4(204,203,20,0),24 },
+       { IPv4(204,203,20,0),22 },
+       { IPv4(204,203,21,0),24 },
+       { IPv4(204,203,22,0),23 },
+       { IPv4(204,203,22,0),24 },
+       { IPv4(204,203,23,0),24 },
+       { IPv4(204,203,32,0),24 },
+       { IPv4(204,203,33,0),24 },
+       { IPv4(204,203,48,0),23 },
+       { IPv4(204,203,50,0),24 },
+       { IPv4(204,208,0,0),16 },
+       { IPv4(204,208,1,0),24 },
+       { IPv4(204,208,2,0),23 },
+       { IPv4(204,208,4,0),22 },
+       { IPv4(204,208,8,0),21 },
+       { IPv4(204,208,16,0),22 },
+       { IPv4(204,208,28,0),22 },
+       { IPv4(204,208,29,0),24 },
+       { IPv4(204,208,31,0),24 },
+       { IPv4(204,208,32,0),24 },
+       { IPv4(204,208,33,0),24 },
+       { IPv4(204,208,35,0),24 },
+       { IPv4(204,208,37,0),24 },
+       { IPv4(204,208,38,0),24 },
+       { IPv4(204,208,40,0),24 },
+       { IPv4(204,208,42,0),24 },
+       { IPv4(204,208,57,0),24 },
+       { IPv4(204,208,80,0),20 },
+       { IPv4(204,208,94,0),24 },
+       { IPv4(204,208,115,0),24 },
+       { IPv4(204,208,168,0),24 },
+       { IPv4(204,208,192,0),24 },
+       { IPv4(204,208,193,0),24 },
+       { IPv4(204,208,194,0),24 },
+       { IPv4(204,208,195,0),24 },
+       { IPv4(204,208,196,0),24 },
+       { IPv4(204,208,197,0),24 },
+       { IPv4(204,208,198,0),24 },
+       { IPv4(204,208,199,0),24 },
+       { IPv4(204,208,203,0),24 },
+       { IPv4(204,208,206,0),24 },
+       { IPv4(204,208,207,0),24 },
+       { IPv4(204,208,208,0),24 },
+       { IPv4(204,208,209,0),24 },
+       { IPv4(204,208,213,0),24 },
+       { IPv4(204,208,215,0),24 },
+       { IPv4(204,208,217,0),24 },
+       { IPv4(204,208,218,0),24 },
+       { IPv4(204,208,219,0),24 },
+       { IPv4(204,208,220,0),24 },
+       { IPv4(204,208,221,0),24 },
+       { IPv4(204,208,222,0),24 },
+       { IPv4(204,209,6,0),24 },
+       { IPv4(204,209,13,0),24 },
+       { IPv4(204,209,14,0),23 },
+       { IPv4(204,209,44,0),24 },
+       { IPv4(204,209,45,0),24 },
+       { IPv4(204,209,52,0),24 },
+       { IPv4(204,209,53,0),24 },
+       { IPv4(204,209,54,0),24 },
+       { IPv4(204,209,55,0),24 },
+       { IPv4(204,209,114,0),24 },
+       { IPv4(204,209,115,0),24 },
+       { IPv4(204,209,136,0),24 },
+       { IPv4(204,209,148,0),24 },
+       { IPv4(204,209,158,0),24 },
+       { IPv4(204,209,186,0),24 },
+       { IPv4(204,209,208,0),21 },
+       { IPv4(204,210,0,0),20 },
+       { IPv4(204,210,16,0),20 },
+       { IPv4(204,210,32,0),20 },
+       { IPv4(204,210,48,0),20 },
+       { IPv4(204,210,64,0),19 },
+       { IPv4(204,210,96,0),19 },
+       { IPv4(204,210,128,0),19 },
+       { IPv4(204,210,160,0),20 },
+       { IPv4(204,210,176,0),20 },
+       { IPv4(204,210,192,0),19 },
+       { IPv4(204,210,224,0),20 },
+       { IPv4(204,210,240,0),21 },
+       { IPv4(204,210,248,0),22 },
+       { IPv4(204,210,252,0),24 },
+       { IPv4(204,212,128,0),19 },
+       { IPv4(204,212,160,0),22 },
+       { IPv4(204,212,161,0),24 },
+       { IPv4(204,212,163,0),24 },
+       { IPv4(204,213,88,0),23 },
+       { IPv4(204,213,90,0),23 },
+       { IPv4(204,213,94,0),23 },
+       { IPv4(204,213,96,0),20 },
+       { IPv4(204,213,176,0),20 },
+       { IPv4(204,214,7,0),24 },
+       { IPv4(204,214,144,0),20 },
+       { IPv4(204,216,0,0),17 },
+       { IPv4(204,216,17,0),24 },
+       { IPv4(204,216,18,0),24 },
+       { IPv4(204,216,19,0),24 },
+       { IPv4(204,216,20,0),24 },
+       { IPv4(204,216,97,0),24 },
+       { IPv4(204,216,101,0),24 },
+       { IPv4(204,216,102,0),24 },
+       { IPv4(204,216,103,0),24 },
+       { IPv4(204,216,128,0),17 },
+       { IPv4(204,218,0,0),15 },
+       { IPv4(204,219,0,0),17 },
+       { IPv4(204,220,0,0),15 },
+       { IPv4(204,220,39,0),24 },
+       { IPv4(204,220,64,0),18 },
+       { IPv4(204,220,128,0),22 },
+       { IPv4(204,220,160,0),19 },
+       { IPv4(204,220,179,0),24 },
+       { IPv4(204,220,181,0),24 },
+       { IPv4(204,220,182,0),24 },
+       { IPv4(204,220,183,0),24 },
+       { IPv4(204,220,184,0),24 },
+       { IPv4(204,220,188,0),24 },
+       { IPv4(204,220,189,0),24 },
+       { IPv4(204,220,190,0),24 },
+       { IPv4(204,220,192,0),21 },
+       { IPv4(204,220,200,0),21 },
+       { IPv4(204,220,208,0),21 },
+       { IPv4(204,220,216,0),21 },
+       { IPv4(204,221,36,0),22 },
+       { IPv4(204,221,76,0),24 },
+       { IPv4(204,221,240,0),21 },
+       { IPv4(204,222,0,0),18 },
+       { IPv4(204,222,0,0),23 },
+       { IPv4(204,222,2,0),24 },
+       { IPv4(204,222,6,0),23 },
+       { IPv4(204,222,8,0),23 },
+       { IPv4(204,222,20,0),22 },
+       { IPv4(204,222,24,0),24 },
+       { IPv4(204,222,25,0),24 },
+       { IPv4(204,222,26,0),24 },
+       { IPv4(204,222,27,0),24 },
+       { IPv4(204,222,32,0),24 },
+       { IPv4(204,222,33,0),24 },
+       { IPv4(204,222,34,0),24 },
+       { IPv4(204,222,35,0),24 },
+       { IPv4(204,222,36,0),24 },
+       { IPv4(204,222,37,0),24 },
+       { IPv4(204,222,38,0),24 },
+       { IPv4(204,222,39,0),24 },
+       { IPv4(204,222,40,0),24 },
+       { IPv4(204,222,42,0),24 },
+       { IPv4(204,222,43,0),24 },
+       { IPv4(204,222,44,0),24 },
+       { IPv4(204,222,45,0),24 },
+       { IPv4(204,222,46,0),24 },
+       { IPv4(204,222,47,0),24 },
+       { IPv4(204,222,64,0),21 },
+       { IPv4(204,222,72,0),22 },
+       { IPv4(204,222,76,0),24 },
+       { IPv4(204,222,77,0),24 },
+       { IPv4(204,222,80,0),21 },
+       { IPv4(204,222,88,0),22 },
+       { IPv4(204,222,96,0),19 },
+       { IPv4(204,222,142,0),23 },
+       { IPv4(204,222,143,0),24 },
+       { IPv4(204,222,144,0),23 },
+       { IPv4(204,222,144,0),20 },
+       { IPv4(204,222,146,0),24 },
+       { IPv4(204,222,149,0),24 },
+       { IPv4(204,222,158,0),24 },
+       { IPv4(204,222,159,0),24 },
+       { IPv4(204,222,160,0),24 },
+       { IPv4(204,222,160,0),19 },
+       { IPv4(204,222,163,0),24 },
+       { IPv4(204,222,167,0),24 },
+       { IPv4(204,222,168,0),24 },
+       { IPv4(204,222,169,0),24 },
+       { IPv4(204,222,170,0),24 },
+       { IPv4(204,222,173,0),24 },
+       { IPv4(204,222,176,0),24 },
+       { IPv4(204,222,177,0),24 },
+       { IPv4(204,222,178,0),24 },
+       { IPv4(204,222,179,0),24 },
+       { IPv4(204,222,192,0),18 },
+       { IPv4(204,222,214,0),23 },
+       { IPv4(204,222,220,0),24 },
+       { IPv4(204,222,221,0),24 },
+       { IPv4(204,222,228,0),24 },
+       { IPv4(204,222,229,0),24 },
+       { IPv4(204,222,230,0),23 },
+       { IPv4(204,222,232,0),21 },
+       { IPv4(204,222,250,0),23 },
+       { IPv4(204,222,252,0),23 },
+       { IPv4(204,223,28,0),24 },
+       { IPv4(204,223,30,0),23 },
+       { IPv4(204,223,32,0),19 },
+       { IPv4(204,223,64,0),18 },
+       { IPv4(204,223,128,0),17 },
+       { IPv4(204,225,32,0),24 },
+       { IPv4(204,225,46,0),24 },
+       { IPv4(204,225,47,0),24 },
+       { IPv4(204,225,48,0),24 },
+       { IPv4(204,225,60,0),22 },
+       { IPv4(204,225,64,0),24 },
+       { IPv4(204,225,84,0),22 },
+       { IPv4(204,225,119,0),24 },
+       { IPv4(204,225,134,0),24 },
+       { IPv4(204,225,139,0),24 },
+       { IPv4(204,225,140,0),24 },
+       { IPv4(204,225,141,0),24 },
+       { IPv4(204,225,144,0),24 },
+       { IPv4(204,225,145,0),24 },
+       { IPv4(204,225,156,0),24 },
+       { IPv4(204,225,163,0),24 },
+       { IPv4(204,225,176,0),24 },
+       { IPv4(204,225,177,0),24 },
+       { IPv4(204,225,186,0),24 },
+       { IPv4(204,225,188,0),23 },
+       { IPv4(204,225,218,0),24 },
+       { IPv4(204,227,128,0),19 },
+       { IPv4(204,227,160,0),19 },
+       { IPv4(204,227,161,0),24 },
+       { IPv4(204,227,174,0),24 },
+       { IPv4(204,228,8,0),21 },
+       { IPv4(204,228,21,0),24 },
+       { IPv4(204,228,22,0),23 },
+       { IPv4(204,228,24,0),21 },
+       { IPv4(204,228,27,0),24 },
+       { IPv4(204,228,28,0),24 },
+       { IPv4(204,228,29,0),24 },
+       { IPv4(204,228,64,0),24 },
+       { IPv4(204,228,64,0),18 },
+       { IPv4(204,228,67,0),24 },
+       { IPv4(204,228,68,0),24 },
+       { IPv4(204,228,69,0),24 },
+       { IPv4(204,228,71,0),24 },
+       { IPv4(204,228,78,0),24 },
+       { IPv4(204,228,80,0),24 },
+       { IPv4(204,228,82,0),24 },
+       { IPv4(204,228,89,0),24 },
+       { IPv4(204,228,128,0),19 },
+       { IPv4(204,228,192,0),22 },
+       { IPv4(204,228,203,0),24 },
+       { IPv4(204,228,204,0),24 },
+       { IPv4(204,228,208,0),23 },
+       { IPv4(204,228,210,0),23 },
+       { IPv4(204,228,212,0),24 },
+       { IPv4(204,229,0,0),18 },
+       { IPv4(204,229,36,0),24 },
+       { IPv4(204,229,39,0),24 },
+       { IPv4(204,229,40,0),24 },
+       { IPv4(204,229,41,0),24 },
+       { IPv4(204,229,42,0),23 },
+       { IPv4(204,229,44,0),23 },
+       { IPv4(204,229,182,0),24 },
+       { IPv4(204,229,192,0),18 },
+       { IPv4(204,229,192,0),21 },
+       { IPv4(204,229,200,0),24 },
+       { IPv4(204,229,201,0),24 },
+       { IPv4(204,229,204,0),22 },
+       { IPv4(204,229,219,0),24 },
+       { IPv4(204,229,220,0),22 },
+       { IPv4(204,229,224,0),22 },
+       { IPv4(204,229,234,0),24 },
+       { IPv4(204,229,236,0),22 },
+       { IPv4(204,231,97,0),24 },
+       { IPv4(204,231,110,0),23 },
+       { IPv4(204,231,238,0),24 },
+       { IPv4(204,233,0,0),16 },
+       { IPv4(204,233,170,0),24 },
+       { IPv4(204,233,172,0),22 },
+       { IPv4(204,235,32,0),21 },
+       { IPv4(204,235,40,0),22 },
+       { IPv4(204,235,80,0),20 },
+       { IPv4(204,235,196,0),24 },
+       { IPv4(204,235,224,0),20 },
+       { IPv4(204,235,245,0),24 },
+       { IPv4(204,238,10,0),24 },
+       { IPv4(204,238,15,0),24 },
+       { IPv4(204,238,18,0),24 },
+       { IPv4(204,238,23,0),24 },
+       { IPv4(204,238,24,0),23 },
+       { IPv4(204,238,26,0),24 },
+       { IPv4(204,238,32,0),24 },
+       { IPv4(204,238,37,0),24 },
+       { IPv4(204,238,56,0),24 },
+       { IPv4(204,238,98,0),24 },
+       { IPv4(204,238,107,0),24 },
+       { IPv4(204,238,120,0),24 },
+       { IPv4(204,238,126,0),24 },
+       { IPv4(204,238,129,0),24 },
+       { IPv4(204,238,141,0),24 },
+       { IPv4(204,238,143,0),24 },
+       { IPv4(204,238,151,0),24 },
+       { IPv4(204,238,153,0),24 },
+       { IPv4(204,238,202,0),24 },
+       { IPv4(204,238,211,0),24 },
+       { IPv4(204,238,213,0),24 },
+       { IPv4(204,238,217,0),24 },
+       { IPv4(204,238,232,0),24 },
+       { IPv4(204,238,237,0),24 },
+       { IPv4(204,239,68,0),24 },
+       { IPv4(204,239,123,0),24 },
+       { IPv4(204,239,136,0),23 },
+       { IPv4(204,239,179,0),24 },
+       { IPv4(204,239,214,0),24 },
+       { IPv4(204,244,0,0),16 },
+       { IPv4(204,244,24,0),21 },
+       { IPv4(204,245,128,0),17 },
+       { IPv4(204,246,64,0),18 },
+       { IPv4(204,246,128,0),20 },
+       { IPv4(204,246,144,0),21 },
+       { IPv4(204,246,147,0),24 },
+       { IPv4(204,247,0,0),16 },
+       { IPv4(204,248,29,0),24 },
+       { IPv4(204,248,30,0),24 },
+       { IPv4(204,248,128,0),20 },
+       { IPv4(204,248,175,0),24 },
+       { IPv4(204,248,192,0),21 },
+       { IPv4(204,248,220,0),24 },
+       { IPv4(204,248,221,0),24 },
+       { IPv4(204,248,222,0),24 },
+       { IPv4(204,248,223,0),24 },
+       { IPv4(204,249,48,0),20 },
+       { IPv4(204,249,49,0),24 },
+       { IPv4(204,249,50,0),24 },
+       { IPv4(204,249,51,0),24 },
+       { IPv4(204,249,58,0),24 },
+       { IPv4(204,249,62,0),24 },
+       { IPv4(204,249,63,0),24 },
+       { IPv4(204,249,74,0),24 },
+       { IPv4(204,249,160,0),22 },
+       { IPv4(204,249,232,0),24 },
+       { IPv4(204,249,233,0),24 },
+       { IPv4(204,250,96,0),20 },
+       { IPv4(204,250,125,0),24 },
+       { IPv4(204,250,126,0),24 },
+       { IPv4(204,250,155,0),24 },
+       { IPv4(204,250,160,0),19 },
+       { IPv4(204,251,64,0),21 },
+       { IPv4(204,251,168,0),22 },
+       { IPv4(204,251,188,0),22 },
+       { IPv4(204,251,189,0),24 },
+       { IPv4(204,252,0,0),22 },
+       { IPv4(204,252,74,0),24 },
+       { IPv4(204,252,112,0),20 },
+       { IPv4(204,252,224,0),20 },
+       { IPv4(204,253,8,0),21 },
+       { IPv4(204,253,8,0),24 },
+       { IPv4(204,253,9,0),24 },
+       { IPv4(204,253,10,0),24 },
+       { IPv4(204,253,11,0),24 },
+       { IPv4(204,253,12,0),24 },
+       { IPv4(204,253,13,0),24 },
+       { IPv4(204,253,14,0),24 },
+       { IPv4(204,253,15,0),24 },
+       { IPv4(204,253,83,0),24 },
+       { IPv4(204,253,128,0),22 },
+       { IPv4(204,253,151,0),24 },
+       { IPv4(204,253,168,0),21 },
+       { IPv4(204,254,32,0),24 },
+       { IPv4(204,254,60,0),24 },
+       { IPv4(204,254,61,0),24 },
+       { IPv4(204,254,94,0),24 },
+       { IPv4(204,254,120,0),21 },
+       { IPv4(204,254,168,0),24 },
+       { IPv4(204,254,170,0),24 },
+       { IPv4(204,254,224,0),21 },
+       { IPv4(204,255,32,0),24 },
+       { IPv4(204,255,34,0),24 },
+       { IPv4(204,255,42,0),24 },
+       { IPv4(204,255,43,0),24 },
+       { IPv4(204,255,44,0),24 },
+       { IPv4(204,255,45,0),24 },
+       { IPv4(204,255,50,0),24 },
+       { IPv4(204,255,51,0),24 },
+       { IPv4(204,255,56,0),24 },
+       { IPv4(204,255,57,0),24 },
+       { IPv4(204,255,177,0),24 },
+       { IPv4(204,255,200,0),21 },
+       { IPv4(204,255,224,0),20 },
+       { IPv4(204,255,244,0),23 },
+       { IPv4(205,56,0,0),13 },
+       { IPv4(205,56,144,0),24 },
+       { IPv4(205,56,145,0),24 },
+       { IPv4(205,56,150,0),24 },
+       { IPv4(205,57,192,0),22 },
+       { IPv4(205,57,196,0),24 },
+       { IPv4(205,62,14,0),24 },
+       { IPv4(205,64,0,0),11 },
+       { IPv4(205,65,129,0),24 },
+       { IPv4(205,66,84,0),24 },
+       { IPv4(205,66,100,0),22 },
+       { IPv4(205,66,105,0),24 },
+       { IPv4(205,66,107,0),24 },
+       { IPv4(205,66,110,0),24 },
+       { IPv4(205,66,111,0),24 },
+       { IPv4(205,66,112,0),24 },
+       { IPv4(205,66,113,0),24 },
+       { IPv4(205,66,118,0),24 },
+       { IPv4(205,66,240,0),24 },
+       { IPv4(205,67,206,0),24 },
+       { IPv4(205,67,207,0),24 },
+       { IPv4(205,67,218,0),24 },
+       { IPv4(205,67,223,0),24 },
+       { IPv4(205,67,231,0),24 },
+       { IPv4(205,67,232,0),24 },
+       { IPv4(205,67,252,0),24 },
+       { IPv4(205,67,255,0),24 },
+       { IPv4(205,68,66,0),24 },
+       { IPv4(205,68,69,0),24 },
+       { IPv4(205,68,76,0),24 },
+       { IPv4(205,68,89,0),24 },
+       { IPv4(205,68,90,0),24 },
+       { IPv4(205,68,93,0),24 },
+       { IPv4(205,68,94,0),24 },
+       { IPv4(205,68,95,0),24 },
+       { IPv4(205,68,103,0),24 },
+       { IPv4(205,69,124,0),24 },
+       { IPv4(205,69,192,0),20 },
+       { IPv4(205,69,208,0),21 },
+       { IPv4(205,69,221,0),24 },
+       { IPv4(205,69,224,0),24 },
+       { IPv4(205,69,225,0),24 },
+       { IPv4(205,69,226,0),24 },
+       { IPv4(205,69,227,0),24 },
+       { IPv4(205,69,228,0),24 },
+       { IPv4(205,70,64,0),24 },
+       { IPv4(205,70,65,0),24 },
+       { IPv4(205,70,67,0),24 },
+       { IPv4(205,70,96,0),21 },
+       { IPv4(205,70,104,0),22 },
+       { IPv4(205,70,108,0),23 },
+       { IPv4(205,76,0,0),24 },
+       { IPv4(205,76,1,0),24 },
+       { IPv4(205,76,6,0),24 },
+       { IPv4(205,76,7,0),24 },
+       { IPv4(205,76,8,0),24 },
+       { IPv4(205,76,9,0),24 },
+       { IPv4(205,76,10,0),24 },
+       { IPv4(205,76,11,0),24 },
+       { IPv4(205,76,12,0),24 },
+       { IPv4(205,76,13,0),24 },
+       { IPv4(205,89,128,0),24 },
+       { IPv4(205,94,129,0),24 },
+       { IPv4(205,94,130,0),24 },
+       { IPv4(205,94,131,0),24 },
+       { IPv4(205,94,132,0),24 },
+       { IPv4(205,96,0,0),13 },
+       { IPv4(205,101,96,0),24 },
+       { IPv4(205,101,97,0),24 },
+       { IPv4(205,101,98,0),24 },
+       { IPv4(205,101,99,0),24 },
+       { IPv4(205,101,100,0),24 },
+       { IPv4(205,101,224,0),24 },
+       { IPv4(205,102,128,0),24 },
+       { IPv4(205,102,129,0),24 },
+       { IPv4(205,103,84,0),24 },
+       { IPv4(205,104,0,0),15 },
+       { IPv4(205,106,0,0),15 },
+       { IPv4(205,106,16,0),24 },
+       { IPv4(205,106,75,0),24 },
+       { IPv4(205,106,220,0),24 },
+       { IPv4(205,107,0,0),17 },
+       { IPv4(205,107,192,0),19 },
+       { IPv4(205,108,0,0),15 },
+       { IPv4(205,108,36,0),24 },
+       { IPv4(205,109,23,0),24 },
+       { IPv4(205,109,24,0),24 },
+       { IPv4(205,109,56,0),21 },
+       { IPv4(205,109,64,0),22 },
+       { IPv4(205,109,192,0),24 },
+       { IPv4(205,109,224,0),19 },
+       { IPv4(205,110,0,0),24 },
+       { IPv4(205,110,0,0),16 },
+       { IPv4(205,110,1,0),24 },
+       { IPv4(205,110,2,0),24 },
+       { IPv4(205,110,3,0),24 },
+       { IPv4(205,110,4,0),24 },
+       { IPv4(205,110,5,0),24 },
+       { IPv4(205,110,6,0),24 },
+       { IPv4(205,110,7,0),24 },
+       { IPv4(205,110,8,0),24 },
+       { IPv4(205,110,9,0),24 },
+       { IPv4(205,110,10,0),24 },
+       { IPv4(205,110,11,0),24 },
+       { IPv4(205,110,12,0),24 },
+       { IPv4(205,110,13,0),24 },
+       { IPv4(205,110,14,0),24 },
+       { IPv4(205,110,15,0),24 },
+       { IPv4(205,110,16,0),24 },
+       { IPv4(205,110,17,0),24 },
+       { IPv4(205,110,18,0),24 },
+       { IPv4(205,110,19,0),24 },
+       { IPv4(205,110,20,0),24 },
+       { IPv4(205,110,21,0),24 },
+       { IPv4(205,110,22,0),24 },
+       { IPv4(205,110,23,0),24 },
+       { IPv4(205,110,24,0),24 },
+       { IPv4(205,110,25,0),24 },
+       { IPv4(205,110,26,0),24 },
+       { IPv4(205,110,27,0),24 },
+       { IPv4(205,110,28,0),24 },
+       { IPv4(205,110,29,0),24 },
+       { IPv4(205,110,30,0),24 },
+       { IPv4(205,110,31,0),24 },
+       { IPv4(205,110,205,0),24 },
+       { IPv4(205,110,206,0),24 },
+       { IPv4(205,110,224,0),24 },
+       { IPv4(205,110,225,0),24 },
+       { IPv4(205,113,0,0),16 },
+       { IPv4(205,115,0,0),16 },
+       { IPv4(205,118,0,0),15 },
+       { IPv4(205,120,0,0),13 },
+       { IPv4(205,124,237,0),24 },
+       { IPv4(205,124,245,0),24 },
+       { IPv4(205,127,29,0),24 },
+       { IPv4(205,127,253,0),24 },
+       { IPv4(205,128,8,0),22 },
+       { IPv4(205,132,8,0),24 },
+       { IPv4(205,132,16,0),21 },
+       { IPv4(205,132,73,0),24 },
+       { IPv4(205,132,74,0),24 },
+       { IPv4(205,132,75,0),24 },
+       { IPv4(205,132,76,0),24 },
+       { IPv4(205,132,82,0),24 },
+       { IPv4(205,132,83,0),24 },
+       { IPv4(205,132,173,0),24 },
+       { IPv4(205,132,174,0),24 },
+       { IPv4(205,132,175,0),24 },
+       { IPv4(205,132,224,0),24 },
+       { IPv4(205,132,225,0),24 },
+       { IPv4(205,132,226,0),24 },
+       { IPv4(205,132,227,0),24 },
+       { IPv4(205,132,228,0),24 },
+       { IPv4(205,132,229,0),24 },
+       { IPv4(205,132,230,0),24 },
+       { IPv4(205,132,231,0),24 },
+       { IPv4(205,132,232,0),24 },
+       { IPv4(205,132,233,0),24 },
+       { IPv4(205,132,248,0),21 },
+       { IPv4(205,136,35,0),24 },
+       { IPv4(205,136,46,0),24 },
+       { IPv4(205,136,49,0),24 },
+       { IPv4(205,136,56,0),24 },
+       { IPv4(205,136,60,0),24 },
+       { IPv4(205,136,61,0),24 },
+       { IPv4(205,136,119,0),24 },
+       { IPv4(205,136,158,0),24 },
+       { IPv4(205,136,164,0),22 },
+       { IPv4(205,136,180,0),23 },
+       { IPv4(205,136,182,0),23 },
+       { IPv4(205,136,205,0),24 },
+       { IPv4(205,136,213,0),24 },
+       { IPv4(205,136,224,0),24 },
+       { IPv4(205,136,246,0),23 },
+       { IPv4(205,136,248,0),23 },
+       { IPv4(205,137,96,0),24 },
+       { IPv4(205,137,96,0),20 },
+       { IPv4(205,137,176,0),20 },
+       { IPv4(205,138,133,0),24 },
+       { IPv4(205,138,134,0),24 },
+       { IPv4(205,138,135,0),24 },
+       { IPv4(205,138,136,0),24 },
+       { IPv4(205,138,137,0),24 },
+       { IPv4(205,138,138,0),24 },
+       { IPv4(205,138,230,0),24 },
+       { IPv4(205,139,0,0),23 },
+       { IPv4(205,139,50,0),23 },
+       { IPv4(205,139,96,0),23 },
+       { IPv4(205,139,102,0),23 },
+       { IPv4(205,139,106,0),23 },
+       { IPv4(205,139,120,0),22 },
+       { IPv4(205,139,124,0),24 },
+       { IPv4(205,139,140,0),23 },
+       { IPv4(205,139,189,0),24 },
+       { IPv4(205,139,224,0),22 },
+       { IPv4(205,140,14,0),23 },
+       { IPv4(205,140,126,0),24 },
+       { IPv4(205,140,164,0),22 },
+       { IPv4(205,140,192,0),19 },
+       { IPv4(205,141,128,0),18 },
+       { IPv4(205,142,56,0),22 },
+       { IPv4(205,142,80,0),22 },
+       { IPv4(205,142,96,0),22 },
+       { IPv4(205,142,108,0),23 },
+       { IPv4(205,142,124,0),22 },
+       { IPv4(205,142,149,0),24 },
+       { IPv4(205,142,150,0),24 },
+       { IPv4(205,142,151,0),24 },
+       { IPv4(205,142,164,0),24 },
+       { IPv4(205,142,176,0),24 },
+       { IPv4(205,142,177,0),24 },
+       { IPv4(205,142,188,0),22 },
+       { IPv4(205,142,196,0),24 },
+       { IPv4(205,142,197,0),24 },
+       { IPv4(205,142,198,0),24 },
+       { IPv4(205,142,199,0),24 },
+       { IPv4(205,142,204,0),24 },
+       { IPv4(205,142,205,0),24 },
+       { IPv4(205,142,206,0),24 },
+       { IPv4(205,142,207,0),24 },
+       { IPv4(205,142,236,0),24 },
+       { IPv4(205,142,237,0),24 },
+       { IPv4(205,142,238,0),24 },
+       { IPv4(205,142,239,0),24 },
+       { IPv4(205,143,37,0),24 },
+       { IPv4(205,143,50,0),23 },
+       { IPv4(205,143,52,0),22 },
+       { IPv4(205,143,64,0),21 },
+       { IPv4(205,143,88,0),21 },
+       { IPv4(205,143,100,0),23 },
+       { IPv4(205,143,103,0),24 },
+       { IPv4(205,143,192,0),21 },
+       { IPv4(205,143,200,0),24 },
+       { IPv4(205,143,201,0),24 },
+       { IPv4(205,143,202,0),24 },
+       { IPv4(205,143,203,0),24 },
+       { IPv4(205,143,204,0),24 },
+       { IPv4(205,143,205,0),24 },
+       { IPv4(205,143,207,0),24 },
+       { IPv4(205,143,208,0),21 },
+       { IPv4(205,143,232,0),21 },
+       { IPv4(205,143,248,0),21 },
+       { IPv4(205,143,248,0),24 },
+       { IPv4(205,143,249,0),24 },
+       { IPv4(205,143,250,0),24 },
+       { IPv4(205,143,251,0),24 },
+       { IPv4(205,143,252,0),24 },
+       { IPv4(205,143,253,0),24 },
+       { IPv4(205,143,254,0),24 },
+       { IPv4(205,143,255,0),24 },
+       { IPv4(205,144,99,0),24 },
+       { IPv4(205,144,100,0),24 },
+       { IPv4(205,144,101,0),24 },
+       { IPv4(205,144,106,0),24 },
+       { IPv4(205,144,113,0),24 },
+       { IPv4(205,144,122,0),24 },
+       { IPv4(205,144,123,0),24 },
+       { IPv4(205,144,125,0),24 },
+       { IPv4(205,144,126,0),24 },
+       { IPv4(205,144,146,0),24 },
+       { IPv4(205,144,222,0),24 },
+       { IPv4(205,144,223,0),24 },
+       { IPv4(205,144,225,0),24 },
+       { IPv4(205,145,64,0),24 },
+       { IPv4(205,145,102,0),24 },
+       { IPv4(205,145,158,0),24 },
+       { IPv4(205,145,161,0),24 },
+       { IPv4(205,145,185,0),24 },
+       { IPv4(205,145,186,0),24 },
+       { IPv4(205,146,0,0),16 },
+       { IPv4(205,146,78,0),23 },
+       { IPv4(205,146,148,0),22 },
+       { IPv4(205,146,152,0),24 },
+       { IPv4(205,147,0,0),18 },
+       { IPv4(205,147,128,0),19 },
+       { IPv4(205,147,149,0),24 },
+       { IPv4(205,147,160,0),19 },
+       { IPv4(205,147,192,0),18 },
+       { IPv4(205,148,0,0),18 },
+       { IPv4(205,148,123,0),24 },
+       { IPv4(205,148,125,0),24 },
+       { IPv4(205,148,184,0),24 },
+       { IPv4(205,148,192,0),18 },
+       { IPv4(205,148,225,0),24 },
+       { IPv4(205,148,233,0),24 },
+       { IPv4(205,149,0,0),21 },
+       { IPv4(205,149,120,0),22 },
+       { IPv4(205,149,124,0),23 },
+       { IPv4(205,149,160,0),19 },
+       { IPv4(205,150,42,0),24 },
+       { IPv4(205,150,88,0),24 },
+       { IPv4(205,150,101,0),24 },
+       { IPv4(205,150,136,0),24 },
+       { IPv4(205,150,142,0),24 },
+       { IPv4(205,150,203,0),24 },
+       { IPv4(205,150,218,0),24 },
+       { IPv4(205,150,247,0),24 },
+       { IPv4(205,150,248,0),24 },
+       { IPv4(205,151,82,0),24 },
+       { IPv4(205,151,103,0),24 },
+       { IPv4(205,151,126,0),23 },
+       { IPv4(205,151,179,0),24 },
+       { IPv4(205,151,192,0),20 },
+       { IPv4(205,153,9,0),24 },
+       { IPv4(205,153,10,0),24 },
+       { IPv4(205,153,11,0),24 },
+       { IPv4(205,153,47,0),24 },
+       { IPv4(205,153,60,0),22 },
+       { IPv4(205,153,68,0),22 },
+       { IPv4(205,153,88,0),22 },
+       { IPv4(205,153,196,0),22 },
+       { IPv4(205,153,248,0),22 },
+       { IPv4(205,154,0,0),16 },
+       { IPv4(205,154,0,0),19 },
+       { IPv4(205,154,32,0),20 },
+       { IPv4(205,154,48,0),23 },
+       { IPv4(205,154,160,0),19 },
+       { IPv4(205,155,0,0),16 },
+       { IPv4(205,156,177,0),24 },
+       { IPv4(205,157,65,0),24 },
+       { IPv4(205,157,69,0),24 },
+       { IPv4(205,157,74,0),24 },
+       { IPv4(205,157,85,0),24 },
+       { IPv4(205,157,90,0),24 },
+       { IPv4(205,157,102,0),24 },
+       { IPv4(205,157,103,0),24 },
+       { IPv4(205,157,104,0),24 },
+       { IPv4(205,157,105,0),24 },
+       { IPv4(205,157,128,0),20 },
+       { IPv4(205,157,160,0),19 },
+       { IPv4(205,158,0,0),16 },
+       { IPv4(205,158,160,0),23 },
+       { IPv4(205,158,184,0),24 },
+       { IPv4(205,159,1,0),24 },
+       { IPv4(205,159,16,0),24 },
+       { IPv4(205,159,27,0),24 },
+       { IPv4(205,159,28,0),24 },
+       { IPv4(205,159,81,0),24 },
+       { IPv4(205,159,83,0),24 },
+       { IPv4(205,159,90,0),24 },
+       { IPv4(205,159,126,0),24 },
+       { IPv4(205,159,132,0),24 },
+       { IPv4(205,159,147,0),24 },
+       { IPv4(205,159,151,0),24 },
+       { IPv4(205,159,154,0),24 },
+       { IPv4(205,159,169,0),24 },
+       { IPv4(205,159,173,0),24 },
+       { IPv4(205,159,176,0),24 },
+       { IPv4(205,159,191,0),24 },
+       { IPv4(205,159,233,0),24 },
+       { IPv4(205,159,238,0),24 },
+       { IPv4(205,159,239,0),24 },
+       { IPv4(205,159,248,0),24 },
+       { IPv4(205,160,0,0),22 },
+       { IPv4(205,160,4,0),23 },
+       { IPv4(205,160,112,0),20 },
+       { IPv4(205,160,214,0),24 },
+       { IPv4(205,160,215,0),24 },
+       { IPv4(205,160,216,0),22 },
+       { IPv4(205,160,241,0),24 },
+       { IPv4(205,161,205,0),24 },
+       { IPv4(205,162,5,0),24 },
+       { IPv4(205,162,7,0),24 },
+       { IPv4(205,162,49,0),24 },
+       { IPv4(205,162,54,0),24 },
+       { IPv4(205,162,58,0),24 },
+       { IPv4(205,162,59,0),24 },
+       { IPv4(205,162,64,0),20 },
+       { IPv4(205,162,124,0),22 },
+       { IPv4(205,162,201,0),24 },
+       { IPv4(205,162,202,0),24 },
+       { IPv4(205,162,240,0),24 },
+       { IPv4(205,162,240,0),20 },
+       { IPv4(205,162,245,0),24 },
+       { IPv4(205,162,246,0),24 },
+       { IPv4(205,162,249,0),24 },
+       { IPv4(205,162,250,0),24 },
+       { IPv4(205,162,251,0),24 },
+       { IPv4(205,162,252,0),24 },
+       { IPv4(205,162,254,0),24 },
+       { IPv4(205,163,0,0),19 },
+       { IPv4(205,163,2,0),24 },
+       { IPv4(205,163,3,0),24 },
+       { IPv4(205,163,142,0),24 },
+       { IPv4(205,164,216,0),23 },
+       { IPv4(205,164,219,0),24 },
+       { IPv4(205,166,4,0),24 },
+       { IPv4(205,166,32,0),24 },
+       { IPv4(205,166,33,0),24 },
+       { IPv4(205,166,36,0),24 },
+       { IPv4(205,166,39,0),24 },
+       { IPv4(205,166,48,0),24 },
+       { IPv4(205,166,62,0),24 },
+       { IPv4(205,166,76,0),24 },
+       { IPv4(205,166,82,0),24 },
+       { IPv4(205,166,84,0),24 },
+       { IPv4(205,166,92,0),24 },
+       { IPv4(205,166,115,0),24 },
+       { IPv4(205,166,121,0),24 },
+       { IPv4(205,166,143,0),24 },
+       { IPv4(205,166,146,0),24 },
+       { IPv4(205,166,151,0),24 },
+       { IPv4(205,166,165,0),24 },
+       { IPv4(205,166,180,0),24 },
+       { IPv4(205,166,195,0),24 },
+       { IPv4(205,166,196,0),24 },
+       { IPv4(205,166,214,0),24 },
+       { IPv4(205,166,226,0),24 },
+       { IPv4(205,166,230,0),24 },
+       { IPv4(205,166,234,0),24 },
+       { IPv4(205,166,249,0),24 },
+       { IPv4(205,167,19,0),24 },
+       { IPv4(205,167,22,0),23 },
+       { IPv4(205,167,28,0),24 },
+       { IPv4(205,167,29,0),24 },
+       { IPv4(205,167,36,0),23 },
+       { IPv4(205,167,46,0),23 },
+       { IPv4(205,167,62,0),23 },
+       { IPv4(205,167,68,0),23 },
+       { IPv4(205,167,80,0),23 },
+       { IPv4(205,167,88,0),24 },
+       { IPv4(205,167,89,0),24 },
+       { IPv4(205,167,90,0),23 },
+       { IPv4(205,167,96,0),24 },
+       { IPv4(205,167,108,0),24 },
+       { IPv4(205,167,109,0),24 },
+       { IPv4(205,167,110,0),24 },
+       { IPv4(205,167,111,0),24 },
+       { IPv4(205,167,118,0),24 },
+       { IPv4(205,167,124,0),23 },
+       { IPv4(205,167,128,0),23 },
+       { IPv4(205,167,142,0),23 },
+       { IPv4(205,167,150,0),23 },
+       { IPv4(205,167,162,0),23 },
+       { IPv4(205,167,162,0),24 },
+       { IPv4(205,167,163,0),24 },
+       { IPv4(205,167,174,0),24 },
+       { IPv4(205,167,175,0),24 },
+       { IPv4(205,167,184,0),23 },
+       { IPv4(205,167,188,0),23 },
+       { IPv4(205,167,198,0),24 },
+       { IPv4(205,167,199,0),24 },
+       { IPv4(205,168,0,0),15 },
+       { IPv4(205,168,0,0),16 },
+       { IPv4(205,168,70,0),24 },
+       { IPv4(205,168,96,0),24 },
+       { IPv4(205,168,175,0),24 },
+       { IPv4(205,169,23,0),24 },
+       { IPv4(205,169,24,0),22 },
+       { IPv4(205,169,28,0),23 },
+       { IPv4(205,169,30,0),24 },
+       { IPv4(205,169,171,0),24 },
+       { IPv4(205,170,0,0),20 },
+       { IPv4(205,170,0,0),16 },
+       { IPv4(205,170,168,0),21 },
+       { IPv4(205,170,235,0),24 },
+       { IPv4(205,170,240,0),24 },
+       { IPv4(205,170,241,0),24 },
+       { IPv4(205,170,242,0),24 },
+       { IPv4(205,170,243,0),24 },
+       { IPv4(205,171,64,0),21 },
+       { IPv4(205,171,78,0),24 },
+       { IPv4(205,171,120,0),21 },
+       { IPv4(205,171,129,0),24 },
+       { IPv4(205,171,202,0),24 },
+       { IPv4(205,172,0,0),22 },
+       { IPv4(205,172,8,0),22 },
+       { IPv4(205,172,16,0),22 },
+       { IPv4(205,172,139,0),24 },
+       { IPv4(205,172,156,0),22 },
+       { IPv4(205,172,164,0),24 },
+       { IPv4(205,172,203,0),24 },
+       { IPv4(205,172,212,0),22 },
+       { IPv4(205,173,0,0),21 },
+       { IPv4(205,173,32,0),21 },
+       { IPv4(205,173,40,0),21 },
+       { IPv4(205,173,93,0),24 },
+       { IPv4(205,173,95,0),24 },
+       { IPv4(205,173,129,0),24 },
+       { IPv4(205,173,176,0),21 },
+       { IPv4(205,173,240,0),24 },
+       { IPv4(205,174,16,0),24 },
+       { IPv4(205,174,21,0),24 },
+       { IPv4(205,174,22,0),24 },
+       { IPv4(205,174,23,0),24 },
+       { IPv4(205,174,34,0),24 },
+       { IPv4(205,174,40,0),24 },
+       { IPv4(205,174,41,0),24 },
+       { IPv4(205,174,42,0),24 },
+       { IPv4(205,174,47,0),24 },
+       { IPv4(205,174,64,0),20 },
+       { IPv4(205,174,159,0),24 },
+       { IPv4(205,174,208,0),20 },
+       { IPv4(205,174,240,0),20 },
+       { IPv4(205,175,0,0),19 },
+       { IPv4(205,175,208,0),24 },
+       { IPv4(205,175,224,0),24 },
+       { IPv4(205,175,252,0),24 },
+       { IPv4(205,177,14,0),24 },
+       { IPv4(205,177,62,0),24 },
+       { IPv4(205,177,84,0),23 },
+       { IPv4(205,177,116,0),22 },
+       { IPv4(205,177,140,0),22 },
+       { IPv4(205,177,144,0),20 },
+       { IPv4(205,177,172,0),24 },
+       { IPv4(205,178,0,0),17 },
+       { IPv4(205,178,26,0),24 },
+       { IPv4(205,178,38,0),23 },
+       { IPv4(205,178,41,0),24 },
+       { IPv4(205,178,52,0),24 },
+       { IPv4(205,178,61,0),24 },
+       { IPv4(205,178,85,0),24 },
+       { IPv4(205,178,86,0),24 },
+       { IPv4(205,178,87,0),24 },
+       { IPv4(205,178,93,0),24 },
+       { IPv4(205,178,95,0),24 },
+       { IPv4(205,178,118,0),24 },
+       { IPv4(205,178,123,0),24 },
+       { IPv4(205,178,125,0),24 },
+       { IPv4(205,180,10,0),24 },
+       { IPv4(205,180,15,0),24 },
+       { IPv4(205,180,85,0),24 },
+       { IPv4(205,180,86,0),24 },
+       { IPv4(205,180,87,0),24 },
+       { IPv4(205,180,192,0),24 },
+       { IPv4(205,180,193,0),24 },
+       { IPv4(205,181,72,0),24 },
+       { IPv4(205,181,180,0),24 },
+       { IPv4(205,181,181,0),24 },
+       { IPv4(205,181,240,0),24 },
+       { IPv4(205,181,242,0),24 },
+       { IPv4(205,184,0,0),14 },
+       { IPv4(205,184,3,0),24 },
+       { IPv4(205,184,14,0),24 },
+       { IPv4(205,184,38,0),24 },
+       { IPv4(205,184,62,0),24 },
+       { IPv4(205,184,128,0),23 },
+       { IPv4(205,184,138,0),24 },
+       { IPv4(205,184,151,0),24 },
+       { IPv4(205,184,204,0),23 },
+       { IPv4(205,184,218,0),23 },
+       { IPv4(205,184,238,0),24 },
+       { IPv4(205,184,240,0),24 },
+       { IPv4(205,185,12,0),24 },
+       { IPv4(205,185,14,0),24 },
+       { IPv4(205,185,17,0),24 },
+       { IPv4(205,185,18,0),24 },
+       { IPv4(205,185,19,0),24 },
+       { IPv4(205,185,21,0),24 },
+       { IPv4(205,185,22,0),24 },
+       { IPv4(205,185,24,0),24 },
+       { IPv4(205,185,25,0),24 },
+       { IPv4(205,185,119,0),24 },
+       { IPv4(205,186,39,0),24 },
+       { IPv4(205,186,155,0),24 },
+       { IPv4(205,187,75,0),24 },
+       { IPv4(205,187,207,0),24 },
+       { IPv4(205,187,221,0),24 },
+       { IPv4(205,187,228,0),24 },
+       { IPv4(205,188,0,0),16 },
+       { IPv4(205,188,64,0),18 },
+       { IPv4(205,189,1,0),24 },
+       { IPv4(205,189,39,0),24 },
+       { IPv4(205,189,40,0),24 },
+       { IPv4(205,189,51,0),24 },
+       { IPv4(205,189,71,0),24 },
+       { IPv4(205,189,72,0),23 },
+       { IPv4(205,189,86,0),23 },
+       { IPv4(205,189,108,0),24 },
+       { IPv4(205,189,134,0),24 },
+       { IPv4(205,189,139,0),24 },
+       { IPv4(205,189,151,0),24 },
+       { IPv4(205,189,152,0),24 },
+       { IPv4(205,189,204,0),24 },
+       { IPv4(205,190,14,0),24 },
+       { IPv4(205,191,64,0),24 },
+       { IPv4(205,191,128,0),23 },
+       { IPv4(205,191,166,0),24 },
+       { IPv4(205,191,176,0),24 },
+       { IPv4(205,191,194,0),24 },
+       { IPv4(205,193,0,0),16 },
+       { IPv4(205,198,244,0),24 },
+       { IPv4(205,199,135,0),24 },
+       { IPv4(205,199,148,0),22 },
+       { IPv4(205,199,200,0),23 },
+       { IPv4(205,199,232,0),22 },
+       { IPv4(205,201,0,0),18 },
+       { IPv4(205,202,96,0),19 },
+       { IPv4(205,202,192,0),18 },
+       { IPv4(205,203,64,0),19 },
+       { IPv4(205,203,224,0),19 },
+       { IPv4(205,205,19,0),24 },
+       { IPv4(205,205,56,0),24 },
+       { IPv4(205,205,98,0),24 },
+       { IPv4(205,205,149,0),24 },
+       { IPv4(205,207,64,0),24 },
+       { IPv4(205,207,69,0),24 },
+       { IPv4(205,207,98,0),24 },
+       { IPv4(205,207,136,0),24 },
+       { IPv4(205,207,137,0),24 },
+       { IPv4(205,207,138,0),24 },
+       { IPv4(205,207,175,0),24 },
+       { IPv4(205,207,184,0),24 },
+       { IPv4(205,207,185,0),24 },
+       { IPv4(205,207,188,0),24 },
+       { IPv4(205,207,214,0),24 },
+       { IPv4(205,207,243,0),24 },
+       { IPv4(205,208,148,0),24 },
+       { IPv4(205,210,28,0),24 },
+       { IPv4(205,210,42,0),24 },
+       { IPv4(205,210,85,0),24 },
+       { IPv4(205,210,86,0),23 },
+       { IPv4(205,210,88,0),21 },
+       { IPv4(205,210,104,0),24 },
+       { IPv4(205,210,137,0),24 },
+       { IPv4(205,210,138,0),24 },
+       { IPv4(205,210,141,0),24 },
+       { IPv4(205,210,144,0),24 },
+       { IPv4(205,210,145,0),24 },
+       { IPv4(205,210,147,0),24 },
+       { IPv4(205,210,148,0),24 },
+       { IPv4(205,210,184,0),24 },
+       { IPv4(205,210,218,0),24 },
+       { IPv4(205,210,220,0),24 },
+       { IPv4(205,210,221,0),24 },
+       { IPv4(205,210,222,0),24 },
+       { IPv4(205,210,223,0),24 },
+       { IPv4(205,210,228,0),24 },
+       { IPv4(205,210,229,0),24 },
+       { IPv4(205,210,230,0),24 },
+       { IPv4(205,210,231,0),24 },
+       { IPv4(205,210,248,0),24 },
+       { IPv4(205,211,11,0),24 },
+       { IPv4(205,211,16,0),21 },
+       { IPv4(205,211,140,0),22 },
+       { IPv4(205,211,144,0),23 },
+       { IPv4(205,211,164,0),24 },
+       { IPv4(205,211,165,0),24 },
+       { IPv4(205,211,168,0),24 },
+       { IPv4(205,211,169,0),24 },
+       { IPv4(205,212,0,0),16 },
+       { IPv4(205,212,112,0),20 },
+       { IPv4(205,212,144,0),21 },
+       { IPv4(205,212,152,0),21 },
+       { IPv4(205,214,42,0),24 },
+       { IPv4(205,214,160,0),20 },
+       { IPv4(205,215,0,0),18 },
+       { IPv4(205,215,11,0),24 },
+       { IPv4(205,215,14,0),24 },
+       { IPv4(205,215,29,0),24 },
+       { IPv4(205,215,33,0),24 },
+       { IPv4(205,215,37,0),24 },
+       { IPv4(205,215,38,0),24 },
+       { IPv4(205,215,51,0),24 },
+       { IPv4(205,215,52,0),24 },
+       { IPv4(205,215,56,0),24 },
+       { IPv4(205,215,57,0),24 },
+       { IPv4(205,215,58,0),24 },
+       { IPv4(205,215,62,0),24 },
+       { IPv4(205,215,128,0),18 },
+       { IPv4(205,215,192,0),19 },
+       { IPv4(205,215,210,0),24 },
+       { IPv4(205,215,211,0),24 },
+       { IPv4(205,215,212,0),24 },
+       { IPv4(205,215,216,0),24 },
+       { IPv4(205,215,232,0),23 },
+       { IPv4(205,216,147,0),24 },
+       { IPv4(205,217,32,0),20 },
+       { IPv4(205,217,104,0),24 },
+       { IPv4(205,217,201,0),24 },
+       { IPv4(205,217,216,0),24 },
+       { IPv4(205,217,220,0),22 },
+       { IPv4(205,217,224,0),19 },
+       { IPv4(205,218,108,0),23 },
+       { IPv4(205,218,118,0),23 },
+       { IPv4(205,218,156,0),22 },
+       { IPv4(205,218,186,0),24 },
+       { IPv4(205,219,64,0),19 },
+       { IPv4(205,219,120,0),23 },
+       { IPv4(205,219,138,0),23 },
+       { IPv4(205,219,141,0),24 },
+       { IPv4(205,219,162,0),24 },
+       { IPv4(205,219,188,0),24 },
+       { IPv4(205,219,188,0),23 },
+       { IPv4(205,219,198,0),23 },
+       { IPv4(205,219,208,0),24 },
+       { IPv4(205,219,209,0),24 },
+       { IPv4(205,219,212,0),24 },
+       { IPv4(205,220,0,0),17 },
+       { IPv4(205,221,0,0),16 },
+       { IPv4(205,221,176,0),21 },
+       { IPv4(205,221,208,0),21 },
+       { IPv4(205,222,0,0),16 },
+       { IPv4(205,223,126,0),24 },
+       { IPv4(205,223,128,0),24 },
+       { IPv4(205,226,57,0),24 },
+       { IPv4(205,227,180,0),24 },
+       { IPv4(205,227,181,0),24 },
+       { IPv4(205,227,182,0),24 },
+       { IPv4(205,227,183,0),24 },
+       { IPv4(205,227,204,0),24 },
+       { IPv4(205,228,0,0),18 },
+       { IPv4(205,228,240,0),21 },
+       { IPv4(205,228,245,0),24 },
+       { IPv4(205,229,32,0),20 },
+       { IPv4(205,229,250,0),24 },
+       { IPv4(205,230,16,0),21 },
+       { IPv4(205,230,25,0),24 },
+       { IPv4(205,230,56,0),24 },
+       { IPv4(205,230,57,0),24 },
+       { IPv4(205,231,44,0),24 },
+       { IPv4(205,231,82,0),23 },
+       { IPv4(205,231,214,0),24 },
+       { IPv4(205,232,0,0),16 },
+       { IPv4(205,232,18,0),23 },
+       { IPv4(205,232,37,0),24 },
+       { IPv4(205,232,69,0),24 },
+       { IPv4(205,232,84,0),24 },
+       { IPv4(205,232,85,0),24 },
+       { IPv4(205,232,128,0),21 },
+       { IPv4(205,232,164,0),24 },
+       { IPv4(205,232,165,0),24 },
+       { IPv4(205,232,214,0),24 },
+       { IPv4(205,232,217,0),24 },
+       { IPv4(205,232,248,0),22 },
+       { IPv4(205,233,22,0),23 },
+       { IPv4(205,233,24,0),21 },
+       { IPv4(205,233,28,0),24 },
+       { IPv4(205,233,29,0),24 },
+       { IPv4(205,233,64,0),24 },
+       { IPv4(205,233,68,0),24 },
+       { IPv4(205,233,106,0),24 },
+       { IPv4(205,233,139,0),24 },
+       { IPv4(205,233,186,0),24 },
+       { IPv4(205,233,187,0),24 },
+       { IPv4(205,233,206,0),24 },
+       { IPv4(205,233,221,0),24 },
+       { IPv4(205,234,0,0),19 },
+       { IPv4(205,234,0,0),22 },
+       { IPv4(205,234,16,0),21 },
+       { IPv4(205,234,24,0),21 },
+       { IPv4(205,235,0,0),21 },
+       { IPv4(205,235,8,0),21 },
+       { IPv4(205,235,16,0),20 },
+       { IPv4(205,235,28,0),22 },
+       { IPv4(205,235,32,0),20 },
+       { IPv4(205,235,48,0),24 },
+       { IPv4(205,235,49,0),24 },
+       { IPv4(205,235,50,0),24 },
+       { IPv4(205,235,51,0),24 },
+       { IPv4(205,235,52,0),24 },
+       { IPv4(205,235,53,0),24 },
+       { IPv4(205,235,54,0),24 },
+       { IPv4(205,235,55,0),24 },
+       { IPv4(205,235,56,0),24 },
+       { IPv4(205,235,57,0),24 },
+       { IPv4(205,235,58,0),24 },
+       { IPv4(205,235,59,0),24 },
+       { IPv4(205,235,60,0),24 },
+       { IPv4(205,235,61,0),24 },
+       { IPv4(205,235,62,0),24 },
+       { IPv4(205,235,63,0),24 },
+       { IPv4(205,235,64,0),24 },
+       { IPv4(205,235,65,0),24 },
+       { IPv4(205,235,66,0),24 },
+       { IPv4(205,235,67,0),24 },
+       { IPv4(205,235,68,0),24 },
+       { IPv4(205,235,69,0),24 },
+       { IPv4(205,235,70,0),24 },
+       { IPv4(205,235,71,0),24 },
+       { IPv4(205,235,72,0),24 },
+       { IPv4(205,235,73,0),24 },
+       { IPv4(205,235,74,0),24 },
+       { IPv4(205,235,75,0),24 },
+       { IPv4(205,235,76,0),24 },
+       { IPv4(205,235,77,0),24 },
+       { IPv4(205,235,78,0),24 },
+       { IPv4(205,235,79,0),24 },
+       { IPv4(205,235,112,0),24 },
+       { IPv4(205,235,113,0),24 },
+       { IPv4(205,235,114,0),24 },
+       { IPv4(205,235,115,0),24 },
+       { IPv4(205,235,116,0),24 },
+       { IPv4(205,235,117,0),24 },
+       { IPv4(205,235,118,0),24 },
+       { IPv4(205,235,119,0),24 },
+       { IPv4(205,235,149,0),24 },
+       { IPv4(205,235,159,0),24 },
+       { IPv4(205,235,160,0),20 },
+       { IPv4(205,236,14,0),24 },
+       { IPv4(205,236,15,0),24 },
+       { IPv4(205,236,93,0),24 },
+       { IPv4(205,236,94,0),24 },
+       { IPv4(205,236,95,0),24 },
+       { IPv4(205,236,134,0),23 },
+       { IPv4(205,236,136,0),22 },
+       { IPv4(205,236,143,0),24 },
+       { IPv4(205,236,144,0),24 },
+       { IPv4(205,236,185,0),24 },
+       { IPv4(205,237,20,0),24 },
+       { IPv4(205,237,230,0),24 },
+       { IPv4(205,237,242,0),24 },
+       { IPv4(205,238,0,0),18 },
+       { IPv4(205,238,3,0),24 },
+       { IPv4(205,238,5,0),24 },
+       { IPv4(205,238,18,0),24 },
+       { IPv4(205,238,28,0),24 },
+       { IPv4(205,238,30,0),24 },
+       { IPv4(205,238,192,0),18 },
+       { IPv4(205,239,178,0),24 },
+       { IPv4(205,239,179,0),24 },
+       { IPv4(205,239,180,0),24 },
+       { IPv4(205,240,80,0),22 },
+       { IPv4(205,240,240,0),24 },
+       { IPv4(205,241,0,0),21 },
+       { IPv4(205,241,144,0),22 },
+       { IPv4(205,242,228,0),24 },
+       { IPv4(205,242,229,0),24 },
+       { IPv4(205,243,72,0),24 },
+       { IPv4(205,243,88,0),24 },
+       { IPv4(205,243,161,0),24 },
+       { IPv4(205,243,162,0),23 },
+       { IPv4(205,243,166,0),23 },
+       { IPv4(205,243,192,0),19 },
+       { IPv4(205,243,220,0),24 },
+       { IPv4(205,244,0,0),21 },
+       { IPv4(205,244,73,0),24 },
+       { IPv4(205,244,74,0),24 },
+       { IPv4(205,244,75,0),24 },
+       { IPv4(205,244,160,0),21 },
+       { IPv4(205,244,188,0),24 },
+       { IPv4(205,244,189,0),24 },
+       { IPv4(205,245,64,0),24 },
+       { IPv4(205,245,65,0),24 },
+       { IPv4(205,245,103,0),24 },
+       { IPv4(205,245,104,0),22 },
+       { IPv4(205,245,135,0),24 },
+       { IPv4(205,246,8,0),21 },
+       { IPv4(205,246,16,0),23 },
+       { IPv4(205,246,48,0),20 },
+       { IPv4(205,246,72,0),21 },
+       { IPv4(205,246,194,0),24 },
+       { IPv4(205,247,126,0),23 },
+       { IPv4(205,247,136,0),23 },
+       { IPv4(205,247,160,0),20 },
+       { IPv4(205,247,176,0),20 },
+       { IPv4(205,248,29,0),24 },
+       { IPv4(205,248,30,0),24 },
+       { IPv4(205,248,31,0),24 },
+       { IPv4(205,248,226,0),24 },
+       { IPv4(205,248,236,0),24 },
+       { IPv4(205,250,137,0),24 },
+       { IPv4(205,251,12,0),24 },
+       { IPv4(205,251,17,0),24 },
+       { IPv4(205,251,32,0),22 },
+       { IPv4(205,251,36,0),22 },
+       { IPv4(205,251,41,0),24 },
+       { IPv4(205,251,55,0),24 },
+       { IPv4(205,251,56,0),23 },
+       { IPv4(205,251,85,0),24 },
+       { IPv4(205,251,86,0),23 },
+       { IPv4(205,251,88,0),22 },
+       { IPv4(205,251,222,0),23 },
+       { IPv4(205,251,226,0),23 },
+       { IPv4(205,251,233,0),24 },
+       { IPv4(205,251,250,0),23 },
+       { IPv4(205,252,27,0),24 },
+       { IPv4(205,252,116,0),22 },
+       { IPv4(205,252,176,0),20 },
+       { IPv4(205,253,72,0),22 },
+       { IPv4(205,253,107,0),24 },
+       { IPv4(205,253,140,0),23 },
+       { IPv4(205,253,140,0),24 },
+       { IPv4(205,253,192,0),19 },
+       { IPv4(205,254,224,0),19 },
+       { IPv4(206,8,0,0),14 },
+       { IPv4(206,8,8,0),22 },
+       { IPv4(206,8,192,0),19 },
+       { IPv4(206,8,224,0),23 },
+       { IPv4(206,9,32,0),21 },
+       { IPv4(206,9,147,0),24 },
+       { IPv4(206,9,148,0),22 },
+       { IPv4(206,10,223,0),24 },
+       { IPv4(206,12,82,0),24 },
+       { IPv4(206,12,90,0),23 },
+       { IPv4(206,12,148,0),24 },
+       { IPv4(206,12,246,0),24 },
+       { IPv4(206,14,0,0),16 },
+       { IPv4(206,14,57,0),24 },
+       { IPv4(206,14,85,0),24 },
+       { IPv4(206,14,97,0),24 },
+       { IPv4(206,14,176,0),24 },
+       { IPv4(206,14,202,0),24 },
+       { IPv4(206,14,228,0),24 },
+       { IPv4(206,14,238,0),24 },
+       { IPv4(206,14,239,0),24 },
+       { IPv4(206,14,240,0),24 },
+       { IPv4(206,14,241,0),24 },
+       { IPv4(206,14,248,0),24 },
+       { IPv4(206,14,249,0),24 },
+       { IPv4(206,15,11,0),24 },
+       { IPv4(206,15,24,0),24 },
+       { IPv4(206,15,64,0),20 },
+       { IPv4(206,15,64,0),19 },
+       { IPv4(206,15,80,0),21 },
+       { IPv4(206,15,154,0),24 },
+       { IPv4(206,16,0,0),14 },
+       { IPv4(206,16,32,0),22 },
+       { IPv4(206,16,48,0),22 },
+       { IPv4(206,16,136,0),22 },
+       { IPv4(206,16,148,0),23 },
+       { IPv4(206,16,192,0),20 },
+       { IPv4(206,17,20,0),22 },
+       { IPv4(206,17,24,0),21 },
+       { IPv4(206,17,32,0),22 },
+       { IPv4(206,17,58,0),23 },
+       { IPv4(206,17,94,0),23 },
+       { IPv4(206,17,226,0),24 },
+       { IPv4(206,17,250,0),24 },
+       { IPv4(206,18,32,0),21 },
+       { IPv4(206,18,128,0),17 },
+       { IPv4(206,19,61,0),24 },
+       { IPv4(206,19,76,0),24 },
+       { IPv4(206,19,77,0),24 },
+       { IPv4(206,19,96,0),21 },
+       { IPv4(206,19,124,0),23 },
+       { IPv4(206,19,126,0),24 },
+       { IPv4(206,19,129,0),24 },
+       { IPv4(206,19,130,0),23 },
+       { IPv4(206,19,144,0),24 },
+       { IPv4(206,19,146,0),24 },
+       { IPv4(206,19,147,0),24 },
+       { IPv4(206,19,192,0),24 },
+       { IPv4(206,20,0,0),16 },
+       { IPv4(206,20,212,0),24 },
+       { IPv4(206,24,31,0),24 },
+       { IPv4(206,24,35,0),24 },
+       { IPv4(206,24,48,0),21 },
+       { IPv4(206,24,64,0),24 },
+       { IPv4(206,24,68,0),23 },
+       { IPv4(206,25,112,0),24 },
+       { IPv4(206,25,119,0),24 },
+       { IPv4(206,25,172,0),24 },
+       { IPv4(206,25,182,0),24 },
+       { IPv4(206,26,12,0),22 },
+       { IPv4(206,26,98,0),23 },
+       { IPv4(206,26,124,0),22 },
+       { IPv4(206,26,160,0),21 },
+       { IPv4(206,26,217,0),24 },
+       { IPv4(206,26,224,0),21 },
+       { IPv4(206,27,104,0),24 },
+       { IPv4(206,27,118,0),23 },
+       { IPv4(206,27,215,0),24 },
+       { IPv4(206,27,216,0),24 },
+       { IPv4(206,27,238,0),23 },
+       { IPv4(206,27,244,0),23 },
+       { IPv4(206,28,32,0),19 },
+       { IPv4(206,28,102,0),24 },
+       { IPv4(206,28,109,0),24 },
+       { IPv4(206,28,116,0),23 },
+       { IPv4(206,28,119,0),24 },
+       { IPv4(206,28,124,0),22 },
+       { IPv4(206,28,142,0),23 },
+       { IPv4(206,28,153,0),24 },
+       { IPv4(206,29,10,0),24 },
+       { IPv4(206,29,64,0),21 },
+       { IPv4(206,29,77,0),24 },
+       { IPv4(206,29,88,0),21 },
+       { IPv4(206,29,168,0),21 },
+       { IPv4(206,29,184,0),21 },
+       { IPv4(206,29,192,0),22 },
+       { IPv4(206,29,196,0),23 },
+       { IPv4(206,29,224,0),20 },
+       { IPv4(206,30,30,0),24 },
+       { IPv4(206,30,32,0),21 },
+       { IPv4(206,30,48,0),21 },
+       { IPv4(206,30,66,0),24 },
+       { IPv4(206,30,130,0),24 },
+       { IPv4(206,30,144,0),22 },
+       { IPv4(206,31,22,0),24 },
+       { IPv4(206,31,70,0),23 },
+       { IPv4(206,31,77,0),24 },
+       { IPv4(206,31,88,0),24 },
+       { IPv4(206,31,89,0),24 },
+       { IPv4(206,31,90,0),24 },
+       { IPv4(206,31,91,0),24 },
+       { IPv4(206,31,92,0),24 },
+       { IPv4(206,31,93,0),24 },
+       { IPv4(206,31,94,0),24 },
+       { IPv4(206,31,95,0),24 },
+       { IPv4(206,31,212,0),24 },
+       { IPv4(206,31,213,0),24 },
+       { IPv4(206,31,219,0),24 },
+       { IPv4(206,32,34,0),24 },
+       { IPv4(206,35,160,0),20 },
+       { IPv4(206,37,0,0),16 },
+       { IPv4(206,37,28,0),24 },
+       { IPv4(206,37,29,0),24 },
+       { IPv4(206,37,30,0),24 },
+       { IPv4(206,37,31,0),24 },
+       { IPv4(206,37,126,0),24 },
+       { IPv4(206,37,146,0),24 },
+       { IPv4(206,37,153,0),24 },
+       { IPv4(206,37,158,0),24 },
+       { IPv4(206,37,159,0),24 },
+       { IPv4(206,37,164,0),24 },
+       { IPv4(206,37,185,0),24 },
+       { IPv4(206,37,186,0),24 },
+       { IPv4(206,37,191,0),24 },
+       { IPv4(206,37,199,0),24 },
+       { IPv4(206,37,200,0),24 },
+       { IPv4(206,37,205,0),24 },
+       { IPv4(206,37,206,0),24 },
+       { IPv4(206,37,213,0),24 },
+       { IPv4(206,38,0,0),16 },
+       { IPv4(206,38,39,0),24 },
+       { IPv4(206,38,102,0),24 },
+       { IPv4(206,38,115,0),24 },
+       { IPv4(206,38,117,0),24 },
+       { IPv4(206,38,174,0),24 },
+       { IPv4(206,39,0,0),16 },
+       { IPv4(206,39,0,0),17 },
+       { IPv4(206,39,48,0),21 },
+       { IPv4(206,39,65,0),24 },
+       { IPv4(206,39,67,0),24 },
+       { IPv4(206,39,68,0),24 },
+       { IPv4(206,39,108,0),24 },
+       { IPv4(206,39,110,0),24 },
+       { IPv4(206,39,111,0),24 },
+       { IPv4(206,39,116,0),23 },
+       { IPv4(206,39,128,0),23 },
+       { IPv4(206,39,128,0),18 },
+       { IPv4(206,39,148,0),22 },
+       { IPv4(206,39,160,0),21 },
+       { IPv4(206,39,192,0),21 },
+       { IPv4(206,39,192,0),20 },
+       { IPv4(206,39,192,0),22 },
+       { IPv4(206,39,200,0),23 },
+       { IPv4(206,39,202,0),24 },
+       { IPv4(206,39,203,0),24 },
+       { IPv4(206,39,205,0),24 },
+       { IPv4(206,39,206,0),24 },
+       { IPv4(206,39,207,0),24 },
+       { IPv4(206,39,208,0),24 },
+       { IPv4(206,39,208,0),21 },
+       { IPv4(206,39,209,0),24 },
+       { IPv4(206,39,216,0),22 },
+       { IPv4(206,39,232,0),21 },
+       { IPv4(206,39,240,0),20 },
+       { IPv4(206,39,250,0),24 },
+       { IPv4(206,40,40,0),21 },
+       { IPv4(206,40,48,0),24 },
+       { IPv4(206,40,79,0),24 },
+       { IPv4(206,40,93,0),24 },
+       { IPv4(206,41,32,0),19 },
+       { IPv4(206,41,128,0),23 },
+       { IPv4(206,43,192,0),19 },
+       { IPv4(206,45,87,0),24 },
+       { IPv4(206,45,234,0),24 },
+       { IPv4(206,45,247,0),24 },
+       { IPv4(206,45,254,0),24 },
+       { IPv4(206,47,128,0),22 },
+       { IPv4(206,47,218,0),23 },
+       { IPv4(206,48,168,0),22 },
+       { IPv4(206,49,34,0),24 },
+       { IPv4(206,49,58,0),23 },
+       { IPv4(206,49,58,0),24 },
+       { IPv4(206,49,59,0),24 },
+       { IPv4(206,49,79,0),24 },
+       { IPv4(206,49,195,0),24 },
+       { IPv4(206,50,0,0),16 },
+       { IPv4(206,51,23,0),24 },
+       { IPv4(206,51,24,0),24 },
+       { IPv4(206,51,25,0),24 },
+       { IPv4(206,51,26,0),24 },
+       { IPv4(206,51,250,0),24 },
+       { IPv4(206,51,251,0),24 },
+       { IPv4(206,52,0,0),16 },
+       { IPv4(206,52,68,0),23 },
+       { IPv4(206,53,0,0),22 },
+       { IPv4(206,53,132,0),24 },
+       { IPv4(206,53,192,0),21 },
+       { IPv4(206,53,233,0),24 },
+       { IPv4(206,54,0,0),18 },
+       { IPv4(206,54,244,0),23 },
+       { IPv4(206,54,252,0),24 },
+       { IPv4(206,54,253,0),24 },
+       { IPv4(206,55,0,0),18 },
+       { IPv4(206,55,224,0),19 },
+       { IPv4(206,56,128,0),17 },
+       { IPv4(206,57,110,0),24 },
+       { IPv4(206,58,0,0),16 },
+       { IPv4(206,58,34,0),24 },
+       { IPv4(206,58,95,0),24 },
+       { IPv4(206,58,98,0),24 },
+       { IPv4(206,58,127,0),24 },
+       { IPv4(206,58,133,0),24 },
+       { IPv4(206,58,136,0),23 },
+       { IPv4(206,58,138,0),24 },
+       { IPv4(206,58,140,0),24 },
+       { IPv4(206,58,160,0),24 },
+       { IPv4(206,58,228,0),24 },
+       { IPv4(206,58,236,0),24 },
+       { IPv4(206,58,248,0),21 },
+       { IPv4(206,61,96,0),24 },
+       { IPv4(206,61,98,0),23 },
+       { IPv4(206,61,100,0),23 },
+       { IPv4(206,61,102,0),24 },
+       { IPv4(206,61,103,0),24 },
+       { IPv4(206,61,168,0),21 },
+       { IPv4(206,62,140,0),22 },
+       { IPv4(206,62,192,0),22 },
+       { IPv4(206,63,143,0),24 },
+       { IPv4(206,63,201,0),24 },
+       { IPv4(206,63,202,0),24 },
+       { IPv4(206,64,32,0),20 },
+       { IPv4(206,64,47,0),24 },
+       { IPv4(206,64,112,0),24 },
+       { IPv4(206,64,128,0),23 },
+       { IPv4(206,64,152,0),21 },
+       { IPv4(206,64,192,0),24 },
+       { IPv4(206,65,48,0),20 },
+       { IPv4(206,65,48,0),21 },
+       { IPv4(206,65,56,0),21 },
+       { IPv4(206,65,64,0),23 },
+       { IPv4(206,65,183,0),24 },
+       { IPv4(206,66,160,0),24 },
+       { IPv4(206,66,179,0),24 },
+       { IPv4(206,67,68,0),22 },
+       { IPv4(206,67,78,0),23 },
+       { IPv4(206,67,96,0),20 },
+       { IPv4(206,67,186,0),23 },
+       { IPv4(206,67,234,0),24 },
+       { IPv4(206,67,239,0),24 },
+       { IPv4(206,67,240,0),20 },
+       { IPv4(206,68,0,0),15 },
+       { IPv4(206,70,0,0),16 },
+       { IPv4(206,70,132,0),24 },
+       { IPv4(206,71,64,0),19 },
+       { IPv4(206,71,96,0),21 },
+       { IPv4(206,71,104,0),21 },
+       { IPv4(206,71,112,0),21 },
+       { IPv4(206,71,120,0),21 },
+       { IPv4(206,71,160,0),19 },
+       { IPv4(206,71,224,0),19 },
+       { IPv4(206,72,0,0),18 },
+       { IPv4(206,73,0,0),16 },
+       { IPv4(206,73,5,0),24 },
+       { IPv4(206,73,8,0),24 },
+       { IPv4(206,73,10,0),24 },
+       { IPv4(206,73,14,0),24 },
+       { IPv4(206,73,16,0),24 },
+       { IPv4(206,73,21,0),24 },
+       { IPv4(206,73,34,0),24 },
+       { IPv4(206,73,39,0),24 },
+       { IPv4(206,73,45,0),24 },
+       { IPv4(206,73,47,0),24 },
+       { IPv4(206,73,51,0),24 },
+       { IPv4(206,73,52,0),24 },
+       { IPv4(206,73,53,0),24 },
+       { IPv4(206,73,54,0),24 },
+       { IPv4(206,73,55,0),24 },
+       { IPv4(206,73,59,0),24 },
+       { IPv4(206,73,80,0),24 },
+       { IPv4(206,73,103,0),24 },
+       { IPv4(206,73,105,0),24 },
+       { IPv4(206,73,107,0),24 },
+       { IPv4(206,73,110,0),24 },
+       { IPv4(206,73,112,0),24 },
+       { IPv4(206,73,113,0),24 },
+       { IPv4(206,73,123,0),24 },
+       { IPv4(206,73,136,0),24 },
+       { IPv4(206,73,158,0),24 },
+       { IPv4(206,73,163,0),24 },
+       { IPv4(206,73,164,0),24 },
+       { IPv4(206,73,171,0),24 },
+       { IPv4(206,73,174,0),24 },
+       { IPv4(206,73,177,0),24 },
+       { IPv4(206,73,182,0),24 },
+       { IPv4(206,73,189,0),24 },
+       { IPv4(206,73,190,0),24 },
+       { IPv4(206,73,194,0),24 },
+       { IPv4(206,73,200,0),24 },
+       { IPv4(206,73,202,0),24 },
+       { IPv4(206,73,205,0),24 },
+       { IPv4(206,73,206,0),24 },
+       { IPv4(206,73,209,0),24 },
+       { IPv4(206,73,211,0),24 },
+       { IPv4(206,73,222,0),24 },
+       { IPv4(206,73,227,0),24 },
+       { IPv4(206,73,228,0),24 },
+       { IPv4(206,73,234,0),24 },
+       { IPv4(206,73,235,0),24 },
+       { IPv4(206,73,238,0),24 },
+       { IPv4(206,73,239,0),24 },
+       { IPv4(206,73,240,0),24 },
+       { IPv4(206,73,244,0),24 },
+       { IPv4(206,73,252,0),24 },
+       { IPv4(206,74,0,0),16 },
+       { IPv4(206,75,69,0),24 },
+       { IPv4(206,75,82,0),24 },
+       { IPv4(206,75,114,0),24 },
+       { IPv4(206,75,216,0),24 },
+       { IPv4(206,75,218,0),24 },
+       { IPv4(206,78,0,0),19 },
+       { IPv4(206,78,160,0),19 },
+       { IPv4(206,79,32,0),20 },
+       { IPv4(206,79,140,0),24 },
+       { IPv4(206,80,0,0),23 },
+       { IPv4(206,80,0,0),19 },
+       { IPv4(206,80,7,0),24 },
+       { IPv4(206,80,8,0),24 },
+       { IPv4(206,80,17,0),24 },
+       { IPv4(206,80,18,0),24 },
+       { IPv4(206,80,19,0),24 },
+       { IPv4(206,80,20,0),23 },
+       { IPv4(206,80,22,0),23 },
+       { IPv4(206,80,26,0),23 },
+       { IPv4(206,80,32,0),19 },
+       { IPv4(206,80,192,0),19 },
+       { IPv4(206,80,203,0),24 },
+       { IPv4(206,80,205,0),24 },
+       { IPv4(206,80,206,0),24 },
+       { IPv4(206,80,209,0),24 },
+       { IPv4(206,80,210,0),24 },
+       { IPv4(206,80,212,0),24 },
+       { IPv4(206,80,213,0),24 },
+       { IPv4(206,80,214,0),24 },
+       { IPv4(206,80,223,0),24 },
+       { IPv4(206,81,128,0),19 },
+       { IPv4(206,81,136,0),24 },
+       { IPv4(206,81,192,0),19 },
+       { IPv4(206,81,204,0),24 },
+       { IPv4(206,81,205,0),24 },
+       { IPv4(206,81,207,0),24 },
+       { IPv4(206,81,220,0),24 },
+       { IPv4(206,81,220,0),22 },
+       { IPv4(206,82,32,0),19 },
+       { IPv4(206,82,228,0),24 },
+       { IPv4(206,82,248,0),24 },
+       { IPv4(206,83,0,0),19 },
+       { IPv4(206,83,64,0),19 },
+       { IPv4(206,83,87,0),24 },
+       { IPv4(206,83,160,0),19 },
+       { IPv4(206,85,240,0),24 },
+       { IPv4(206,86,0,0),16 },
+       { IPv4(206,86,29,0),24 },
+       { IPv4(206,86,215,0),26 },
+       { IPv4(206,92,172,0),24 },
+       { IPv4(206,96,40,0),23 },
+       { IPv4(206,96,184,0),24 },
+       { IPv4(206,96,185,0),24 },
+       { IPv4(206,96,186,0),24 },
+       { IPv4(206,96,192,0),23 },
+       { IPv4(206,96,200,0),23 },
+       { IPv4(206,96,204,0),22 },
+       { IPv4(206,96,208,0),21 },
+       { IPv4(206,96,216,0),23 },
+       { IPv4(206,96,219,0),24 },
+       { IPv4(206,96,220,0),22 },
+       { IPv4(206,97,122,0),24 },
+       { IPv4(206,97,148,0),22 },
+       { IPv4(206,97,254,0),24 },
+       { IPv4(206,98,48,0),21 },
+       { IPv4(206,98,64,0),24 },
+       { IPv4(206,98,66,0),23 },
+       { IPv4(206,98,68,0),24 },
+       { IPv4(206,98,70,0),23 },
+       { IPv4(206,98,78,0),23 },
+       { IPv4(206,98,80,0),23 },
+       { IPv4(206,98,88,0),21 },
+       { IPv4(206,98,122,0),23 },
+       { IPv4(206,98,124,0),23 },
+       { IPv4(206,98,172,0),23 },
+       { IPv4(206,98,179,0),24 },
+       { IPv4(206,98,232,0),23 },
+       { IPv4(206,98,238,0),24 },
+       { IPv4(206,98,246,0),24 },
+       { IPv4(206,99,32,0),24 },
+       { IPv4(206,99,165,0),24 },
+       { IPv4(206,99,178,0),24 },
+       { IPv4(206,99,186,0),23 },
+       { IPv4(206,99,201,0),24 },
+       { IPv4(206,100,6,0),24 },
+       { IPv4(206,100,7,0),24 },
+       { IPv4(206,100,10,0),23 },
+       { IPv4(206,100,22,0),23 },
+       { IPv4(206,100,56,0),21 },
+       { IPv4(206,100,84,0),23 },
+       { IPv4(206,100,93,0),24 },
+       { IPv4(206,100,148,0),22 },
+       { IPv4(206,100,168,0),23 },
+       { IPv4(206,100,179,0),24 },
+       { IPv4(206,101,68,0),23 },
+       { IPv4(206,101,224,0),22 },
+       { IPv4(206,102,64,0),19 },
+       { IPv4(206,102,127,0),24 },
+       { IPv4(206,102,161,0),24 },
+       { IPv4(206,102,208,0),23 },
+       { IPv4(206,102,212,0),23 },
+       { IPv4(206,102,220,0),22 },
+       { IPv4(206,103,235,0),24 },
+       { IPv4(206,103,236,0),22 },
+       { IPv4(206,103,240,0),20 },
+       { IPv4(206,104,53,0),24 },
+       { IPv4(206,104,144,0),24 },
+       { IPv4(206,104,146,0),24 },
+       { IPv4(206,104,188,0),22 },
+       { IPv4(206,105,172,0),24 },
+       { IPv4(206,105,173,0),24 },
+       { IPv4(206,105,176,0),20 },
+       { IPv4(206,106,192,0),20 },
+       { IPv4(206,106,242,0),24 },
+       { IPv4(206,107,124,0),22 },
+       { IPv4(206,107,180,0),24 },
+       { IPv4(206,107,181,0),24 },
+       { IPv4(206,107,235,0),24 },
+       { IPv4(206,108,4,0),22 },
+       { IPv4(206,108,237,0),24 },
+       { IPv4(206,108,244,0),24 },
+       { IPv4(206,108,246,0),24 },
+       { IPv4(206,111,0,0),16 },
+       { IPv4(206,111,18,0),24 },
+       { IPv4(206,111,49,0),24 },
+       { IPv4(206,111,50,0),24 },
+       { IPv4(206,111,59,0),24 },
+       { IPv4(206,111,102,0),24 },
+       { IPv4(206,111,122,0),24 },
+       { IPv4(206,112,0,0),19 },
+       { IPv4(206,112,32,0),19 },
+       { IPv4(206,112,32,0),24 },
+       { IPv4(206,112,34,0),24 },
+       { IPv4(206,112,47,0),24 },
+       { IPv4(206,113,32,0),24 },
+       { IPv4(206,113,33,172),30 },
+       { IPv4(206,113,35,0),24 },
+       { IPv4(206,113,37,128),25 },
+       { IPv4(206,113,44,0),24 },
+       { IPv4(206,113,46,0),24 },
+       { IPv4(206,113,52,0),24 },
+       { IPv4(206,113,55,0),24 },
+       { IPv4(206,113,56,0),24 },
+       { IPv4(206,113,57,0),24 },
+       { IPv4(206,113,58,0),24 },
+       { IPv4(206,113,59,0),24 },
+       { IPv4(206,113,60,0),24 },
+       { IPv4(206,113,63,0),24 },
+       { IPv4(206,113,195,0),24 },
+       { IPv4(206,113,203,0),24 },
+       { IPv4(206,113,210,0),24 },
+       { IPv4(206,113,224,0),19 },
+       { IPv4(206,114,173,0),24 },
+       { IPv4(206,114,174,0),23 },
+       { IPv4(206,114,176,0),24 },
+       { IPv4(206,114,183,0),24 },
+       { IPv4(206,114,184,0),23 },
+       { IPv4(206,114,186,0),24 },
+       { IPv4(206,115,64,0),19 },
+       { IPv4(206,117,32,0),21 },
+       { IPv4(206,117,63,0),24 },
+       { IPv4(206,117,100,0),22 },
+       { IPv4(206,117,140,0),23 },
+       { IPv4(206,117,166,0),23 },
+       { IPv4(206,117,182,0),24 },
+       { IPv4(206,117,210,0),24 },
+       { IPv4(206,117,212,0),22 },
+       { IPv4(206,121,0,0),16 },
+       { IPv4(206,123,6,0),24 },
+       { IPv4(206,123,23,0),24 },
+       { IPv4(206,123,26,0),23 },
+       { IPv4(206,123,32,0),20 },
+       { IPv4(206,124,160,0),19 },
+       { IPv4(206,124,192,0),19 },
+       { IPv4(206,124,224,0),19 },
+       { IPv4(206,126,160,0),21 },
+       { IPv4(206,126,168,0),21 },
+       { IPv4(206,126,176,0),21 },
+       { IPv4(206,127,0,0),19 },
+       { IPv4(206,127,224,0),19 },
+       { IPv4(206,129,0,0),23 },
+       { IPv4(206,129,97,0),24 },
+       { IPv4(206,129,139,0),24 },
+       { IPv4(206,129,156,0),22 },
+       { IPv4(206,130,22,0),24 },
+       { IPv4(206,130,45,0),24 },
+       { IPv4(206,130,51,0),24 },
+       { IPv4(206,130,57,0),24 },
+       { IPv4(206,130,64,0),24 },
+       { IPv4(206,130,73,0),24 },
+       { IPv4(206,130,87,0),24 },
+       { IPv4(206,130,91,0),24 },
+       { IPv4(206,130,152,0),24 },
+       { IPv4(206,130,227,0),24 },
+       { IPv4(206,130,240,0),24 },
+       { IPv4(206,131,0,0),17 },
+       { IPv4(206,131,128,0),19 },
+       { IPv4(206,131,160,0),19 },
+       { IPv4(206,131,188,0),22 },
+       { IPv4(206,131,208,0),20 },
+       { IPv4(206,132,65,0),24 },
+       { IPv4(206,132,128,0),23 },
+       { IPv4(206,132,212,0),24 },
+       { IPv4(206,135,26,0),24 },
+       { IPv4(206,135,124,0),24 },
+       { IPv4(206,135,197,0),24 },
+       { IPv4(206,136,168,0),21 },
+       { IPv4(206,137,78,0),24 },
+       { IPv4(206,137,131,0),24 },
+       { IPv4(206,137,189,0),24 },
+       { IPv4(206,137,216,0),21 },
+       { IPv4(206,137,233,0),24 },
+       { IPv4(206,138,100,0),22 },
+       { IPv4(206,138,112,0),24 },
+       { IPv4(206,138,168,0),21 },
+       { IPv4(206,138,252,0),22 },
+       { IPv4(206,139,8,0),21 },
+       { IPv4(206,139,176,0),20 },
+       { IPv4(206,142,12,0),23 },
+       { IPv4(206,142,53,0),24 },
+       { IPv4(206,142,77,0),24 },
+       { IPv4(206,142,100,0),24 },
+       { IPv4(206,142,244,0),24 },
+       { IPv4(206,142,245,0),24 },
+       { IPv4(206,142,246,0),23 },
+       { IPv4(206,142,249,0),24 },
+       { IPv4(206,142,255,0),24 },
+       { IPv4(206,143,34,0),24 },
+       { IPv4(206,143,36,0),24 },
+       { IPv4(206,143,128,0),17 },
+       { IPv4(206,143,160,0),24 },
+       { IPv4(206,144,0,0),14 },
+       { IPv4(206,144,16,0),21 },
+       { IPv4(206,144,56,0),21 },
+       { IPv4(206,144,88,0),24 },
+       { IPv4(206,144,96,0),19 },
+       { IPv4(206,144,128,0),18 },
+       { IPv4(206,144,212,0),22 },
+       { IPv4(206,144,248,0),21 },
+       { IPv4(206,145,13,0),24 },
+       { IPv4(206,145,14,0),24 },
+       { IPv4(206,145,24,0),22 },
+       { IPv4(206,145,117,0),24 },
+       { IPv4(206,145,128,0),19 },
+       { IPv4(206,146,16,0),20 },
+       { IPv4(206,146,60,0),24 },
+       { IPv4(206,146,64,0),19 },
+       { IPv4(206,146,143,0),24 },
+       { IPv4(206,146,233,0),24 },
+       { IPv4(206,147,106,0),23 },
+       { IPv4(206,147,192,0),18 },
+       { IPv4(206,149,144,0),22 },
+       { IPv4(206,150,148,0),23 },
+       { IPv4(206,150,164,0),24 },
+       { IPv4(206,150,165,0),24 },
+       { IPv4(206,150,166,0),23 },
+       { IPv4(206,150,169,0),24 },
+       { IPv4(206,150,174,0),23 },
+       { IPv4(206,150,183,0),24 },
+       { IPv4(206,150,187,0),24 },
+       { IPv4(206,150,188,0),23 },
+       { IPv4(206,150,228,0),24 },
+       { IPv4(206,151,136,0),22 },
+       { IPv4(206,151,224,0),19 },
+       { IPv4(206,152,180,0),24 },
+       { IPv4(206,152,180,0),23 },
+       { IPv4(206,152,188,0),22 },
+       { IPv4(206,152,208,0),20 },
+       { IPv4(206,152,227,0),24 },
+       { IPv4(206,153,26,0),24 },
+       { IPv4(206,153,58,0),23 },
+       { IPv4(206,153,176,0),20 },
+       { IPv4(206,154,102,0),24 },
+       { IPv4(206,154,105,0),24 },
+       { IPv4(206,154,169,0),24 },
+       { IPv4(206,155,22,0),24 },
+       { IPv4(206,155,24,0),21 },
+       { IPv4(206,155,136,0),23 },
+       { IPv4(206,155,144,0),20 },
+       { IPv4(206,155,169,0),24 },
+       { IPv4(206,155,172,0),24 },
+       { IPv4(206,155,173,0),24 },
+       { IPv4(206,155,228,0),24 },
+       { IPv4(206,155,229,0),24 },
+       { IPv4(206,155,237,0),24 },
+       { IPv4(206,156,4,0),24 },
+       { IPv4(206,156,8,0),24 },
+       { IPv4(206,156,12,0),24 },
+       { IPv4(206,156,24,0),23 },
+       { IPv4(206,156,26,0),24 },
+       { IPv4(206,156,27,0),24 },
+       { IPv4(206,156,32,0),24 },
+       { IPv4(206,156,76,0),24 },
+       { IPv4(206,157,123,0),24 },
+       { IPv4(206,157,180,0),24 },
+       { IPv4(206,157,192,0),23 },
+       { IPv4(206,157,224,0),21 },
+       { IPv4(206,159,40,0),22 },
+       { IPv4(206,159,192,0),18 },
+       { IPv4(206,160,160,0),21 },
+       { IPv4(206,160,192,0),20 },
+       { IPv4(206,161,72,0),21 },
+       { IPv4(206,161,88,0),21 },
+       { IPv4(206,161,160,0),19 },
+       { IPv4(206,162,0,0),17 },
+       { IPv4(206,162,1,0),24 },
+       { IPv4(206,162,14,0),24 },
+       { IPv4(206,162,15,0),24 },
+       { IPv4(206,162,30,0),24 },
+       { IPv4(206,162,31,0),24 },
+       { IPv4(206,162,36,0),23 },
+       { IPv4(206,162,53,0),24 },
+       { IPv4(206,162,66,0),24 },
+       { IPv4(206,162,73,0),24 },
+       { IPv4(206,162,91,0),24 },
+       { IPv4(206,162,112,0),24 },
+       { IPv4(206,162,113,0),24 },
+       { IPv4(206,162,114,0),24 },
+       { IPv4(206,162,124,0),24 },
+       { IPv4(206,163,0,0),17 },
+       { IPv4(206,163,128,0),18 },
+       { IPv4(206,163,188,0),24 },
+       { IPv4(206,163,192,0),19 },
+       { IPv4(206,165,200,0),22 },
+       { IPv4(206,166,0,0),17 },
+       { IPv4(206,166,128,0),18 },
+       { IPv4(206,166,140,0),22 },
+       { IPv4(206,166,156,0),22 },
+       { IPv4(206,166,192,0),18 },
+       { IPv4(206,166,197,0),24 },
+       { IPv4(206,168,0,0),16 },
+       { IPv4(206,168,43,0),24 },
+       { IPv4(206,168,47,0),24 },
+       { IPv4(206,168,140,0),22 },
+       { IPv4(206,168,187,0),24 },
+       { IPv4(206,168,224,0),24 },
+       { IPv4(206,168,228,0),24 },
+       { IPv4(206,169,0,0),16 },
+       { IPv4(206,169,0,0),21 },
+       { IPv4(206,169,1,0),24 },
+       { IPv4(206,169,8,0),24 },
+       { IPv4(206,169,8,0),22 },
+       { IPv4(206,169,9,0),24 },
+       { IPv4(206,169,10,0),24 },
+       { IPv4(206,169,11,0),24 },
+       { IPv4(206,169,12,0),23 },
+       { IPv4(206,169,14,0),23 },
+       { IPv4(206,169,16,0),21 },
+       { IPv4(206,169,20,0),23 },
+       { IPv4(206,169,24,0),24 },
+       { IPv4(206,169,24,0),21 },
+       { IPv4(206,169,26,0),23 },
+       { IPv4(206,169,29,0),24 },
+       { IPv4(206,169,30,0),24 },
+       { IPv4(206,169,31,0),24 },
+       { IPv4(206,169,32,0),21 },
+       { IPv4(206,169,32,0),23 },
+       { IPv4(206,169,40,0),21 },
+       { IPv4(206,169,48,0),21 },
+       { IPv4(206,169,56,0),21 },
+       { IPv4(206,169,60,0),24 },
+       { IPv4(206,169,62,0),24 },
+       { IPv4(206,169,66,0),24 },
+       { IPv4(206,169,70,0),23 },
+       { IPv4(206,169,71,0),24 },
+       { IPv4(206,169,72,0),21 },
+       { IPv4(206,169,76,0),24 },
+       { IPv4(206,169,80,0),22 },
+       { IPv4(206,169,84,0),24 },
+       { IPv4(206,169,86,0),24 },
+       { IPv4(206,169,87,0),24 },
+       { IPv4(206,169,88,0),22 },
+       { IPv4(206,169,88,0),24 },
+       { IPv4(206,169,89,0),24 },
+       { IPv4(206,169,96,0),22 },
+       { IPv4(206,169,96,0),23 },
+       { IPv4(206,169,99,0),24 },
+       { IPv4(206,169,100,0),22 },
+       { IPv4(206,169,104,0),21 },
+       { IPv4(206,169,106,0),23 },
+       { IPv4(206,169,108,0),23 },
+       { IPv4(206,169,112,0),21 },
+       { IPv4(206,169,120,0),21 },
+       { IPv4(206,169,120,0),24 },
+       { IPv4(206,169,121,0),24 },
+       { IPv4(206,169,122,0),24 },
+       { IPv4(206,169,123,0),24 },
+       { IPv4(206,169,124,0),24 },
+       { IPv4(206,169,127,0),24 },
+       { IPv4(206,169,128,0),22 },
+       { IPv4(206,169,132,0),23 },
+       { IPv4(206,169,132,0),24 },
+       { IPv4(206,169,133,0),24 },
+       { IPv4(206,169,136,0),21 },
+       { IPv4(206,169,144,0),21 },
+       { IPv4(206,169,145,0),24 },
+       { IPv4(206,169,146,0),23 },
+       { IPv4(206,169,150,0),24 },
+       { IPv4(206,169,152,0),21 },
+       { IPv4(206,169,158,0),23 },
+       { IPv4(206,169,160,0),23 },
+       { IPv4(206,169,160,0),21 },
+       { IPv4(206,169,160,0),24 },
+       { IPv4(206,169,161,0),24 },
+       { IPv4(206,169,162,0),23 },
+       { IPv4(206,169,168,0),21 },
+       { IPv4(206,169,173,0),24 },
+       { IPv4(206,169,176,0),21 },
+       { IPv4(206,169,180,0),24 },
+       { IPv4(206,169,182,0),23 },
+       { IPv4(206,169,184,0),24 },
+       { IPv4(206,169,184,0),21 },
+       { IPv4(206,169,192,0),21 },
+       { IPv4(206,169,200,0),22 },
+       { IPv4(206,169,208,0),22 },
+       { IPv4(206,169,212,0),22 },
+       { IPv4(206,169,216,0),21 },
+       { IPv4(206,169,217,0),24 },
+       { IPv4(206,169,218,0),23 },
+       { IPv4(206,169,222,0),23 },
+       { IPv4(206,169,224,0),21 },
+       { IPv4(206,169,232,0),21 },
+       { IPv4(206,169,233,0),24 },
+       { IPv4(206,169,234,0),23 },
+       { IPv4(206,169,237,0),24 },
+       { IPv4(206,169,239,0),24 },
+       { IPv4(206,169,248,0),21 },
+       { IPv4(206,171,214,0),23 },
+       { IPv4(206,173,0,0),16 },
+       { IPv4(206,173,8,0),22 },
+       { IPv4(206,173,12,0),22 },
+       { IPv4(206,173,20,0),22 },
+       { IPv4(206,173,28,0),22 },
+       { IPv4(206,175,82,0),24 },
+       { IPv4(206,176,128,0),19 },
+       { IPv4(206,176,192,0),21 },
+       { IPv4(206,176,200,0),21 },
+       { IPv4(206,176,208,0),21 },
+       { IPv4(206,176,216,0),21 },
+       { IPv4(206,180,64,0),18 },
+       { IPv4(206,180,128,0),19 },
+       { IPv4(206,180,192,0),20 },
+       { IPv4(206,180,199,0),24 },
+       { IPv4(206,180,200,0),24 },
+       { IPv4(206,180,201,0),24 },
+       { IPv4(206,180,204,0),22 },
+       { IPv4(206,180,224,0),20 },
+       { IPv4(206,182,132,0),24 },
+       { IPv4(206,182,146,0),24 },
+       { IPv4(206,182,150,0),24 },
+       { IPv4(206,182,152,0),24 },
+       { IPv4(206,182,157,0),24 },
+       { IPv4(206,182,163,0),24 },
+       { IPv4(206,182,164,0),24 },
+       { IPv4(206,182,184,0),24 },
+       { IPv4(206,182,204,0),24 },
+       { IPv4(206,182,238,0),24 },
+       { IPv4(206,183,64,0),19 },
+       { IPv4(206,183,192,0),19 },
+       { IPv4(206,183,224,0),19 },
+       { IPv4(206,183,242,0),24 },
+       { IPv4(206,184,0,0),16 },
+       { IPv4(206,184,17,0),24 },
+       { IPv4(206,184,20,0),23 },
+       { IPv4(206,187,144,0),21 },
+       { IPv4(206,189,155,0),24 },
+       { IPv4(206,190,32,0),19 },
+       { IPv4(206,190,64,0),19 },
+       { IPv4(206,190,71,0),24 },
+       { IPv4(206,190,72,0),24 },
+       { IPv4(206,190,78,0),23 },
+       { IPv4(206,190,80,0),23 },
+       { IPv4(206,190,82,0),24 },
+       { IPv4(206,190,91,0),24 },
+       { IPv4(206,190,95,0),24 },
+       { IPv4(206,190,128,0),19 },
+       { IPv4(206,190,142,0),24 },
+       { IPv4(206,190,192,0),19 },
+       { IPv4(206,191,0,0),18 },
+       { IPv4(206,191,64,0),18 },
+       { IPv4(206,191,71,0),24 },
+       { IPv4(206,191,77,0),24 },
+       { IPv4(206,191,128,0),18 },
+       { IPv4(206,191,158,0),24 },
+       { IPv4(206,191,163,0),24 },
+       { IPv4(206,191,182,0),23 },
+       { IPv4(206,191,187,0),24 },
+       { IPv4(206,193,128,0),18 },
+       { IPv4(206,194,192,0),18 },
+       { IPv4(206,195,3,0),24 },
+       { IPv4(206,195,4,0),24 },
+       { IPv4(206,195,19,0),24 },
+       { IPv4(206,195,64,0),19 },
+       { IPv4(206,196,64,0),19 },
+       { IPv4(206,196,128,0),19 },
+       { IPv4(206,196,253,0),24 },
+       { IPv4(206,196,254,0),24 },
+       { IPv4(206,197,23,0),24 },
+       { IPv4(206,197,40,0),23 },
+       { IPv4(206,197,43,0),24 },
+       { IPv4(206,197,65,0),24 },
+       { IPv4(206,197,69,0),24 },
+       { IPv4(206,197,74,0),24 },
+       { IPv4(206,197,77,0),24 },
+       { IPv4(206,197,81,0),24 },
+       { IPv4(206,197,104,0),24 },
+       { IPv4(206,197,117,0),24 },
+       { IPv4(206,197,121,0),24 },
+       { IPv4(206,197,144,0),24 },
+       { IPv4(206,197,156,0),24 },
+       { IPv4(206,197,194,0),24 },
+       { IPv4(206,197,206,0),24 },
+       { IPv4(206,197,217,0),24 },
+       { IPv4(206,197,218,0),24 },
+       { IPv4(206,197,219,0),24 },
+       { IPv4(206,197,236,0),24 },
+       { IPv4(206,197,240,0),24 },
+       { IPv4(206,197,251,0),24 },
+       { IPv4(206,198,0,0),16 },
+       { IPv4(206,199,16,0),24 },
+       { IPv4(206,201,0,0),20 },
+       { IPv4(206,201,17,0),24 },
+       { IPv4(206,201,18,0),24 },
+       { IPv4(206,201,19,0),24 },
+       { IPv4(206,201,20,0),24 },
+       { IPv4(206,201,32,0),20 },
+       { IPv4(206,201,172,0),24 },
+       { IPv4(206,201,173,0),24 },
+       { IPv4(206,201,174,0),24 },
+       { IPv4(206,201,192,0),20 },
+       { IPv4(206,201,240,0),20 },
+       { IPv4(206,202,28,0),24 },
+       { IPv4(206,203,171,0),24 },
+       { IPv4(206,204,0,0),16 },
+       { IPv4(206,206,18,0),24 },
+       { IPv4(206,206,24,0),21 },
+       { IPv4(206,206,192,0),18 },
+       { IPv4(206,206,224,0),22 },
+       { IPv4(206,206,228,0),24 },
+       { IPv4(206,206,236,0),22 },
+       { IPv4(206,207,0,0),18 },
+       { IPv4(206,207,16,0),21 },
+       { IPv4(206,207,40,0),24 },
+       { IPv4(206,207,42,0),24 },
+       { IPv4(206,207,45,0),24 },
+       { IPv4(206,207,46,0),24 },
+       { IPv4(206,207,49,0),24 },
+       { IPv4(206,207,50,0),24 },
+       { IPv4(206,207,51,0),24 },
+       { IPv4(206,207,52,0),22 },
+       { IPv4(206,207,64,0),21 },
+       { IPv4(206,207,72,0),23 },
+       { IPv4(206,207,74,0),24 },
+       { IPv4(206,207,96,0),24 },
+       { IPv4(206,207,97,0),24 },
+       { IPv4(206,207,100,0),24 },
+       { IPv4(206,207,113,0),24 },
+       { IPv4(206,207,114,0),23 },
+       { IPv4(206,207,118,0),24 },
+       { IPv4(206,207,119,0),24 },
+       { IPv4(206,207,120,0),23 },
+       { IPv4(206,207,122,0),24 },
+       { IPv4(206,207,128,0),18 },
+       { IPv4(206,207,136,0),24 },
+       { IPv4(206,207,145,0),24 },
+       { IPv4(206,207,153,0),24 },
+       { IPv4(206,207,154,0),24 },
+       { IPv4(206,207,160,0),20 },
+       { IPv4(206,207,186,0),23 },
+       { IPv4(206,207,188,0),23 },
+       { IPv4(206,207,190,0),23 },
+       { IPv4(206,207,192,0),18 },
+       { IPv4(206,207,200,0),24 },
+       { IPv4(206,207,224,0),19 },
+       { IPv4(206,208,2,0),24 },
+       { IPv4(206,208,3,0),24 },
+       { IPv4(206,208,6,0),24 },
+       { IPv4(206,208,88,0),21 },
+       { IPv4(206,208,152,0),24 },
+       { IPv4(206,208,168,0),24 },
+       { IPv4(206,208,169,0),24 },
+       { IPv4(206,208,184,0),21 },
+       { IPv4(206,208,224,0),21 },
+       { IPv4(206,208,236,0),24 },
+       { IPv4(206,208,237,0),24 },
+       { IPv4(206,208,238,0),24 },
+       { IPv4(206,208,239,0),24 },
+       { IPv4(206,208,240,0),22 },
+       { IPv4(206,208,244,0),23 },
+       { IPv4(206,208,246,0),24 },
+       { IPv4(206,208,247,0),24 },
+       { IPv4(206,209,73,0),24 },
+       { IPv4(206,209,96,0),20 },
+       { IPv4(206,209,210,0),24 },
+       { IPv4(206,209,225,0),24 },
+       { IPv4(206,210,26,0),24 },
+       { IPv4(206,210,27,0),24 },
+       { IPv4(206,210,28,0),24 },
+       { IPv4(206,210,29,0),24 },
+       { IPv4(206,210,30,0),24 },
+       { IPv4(206,210,32,0),19 },
+       { IPv4(206,210,64,0),19 },
+       { IPv4(206,210,128,0),19 },
+       { IPv4(206,211,32,0),19 },
+       { IPv4(206,211,96,0),20 },
+       { IPv4(206,211,112,0),21 },
+       { IPv4(206,211,120,0),24 },
+       { IPv4(206,211,121,0),24 },
+       { IPv4(206,211,122,0),24 },
+       { IPv4(206,213,64,0),18 },
+       { IPv4(206,213,128,0),18 },
+       { IPv4(206,213,192,0),19 },
+       { IPv4(206,213,192,0),18 },
+       { IPv4(206,213,209,0),24 },
+       { IPv4(206,213,224,0),19 },
+       { IPv4(206,213,251,0),24 },
+       { IPv4(206,213,252,0),24 },
+       { IPv4(206,214,0,0),15 },
+       { IPv4(206,214,13,0),24 },
+       { IPv4(206,214,25,0),24 },
+       { IPv4(206,214,26,0),24 },
+       { IPv4(206,214,31,0),24 },
+       { IPv4(206,214,33,0),24 },
+       { IPv4(206,214,58,0),24 },
+       { IPv4(206,214,126,0),24 },
+       { IPv4(206,214,172,0),24 },
+       { IPv4(206,214,209,0),24 },
+       { IPv4(206,215,65,0),24 },
+       { IPv4(206,215,66,0),24 },
+       { IPv4(206,215,140,0),24 },
+       { IPv4(206,215,142,0),24 },
+       { IPv4(206,215,143,0),24 },
+       { IPv4(206,215,145,0),24 },
+       { IPv4(206,215,167,0),24 },
+       { IPv4(206,215,217,0),24 },
+       { IPv4(206,215,223,0),24 },
+       { IPv4(206,215,228,0),24 },
+       { IPv4(206,215,229,0),24 },
+       { IPv4(206,215,230,0),24 },
+       { IPv4(206,215,231,0),24 },
+       { IPv4(206,215,236,0),24 },
+       { IPv4(206,215,237,0),24 },
+       { IPv4(206,216,0,0),15 },
+       { IPv4(206,216,1,0),24 },
+       { IPv4(206,216,6,0),24 },
+       { IPv4(206,216,53,0),24 },
+       { IPv4(206,216,77,0),24 },
+       { IPv4(206,216,103,0),24 },
+       { IPv4(206,217,36,0),24 },
+       { IPv4(206,217,102,0),24 },
+       { IPv4(206,217,121,0),24 },
+       { IPv4(206,217,207,0),24 },
+       { IPv4(206,217,239,0),24 },
+       { IPv4(206,219,0,0),20 },
+       { IPv4(206,219,16,0),21 },
+       { IPv4(206,219,35,0),24 },
+       { IPv4(206,219,36,0),22 },
+       { IPv4(206,219,44,0),23 },
+       { IPv4(206,219,49,0),24 },
+       { IPv4(206,219,50,0),23 },
+       { IPv4(206,219,52,0),23 },
+       { IPv4(206,219,64,0),19 },
+       { IPv4(206,219,96,0),19 },
+       { IPv4(206,219,128,0),18 },
+       { IPv4(206,219,192,0),18 },
+       { IPv4(206,220,28,0),24 },
+       { IPv4(206,220,30,0),24 },
+       { IPv4(206,220,64,0),24 },
+       { IPv4(206,220,65,0),24 },
+       { IPv4(206,220,136,0),22 },
+       { IPv4(206,220,168,0),22 },
+       { IPv4(206,220,224,0),22 },
+       { IPv4(206,221,35,0),24 },
+       { IPv4(206,221,164,0),24 },
+       { IPv4(206,221,165,0),24 },
+       { IPv4(206,221,166,0),24 },
+       { IPv4(206,221,167,0),24 },
+       { IPv4(206,222,32,0),19 },
+       { IPv4(206,222,64,0),19 },
+       { IPv4(206,222,96,0),19 },
+       { IPv4(206,222,224,0),19 },
+       { IPv4(206,223,36,0),24 },
+       { IPv4(206,223,39,0),24 },
+       { IPv4(206,223,70,0),24 },
+       { IPv4(206,223,80,0),24 },
+       { IPv4(206,223,93,0),24 },
+       { IPv4(206,223,102,0),24 },
+       { IPv4(206,223,110,0),24 },
+       { IPv4(206,223,132,0),24 },
+       { IPv4(206,223,133,0),24 },
+       { IPv4(206,224,32,0),19 },
+       { IPv4(206,224,64,0),19 },
+       { IPv4(206,225,32,0),19 },
+       { IPv4(206,228,16,0),20 },
+       { IPv4(206,228,64,0),19 },
+       { IPv4(206,228,78,0),24 },
+       { IPv4(206,228,139,0),24 },
+       { IPv4(206,228,141,0),24 },
+       { IPv4(206,228,142,0),24 },
+       { IPv4(206,228,143,0),24 },
+       { IPv4(206,228,186,0),24 },
+       { IPv4(206,228,187,0),24 },
+       { IPv4(206,229,125,0),24 },
+       { IPv4(206,229,220,0),24 },
+       { IPv4(206,229,221,0),24 },
+       { IPv4(206,230,192,0),19 },
+       { IPv4(206,230,221,0),24 },
+       { IPv4(206,231,96,0),24 },
+       { IPv4(206,231,192,0),19 },
+       { IPv4(206,236,78,0),24 },
+       { IPv4(206,236,79,0),24 },
+       { IPv4(206,239,0,0),16 },
+       { IPv4(206,239,10,0),24 },
+       { IPv4(206,240,24,0),22 },
+       { IPv4(206,241,181,0),24 },
+       { IPv4(206,241,182,0),24 },
+       { IPv4(206,243,128,0),23 },
+       { IPv4(206,243,130,0),24 },
+       { IPv4(206,245,128,0),18 },
+       { IPv4(206,245,192,0),18 },
+       { IPv4(206,245,195,0),24 },
+       { IPv4(206,245,199,0),24 },
+       { IPv4(206,245,233,0),24 },
+       { IPv4(206,245,234,0),24 },
+       { IPv4(206,245,235,0),24 },
+       { IPv4(206,245,240,0),24 },
+       { IPv4(206,245,243,0),24 },
+       { IPv4(206,246,32,0),20 },
+       { IPv4(206,246,46,0),23 },
+       { IPv4(206,246,64,0),18 },
+       { IPv4(206,249,14,0),24 },
+       { IPv4(206,250,201,0),24 },
+       { IPv4(206,251,25,0),24 },
+       { IPv4(206,251,128,0),19 },
+       { IPv4(206,251,192,0),24 },
+       { IPv4(206,251,195,0),24 },
+       { IPv4(206,251,196,0),22 },
+       { IPv4(206,251,200,0),21 },
+       { IPv4(206,251,208,0),24 },
+       { IPv4(206,251,210,0),23 },
+       { IPv4(206,251,212,0),24 },
+       { IPv4(206,251,213,0),24 },
+       { IPv4(206,251,214,0),24 },
+       { IPv4(206,251,224,0),19 },
+       { IPv4(206,252,64,0),18 },
+       { IPv4(206,252,128,0),19 },
+       { IPv4(206,252,149,128),26 },
+       { IPv4(206,252,224,0),19 },
+       { IPv4(206,253,64,0),19 },
+       { IPv4(206,253,94,0),24 },
+       { IPv4(206,253,192,0),19 },
+       { IPv4(206,253,224,0),19 },
+       { IPv4(207,0,32,0),21 },
+       { IPv4(207,0,67,0),24 },
+       { IPv4(207,0,68,0),23 },
+       { IPv4(207,0,72,0),23 },
+       { IPv4(207,0,74,0),24 },
+       { IPv4(207,0,112,0),20 },
+       { IPv4(207,0,224,0),20 },
+       { IPv4(207,1,56,0),23 },
+       { IPv4(207,1,96,0),21 },
+       { IPv4(207,1,104,0),22 },
+       { IPv4(207,1,108,0),23 },
+       { IPv4(207,1,136,0),21 },
+       { IPv4(207,1,144,0),21 },
+       { IPv4(207,1,152,0),23 },
+       { IPv4(207,1,201,0),24 },
+       { IPv4(207,2,144,0),20 },
+       { IPv4(207,2,218,0),24 },
+       { IPv4(207,3,144,0),21 },
+       { IPv4(207,3,192,0),19 },
+       { IPv4(207,5,16,0),22 },
+       { IPv4(207,5,40,0),21 },
+       { IPv4(207,5,64,0),22 },
+       { IPv4(207,5,68,0),22 },
+       { IPv4(207,5,80,0),21 },
+       { IPv4(207,5,88,0),21 },
+       { IPv4(207,5,96,0),24 },
+       { IPv4(207,5,97,0),24 },
+       { IPv4(207,5,98,0),24 },
+       { IPv4(207,5,99,0),24 },
+       { IPv4(207,5,100,0),24 },
+       { IPv4(207,5,101,0),24 },
+       { IPv4(207,5,102,0),24 },
+       { IPv4(207,5,103,0),24 },
+       { IPv4(207,5,104,0),24 },
+       { IPv4(207,5,105,0),24 },
+       { IPv4(207,5,106,0),24 },
+       { IPv4(207,5,107,0),24 },
+       { IPv4(207,5,108,0),24 },
+       { IPv4(207,5,109,0),24 },
+       { IPv4(207,5,110,0),24 },
+       { IPv4(207,5,111,0),24 },
+       { IPv4(207,7,16,0),23 },
+       { IPv4(207,7,64,0),20 },
+       { IPv4(207,7,80,0),20 },
+       { IPv4(207,8,0,0),17 },
+       { IPv4(207,8,128,0),17 },
+       { IPv4(207,8,133,0),24 },
+       { IPv4(207,8,149,0),24 },
+       { IPv4(207,8,164,0),22 },
+       { IPv4(207,8,174,0),23 },
+       { IPv4(207,8,196,0),24 },
+       { IPv4(207,8,218,0),24 },
+       { IPv4(207,8,234,0),24 },
+       { IPv4(207,10,0,0),16 },
+       { IPv4(207,10,4,0),24 },
+       { IPv4(207,10,28,0),22 },
+       { IPv4(207,10,44,0),22 },
+       { IPv4(207,10,55,0),24 },
+       { IPv4(207,10,136,0),24 },
+       { IPv4(207,10,137,0),24 },
+       { IPv4(207,10,138,0),24 },
+       { IPv4(207,10,139,0),24 },
+       { IPv4(207,10,140,0),24 },
+       { IPv4(207,10,141,0),24 },
+       { IPv4(207,10,142,0),24 },
+       { IPv4(207,10,143,0),24 },
+       { IPv4(207,10,161,0),24 },
+       { IPv4(207,10,196,0),24 },
+       { IPv4(207,10,197,0),24 },
+       { IPv4(207,10,198,0),24 },
+       { IPv4(207,10,199,0),24 },
+       { IPv4(207,10,206,0),24 },
+       { IPv4(207,11,0,0),17 },
+       { IPv4(207,11,207,0),24 },
+       { IPv4(207,11,208,0),24 },
+       { IPv4(207,11,210,0),24 },
+       { IPv4(207,12,0,0),20 },
+       { IPv4(207,12,16,0),20 },
+       { IPv4(207,12,19,0),24 },
+       { IPv4(207,12,20,0),23 },
+       { IPv4(207,12,22,0),24 },
+       { IPv4(207,12,181,0),24 },
+       { IPv4(207,13,14,0),24 },
+       { IPv4(207,13,175,0),24 },
+       { IPv4(207,13,230,0),24 },
+       { IPv4(207,14,96,0),21 },
+       { IPv4(207,14,97,0),24 },
+       { IPv4(207,14,98,0),23 },
+       { IPv4(207,14,100,0),24 },
+       { IPv4(207,14,104,0),21 },
+       { IPv4(207,14,109,0),24 },
+       { IPv4(207,14,144,0),20 },
+       { IPv4(207,14,160,0),24 },
+       { IPv4(207,14,161,0),24 },
+       { IPv4(207,14,192,0),20 },
+       { IPv4(207,14,210,0),24 },
+       { IPv4(207,14,211,0),24 },
+       { IPv4(207,15,208,0),21 },
+       { IPv4(207,16,46,0),24 },
+       { IPv4(207,16,47,0),24 },
+       { IPv4(207,16,68,0),24 },
+       { IPv4(207,16,70,0),24 },
+       { IPv4(207,16,71,0),24 },
+       { IPv4(207,17,33,0),24 },
+       { IPv4(207,17,34,0),24 },
+       { IPv4(207,17,35,0),24 },
+       { IPv4(207,17,37,0),24 },
+       { IPv4(207,17,46,0),24 },
+       { IPv4(207,17,47,0),24 },
+       { IPv4(207,17,52,0),24 },
+       { IPv4(207,17,53,0),24 },
+       { IPv4(207,17,67,0),24 },
+       { IPv4(207,17,191,0),24 },
+       { IPv4(207,17,212,0),22 },
+       { IPv4(207,18,112,0),22 },
+       { IPv4(207,18,144,0),20 },
+       { IPv4(207,18,184,0),24 },
+       { IPv4(207,18,193,0),24 },
+       { IPv4(207,19,96,0),21 },
+       { IPv4(207,19,194,0),23 },
+       { IPv4(207,20,0,0),16 },
+       { IPv4(207,20,85,0),24 },
+       { IPv4(207,20,127,0),24 },
+       { IPv4(207,20,139,0),24 },
+       { IPv4(207,21,0,0),17 },
+       { IPv4(207,21,33,0),24 },
+       { IPv4(207,21,34,0),24 },
+       { IPv4(207,21,128,0),18 },
+       { IPv4(207,22,64,0),18 },
+       { IPv4(207,22,135,0),24 },
+       { IPv4(207,25,68,0),24 },
+       { IPv4(207,25,71,0),24 },
+       { IPv4(207,25,79,0),24 },
+       { IPv4(207,25,80,0),24 },
+       { IPv4(207,25,182,0),24 },
+       { IPv4(207,25,225,0),24 },
+       { IPv4(207,25,248,0),21 },
+       { IPv4(207,26,208,0),21 },
+       { IPv4(207,26,230,0),23 },
+       { IPv4(207,28,0,0),16 },
+       { IPv4(207,28,96,0),20 },
+       { IPv4(207,28,112,0),22 },
+       { IPv4(207,28,116,0),23 },
+       { IPv4(207,29,192,0),20 },
+       { IPv4(207,31,0,0),18 },
+       { IPv4(207,31,64,0),18 },
+       { IPv4(207,31,68,0),24 },
+       { IPv4(207,31,72,0),24 },
+       { IPv4(207,31,73,0),24 },
+       { IPv4(207,31,75,0),24 },
+       { IPv4(207,31,81,0),24 },
+       { IPv4(207,31,82,0),24 },
+       { IPv4(207,31,84,0),24 },
+       { IPv4(207,31,92,0),23 },
+       { IPv4(207,31,96,0),21 },
+       { IPv4(207,31,118,0),24 },
+       { IPv4(207,31,122,0),24 },
+       { IPv4(207,31,123,0),24 },
+       { IPv4(207,31,128,0),18 },
+       { IPv4(207,31,192,0),18 },
+       { IPv4(207,32,0,0),18 },
+       { IPv4(207,32,35,0),24 },
+       { IPv4(207,32,64,0),18 },
+       { IPv4(207,33,0,0),16 },
+       { IPv4(207,33,48,0),24 },
+       { IPv4(207,33,49,0),24 },
+       { IPv4(207,33,50,0),24 },
+       { IPv4(207,33,51,0),24 },
+       { IPv4(207,33,55,0),24 },
+       { IPv4(207,33,112,0),24 },
+       { IPv4(207,33,113,0),24 },
+       { IPv4(207,33,114,0),24 },
+       { IPv4(207,34,50,0),24 },
+       { IPv4(207,36,0,0),16 },
+       { IPv4(207,36,32,0),19 },
+       { IPv4(207,36,64,0),19 },
+       { IPv4(207,36,96,0),19 },
+       { IPv4(207,36,202,0),23 },
+       { IPv4(207,36,210,0),23 },
+       { IPv4(207,36,214,0),23 },
+       { IPv4(207,36,240,0),23 },
+       { IPv4(207,36,246,0),23 },
+       { IPv4(207,36,248,0),23 },
+       { IPv4(207,36,250,0),23 },
+       { IPv4(207,37,34,0),24 },
+       { IPv4(207,38,102,0),24 },
+       { IPv4(207,38,128,0),17 },
+       { IPv4(207,40,5,0),24 },
+       { IPv4(207,40,6,0),24 },
+       { IPv4(207,40,14,0),23 },
+       { IPv4(207,40,105,0),24 },
+       { IPv4(207,40,196,0),23 },
+       { IPv4(207,41,144,0),20 },
+       { IPv4(207,42,0,0),20 },
+       { IPv4(207,42,48,0),24 },
+       { IPv4(207,42,153,0),24 },
+       { IPv4(207,42,200,0),23 },
+       { IPv4(207,42,238,0),24 },
+       { IPv4(207,43,71,0),24 },
+       { IPv4(207,43,120,0),22 },
+       { IPv4(207,43,180,0),22 },
+       { IPv4(207,43,208,0),21 },
+       { IPv4(207,44,0,0),17 },
+       { IPv4(207,44,20,0),24 },
+       { IPv4(207,44,24,0),23 },
+       { IPv4(207,44,30,0),23 },
+       { IPv4(207,44,95,0),24 },
+       { IPv4(207,45,40,0),24 },
+       { IPv4(207,45,41,0),24 },
+       { IPv4(207,45,66,0),24 },
+       { IPv4(207,45,67,0),24 },
+       { IPv4(207,45,68,0),24 },
+       { IPv4(207,45,70,0),24 },
+       { IPv4(207,45,71,0),24 },
+       { IPv4(207,45,96,0),21 },
+       { IPv4(207,45,130,0),24 },
+       { IPv4(207,45,240,0),23 },
+       { IPv4(207,46,0,0),19 },
+       { IPv4(207,46,32,0),19 },
+       { IPv4(207,46,64,0),19 },
+       { IPv4(207,46,128,0),18 },
+       { IPv4(207,46,192,0),18 },
+       { IPv4(207,48,19,0),24 },
+       { IPv4(207,48,34,0),23 },
+       { IPv4(207,48,42,0),23 },
+       { IPv4(207,48,186,0),23 },
+       { IPv4(207,48,188,0),23 },
+       { IPv4(207,48,190,0),24 },
+       { IPv4(207,49,20,0),22 },
+       { IPv4(207,49,40,0),23 },
+       { IPv4(207,49,128,0),23 },
+       { IPv4(207,49,130,0),23 },
+       { IPv4(207,49,132,0),23 },
+       { IPv4(207,49,156,0),24 },
+       { IPv4(207,49,157,0),24 },
+       { IPv4(207,49,158,0),24 },
+       { IPv4(207,49,159,0),24 },
+       { IPv4(207,50,32,0),22 },
+       { IPv4(207,50,36,0),22 },
+       { IPv4(207,50,40,0),22 },
+       { IPv4(207,50,44,0),22 },
+       { IPv4(207,50,48,0),21 },
+       { IPv4(207,50,56,0),24 },
+       { IPv4(207,50,57,0),24 },
+       { IPv4(207,50,58,0),23 },
+       { IPv4(207,50,60,0),22 },
+       { IPv4(207,50,76,0),24 },
+       { IPv4(207,50,80,0),22 },
+       { IPv4(207,50,84,0),22 },
+       { IPv4(207,50,88,0),22 },
+       { IPv4(207,50,92,0),22 },
+       { IPv4(207,50,96,0),20 },
+       { IPv4(207,50,220,0),22 },
+       { IPv4(207,50,222,0),24 },
+       { IPv4(207,50,248,0),22 },
+       { IPv4(207,51,62,0),23 },
+       { IPv4(207,51,64,0),24 },
+       { IPv4(207,51,69,0),24 },
+       { IPv4(207,51,74,0),24 },
+       { IPv4(207,51,78,0),24 },
+       { IPv4(207,51,89,0),24 },
+       { IPv4(207,51,90,0),23 },
+       { IPv4(207,51,92,0),24 },
+       { IPv4(207,51,93,0),24 },
+       { IPv4(207,51,94,0),24 },
+       { IPv4(207,51,148,0),23 },
+       { IPv4(207,51,157,0),24 },
+       { IPv4(207,51,216,0),24 },
+       { IPv4(207,52,42,0),24 },
+       { IPv4(207,53,39,0),24 },
+       { IPv4(207,53,80,0),24 },
+       { IPv4(207,53,87,0),24 },
+       { IPv4(207,53,128,0),18 },
+       { IPv4(207,53,172,0),24 },
+       { IPv4(207,53,183,0),24 },
+       { IPv4(207,53,184,0),23 },
+       { IPv4(207,53,224,0),20 },
+       { IPv4(207,54,32,0),19 },
+       { IPv4(207,54,96,0),24 },
+       { IPv4(207,54,97,0),24 },
+       { IPv4(207,54,98,0),24 },
+       { IPv4(207,54,99,0),24 },
+       { IPv4(207,54,101,0),24 },
+       { IPv4(207,54,102,0),24 },
+       { IPv4(207,55,128,0),18 },
+       { IPv4(207,55,192,0),19 },
+       { IPv4(207,56,0,0),15 },
+       { IPv4(207,58,0,0),17 },
+       { IPv4(207,61,42,0),23 },
+       { IPv4(207,61,146,0),24 },
+       { IPv4(207,62,0,0),16 },
+       { IPv4(207,63,0,0),16 },
+       { IPv4(207,65,0,0),16 },
+       { IPv4(207,65,0,0),18 },
+       { IPv4(207,65,64,0),18 },
+       { IPv4(207,65,100,0),24 },
+       { IPv4(207,65,128,0),18 },
+       { IPv4(207,65,192,0),18 },
+       { IPv4(207,66,11,0),24 },
+       { IPv4(207,66,161,0),24 },
+       { IPv4(207,66,171,0),24 },
+       { IPv4(207,66,186,0),24 },
+       { IPv4(207,66,193,0),24 },
+       { IPv4(207,66,228,0),24 },
+       { IPv4(207,67,0,0),17 },
+       { IPv4(207,67,104,0),24 },
+       { IPv4(207,67,107,0),24 },
+       { IPv4(207,67,128,0),17 },
+       { IPv4(207,67,130,0),24 },
+       { IPv4(207,67,137,0),24 },
+       { IPv4(207,67,142,0),23 },
+       { IPv4(207,67,200,0),21 },
+       { IPv4(207,67,215,0),24 },
+       { IPv4(207,67,216,0),21 },
+       { IPv4(207,67,229,0),24 },
+       { IPv4(207,68,128,0),20 },
+       { IPv4(207,68,160,0),19 },
+       { IPv4(207,69,0,0),16 },
+       { IPv4(207,70,27,0),24 },
+       { IPv4(207,70,35,0),24 },
+       { IPv4(207,70,40,0),24 },
+       { IPv4(207,70,42,0),24 },
+       { IPv4(207,70,44,0),23 },
+       { IPv4(207,70,53,0),24 },
+       { IPv4(207,70,64,0),18 },
+       { IPv4(207,70,73,0),24 },
+       { IPv4(207,70,128,0),19 },
+       { IPv4(207,70,160,0),19 },
+       { IPv4(207,70,170,0),24 },
+       { IPv4(207,71,8,0),24 },
+       { IPv4(207,71,44,0),23 },
+       { IPv4(207,71,64,0),18 },
+       { IPv4(207,71,192,0),18 },
+       { IPv4(207,74,176,0),21 },
+       { IPv4(207,74,184,0),22 },
+       { IPv4(207,75,116,0),22 },
+       { IPv4(207,75,120,0),22 },
+       { IPv4(207,75,124,0),23 },
+       { IPv4(207,75,126,0),24 },
+       { IPv4(207,76,72,0),21 },
+       { IPv4(207,76,168,0),24 },
+       { IPv4(207,76,169,0),24 },
+       { IPv4(207,76,170,0),24 },
+       { IPv4(207,76,171,0),24 },
+       { IPv4(207,76,172,0),24 },
+       { IPv4(207,76,173,0),24 },
+       { IPv4(207,76,174,0),24 },
+       { IPv4(207,76,175,0),24 },
+       { IPv4(207,77,72,0),24 },
+       { IPv4(207,77,83,0),24 },
+       { IPv4(207,77,220,0),24 },
+       { IPv4(207,78,8,0),21 },
+       { IPv4(207,78,95,0),24 },
+       { IPv4(207,78,104,0),24 },
+       { IPv4(207,82,250,0),23 },
+       { IPv4(207,82,252,0),23 },
+       { IPv4(207,83,64,0),19 },
+       { IPv4(207,84,253,0),24 },
+       { IPv4(207,86,78,0),23 },
+       { IPv4(207,86,86,0),23 },
+       { IPv4(207,86,172,0),22 },
+       { IPv4(207,86,244,0),22 },
+       { IPv4(207,87,80,0),22 },
+       { IPv4(207,87,162,0),24 },
+       { IPv4(207,87,182,0),23 },
+       { IPv4(207,87,184,0),23 },
+       { IPv4(207,87,205,0),24 },
+       { IPv4(207,87,214,0),24 },
+       { IPv4(207,88,0,0),16 },
+       { IPv4(207,88,25,0),24 },
+       { IPv4(207,88,57,0),24 },
+       { IPv4(207,88,58,0),24 },
+       { IPv4(207,88,192,0),24 },
+       { IPv4(207,88,193,0),24 },
+       { IPv4(207,89,163,0),24 },
+       { IPv4(207,89,165,0),24 },
+       { IPv4(207,89,166,0),23 },
+       { IPv4(207,90,0,0),18 },
+       { IPv4(207,90,64,0),18 },
+       { IPv4(207,90,192,0),18 },
+       { IPv4(207,91,64,0),18 },
+       { IPv4(207,91,106,0),23 },
+       { IPv4(207,91,108,0),23 },
+       { IPv4(207,92,0,0),14 },
+       { IPv4(207,92,175,0),24 },
+       { IPv4(207,93,45,0),24 },
+       { IPv4(207,93,60,0),24 },
+       { IPv4(207,93,93,0),24 },
+       { IPv4(207,93,132,0),24 },
+       { IPv4(207,94,33,0),24 },
+       { IPv4(207,94,100,0),24 },
+       { IPv4(207,94,225,0),24 },
+       { IPv4(207,94,229,0),24 },
+       { IPv4(207,96,0,0),17 },
+       { IPv4(207,96,128,0),17 },
+       { IPv4(207,97,0,0),17 },
+       { IPv4(207,97,61,0),24 },
+       { IPv4(207,99,0,0),17 },
+       { IPv4(207,99,22,0),24 },
+       { IPv4(207,99,128,0),17 },
+       { IPv4(207,100,24,0),23 },
+       { IPv4(207,100,32,0),20 },
+       { IPv4(207,101,0,0),20 },
+       { IPv4(207,105,237,0),24 },
+       { IPv4(207,105,239,0),24 },
+       { IPv4(207,106,0,0),16 },
+       { IPv4(207,106,0,0),17 },
+       { IPv4(207,106,31,0),24 },
+       { IPv4(207,106,41,0),24 },
+       { IPv4(207,106,42,0),24 },
+       { IPv4(207,106,45,0),24 },
+       { IPv4(207,106,49,0),24 },
+       { IPv4(207,106,54,0),23 },
+       { IPv4(207,106,84,0),24 },
+       { IPv4(207,106,119,0),24 },
+       { IPv4(207,106,121,0),24 },
+       { IPv4(207,106,128,0),17 },
+       { IPv4(207,106,167,0),24 },
+       { IPv4(207,107,37,0),24 },
+       { IPv4(207,107,134,0),24 },
+       { IPv4(207,108,146,0),24 },
+       { IPv4(207,108,195,0),24 },
+       { IPv4(207,109,20,0),24 },
+       { IPv4(207,110,0,0),18 },
+       { IPv4(207,111,19,0),24 },
+       { IPv4(207,111,20,0),24 },
+       { IPv4(207,111,22,0),23 },
+       { IPv4(207,111,24,0),23 },
+       { IPv4(207,111,64,0),18 },
+       { IPv4(207,111,160,0),20 },
+       { IPv4(207,111,192,0),18 },
+       { IPv4(207,112,140,0),22 },
+       { IPv4(207,112,156,0),22 },
+       { IPv4(207,112,192,0),21 },
+       { IPv4(207,112,204,0),22 },
+       { IPv4(207,112,236,0),23 },
+       { IPv4(207,113,0,0),17 },
+       { IPv4(207,113,128,0),17 },
+       { IPv4(207,113,129,0),24 },
+       { IPv4(207,113,130,0),24 },
+       { IPv4(207,113,134,0),24 },
+       { IPv4(207,113,140,0),24 },
+       { IPv4(207,113,141,0),24 },
+       { IPv4(207,113,155,0),24 },
+       { IPv4(207,113,156,0),24 },
+       { IPv4(207,113,167,0),24 },
+       { IPv4(207,113,201,0),24 },
+       { IPv4(207,113,222,0),24 },
+       { IPv4(207,114,0,0),17 },
+       { IPv4(207,114,128,0),17 },
+       { IPv4(207,114,131,0),24 },
+       { IPv4(207,114,134,0),24 },
+       { IPv4(207,114,135,0),24 },
+       { IPv4(207,114,140,0),24 },
+       { IPv4(207,114,141,0),24 },
+       { IPv4(207,114,142,0),24 },
+       { IPv4(207,114,143,0),24 },
+       { IPv4(207,114,146,0),24 },
+       { IPv4(207,114,147,0),24 },
+       { IPv4(207,114,148,0),24 },
+       { IPv4(207,114,149,0),24 },
+       { IPv4(207,114,150,0),24 },
+       { IPv4(207,114,151,0),24 },
+       { IPv4(207,114,153,0),24 },
+       { IPv4(207,114,160,0),24 },
+       { IPv4(207,114,162,0),24 },
+       { IPv4(207,114,168,0),24 },
+       { IPv4(207,114,170,0),24 },
+       { IPv4(207,114,171,0),24 },
+       { IPv4(207,114,177,0),24 },
+       { IPv4(207,114,186,0),23 },
+       { IPv4(207,114,193,0),24 },
+       { IPv4(207,114,199,0),24 },
+       { IPv4(207,114,201,0),24 },
+       { IPv4(207,114,202,0),24 },
+       { IPv4(207,114,207,0),24 },
+       { IPv4(207,114,208,0),23 },
+       { IPv4(207,114,212,0),24 },
+       { IPv4(207,114,213,0),24 },
+       { IPv4(207,114,214,0),24 },
+       { IPv4(207,114,215,0),24 },
+       { IPv4(207,114,221,0),24 },
+       { IPv4(207,114,232,0),24 },
+       { IPv4(207,114,236,0),24 },
+       { IPv4(207,114,241,0),24 },
+       { IPv4(207,114,248,0),21 },
+       { IPv4(207,114,253,0),24 },
+       { IPv4(207,115,0,0),18 },
+       { IPv4(207,115,63,0),24 },
+       { IPv4(207,115,64,0),19 },
+       { IPv4(207,115,235,0),24 },
+       { IPv4(207,117,0,0),16 },
+       { IPv4(207,117,8,0),24 },
+       { IPv4(207,117,33,0),24 },
+       { IPv4(207,117,42,0),24 },
+       { IPv4(207,117,66,0),24 },
+       { IPv4(207,117,80,0),24 },
+       { IPv4(207,117,106,0),24 },
+       { IPv4(207,117,162,0),24 },
+       { IPv4(207,117,210,0),24 },
+       { IPv4(207,117,246,0),24 },
+       { IPv4(207,120,28,0),22 },
+       { IPv4(207,120,109,0),24 },
+       { IPv4(207,120,160,0),23 },
+       { IPv4(207,120,198,0),23 },
+       { IPv4(207,120,200,0),24 },
+       { IPv4(207,120,213,0),24 },
+       { IPv4(207,120,214,0),23 },
+       { IPv4(207,122,32,0),24 },
+       { IPv4(207,123,13,0),24 },
+       { IPv4(207,123,219,0),24 },
+       { IPv4(207,124,75,0),24 },
+       { IPv4(207,124,89,0),24 },
+       { IPv4(207,124,90,0),24 },
+       { IPv4(207,124,114,0),24 },
+       { IPv4(207,124,115,0),24 },
+       { IPv4(207,124,144,0),22 },
+       { IPv4(207,124,171,0),24 },
+       { IPv4(207,124,172,0),22 },
+       { IPv4(207,124,176,0),23 },
+       { IPv4(207,124,231,0),24 },
+       { IPv4(207,126,96,0),19 },
+       { IPv4(207,126,97,0),24 },
+       { IPv4(207,126,128,0),24 },
+       { IPv4(207,126,129,0),24 },
+       { IPv4(207,126,130,0),24 },
+       { IPv4(207,126,131,0),24 },
+       { IPv4(207,126,132,0),24 },
+       { IPv4(207,126,133,0),24 },
+       { IPv4(207,126,134,0),24 },
+       { IPv4(207,126,135,0),24 },
+       { IPv4(207,127,0,0),16 },
+       { IPv4(207,127,69,0),24 },
+       { IPv4(207,127,96,0),24 },
+       { IPv4(207,127,97,0),24 },
+       { IPv4(207,127,98,0),24 },
+       { IPv4(207,127,99,0),24 },
+       { IPv4(207,127,104,0),24 },
+       { IPv4(207,127,105,0),24 },
+       { IPv4(207,127,106,0),24 },
+       { IPv4(207,127,107,0),24 },
+       { IPv4(207,127,108,0),24 },
+       { IPv4(207,127,109,0),24 },
+       { IPv4(207,127,110,0),24 },
+       { IPv4(207,127,111,0),24 },
+       { IPv4(207,127,113,0),24 },
+       { IPv4(207,127,116,0),24 },
+       { IPv4(207,127,117,0),24 },
+       { IPv4(207,127,120,0),21 },
+       { IPv4(207,127,128,0),24 },
+       { IPv4(207,127,135,0),24 },
+       { IPv4(207,127,138,0),24 },
+       { IPv4(207,127,151,0),24 },
+       { IPv4(207,127,152,0),24 },
+       { IPv4(207,127,210,0),24 },
+       { IPv4(207,127,211,0),24 },
+       { IPv4(207,127,224,0),22 },
+       { IPv4(207,127,231,0),24 },
+       { IPv4(207,127,237,0),24 },
+       { IPv4(207,127,238,0),24 },
+       { IPv4(207,127,239,0),24 },
+       { IPv4(207,128,0,0),14 },
+       { IPv4(207,132,0,0),19 },
+       { IPv4(207,132,32,0),19 },
+       { IPv4(207,132,37,0),24 },
+       { IPv4(207,132,38,0),24 },
+       { IPv4(207,132,64,0),18 },
+       { IPv4(207,132,72,0),24 },
+       { IPv4(207,132,82,0),24 },
+       { IPv4(207,132,89,0),24 },
+       { IPv4(207,132,98,0),24 },
+       { IPv4(207,132,99,0),24 },
+       { IPv4(207,132,102,0),24 },
+       { IPv4(207,132,106,0),24 },
+       { IPv4(207,132,128,0),17 },
+       { IPv4(207,132,136,0),22 },
+       { IPv4(207,132,144,0),22 },
+       { IPv4(207,132,152,0),22 },
+       { IPv4(207,132,156,0),22 },
+       { IPv4(207,132,164,0),22 },
+       { IPv4(207,132,168,0),22 },
+       { IPv4(207,132,226,0),24 },
+       { IPv4(207,132,227,0),24 },
+       { IPv4(207,132,228,0),22 },
+       { IPv4(207,132,230,0),24 },
+       { IPv4(207,132,236,0),24 },
+       { IPv4(207,132,237,0),24 },
+       { IPv4(207,132,238,0),24 },
+       { IPv4(207,132,239,0),24 },
+       { IPv4(207,132,254,0),24 },
+       { IPv4(207,133,0,0),16 },
+       { IPv4(207,133,87,0),24 },
+       { IPv4(207,133,93,0),24 },
+       { IPv4(207,133,121,0),24 },
+       { IPv4(207,133,122,0),24 },
+       { IPv4(207,133,141,0),24 },
+       { IPv4(207,133,142,0),24 },
+       { IPv4(207,133,152,0),24 },
+       { IPv4(207,133,153,0),24 },
+       { IPv4(207,133,179,0),24 },
+       { IPv4(207,133,186,0),24 },
+       { IPv4(207,133,191,0),24 },
+       { IPv4(207,133,211,0),24 },
+       { IPv4(207,133,224,0),24 },
+       { IPv4(207,133,225,0),24 },
+       { IPv4(207,133,226,0),24 },
+       { IPv4(207,133,227,0),24 },
+       { IPv4(207,133,228,0),24 },
+       { IPv4(207,133,229,0),24 },
+       { IPv4(207,133,230,0),24 },
+       { IPv4(207,133,237,0),24 },
+       { IPv4(207,133,238,0),24 },
+       { IPv4(207,133,239,0),24 },
+       { IPv4(207,135,64,0),18 },
+       { IPv4(207,135,128,0),19 },
+       { IPv4(207,136,128,0),19 },
+       { IPv4(207,136,192,0),18 },
+       { IPv4(207,137,0,0),16 },
+       { IPv4(207,137,0,0),20 },
+       { IPv4(207,137,52,0),24 },
+       { IPv4(207,137,53,0),24 },
+       { IPv4(207,137,54,0),24 },
+       { IPv4(207,137,103,0),24 },
+       { IPv4(207,137,184,0),22 },
+       { IPv4(207,140,0,0),15 },
+       { IPv4(207,140,30,0),23 },
+       { IPv4(207,140,66,0),24 },
+       { IPv4(207,140,80,0),24 },
+       { IPv4(207,140,140,0),24 },
+       { IPv4(207,140,149,0),24 },
+       { IPv4(207,140,191,0),24 },
+       { IPv4(207,140,224,0),21 },
+       { IPv4(207,140,250,0),24 },
+       { IPv4(207,141,37,0),24 },
+       { IPv4(207,141,56,0),22 },
+       { IPv4(207,141,150,0),24 },
+       { IPv4(207,144,0,0),16 },
+       { IPv4(207,148,192,0),19 },
+       { IPv4(207,149,14,0),24 },
+       { IPv4(207,149,47,0),24 },
+       { IPv4(207,149,51,0),24 },
+       { IPv4(207,149,52,0),22 },
+       { IPv4(207,149,81,0),24 },
+       { IPv4(207,149,113,0),24 },
+       { IPv4(207,149,115,0),24 },
+       { IPv4(207,149,192,0),22 },
+       { IPv4(207,149,230,0),23 },
+       { IPv4(207,150,0,0),17 },
+       { IPv4(207,150,128,0),19 },
+       { IPv4(207,150,224,0),20 },
+       { IPv4(207,151,146,0),23 },
+       { IPv4(207,151,152,0),21 },
+       { IPv4(207,151,160,0),21 },
+       { IPv4(207,152,64,0),18 },
+       { IPv4(207,152,128,0),19 },
+       { IPv4(207,152,128,0),18 },
+       { IPv4(207,152,152,0),24 },
+       { IPv4(207,152,166,0),24 },
+       { IPv4(207,152,168,0),24 },
+       { IPv4(207,152,169,0),24 },
+       { IPv4(207,152,170,0),24 },
+       { IPv4(207,152,171,0),24 },
+       { IPv4(207,152,172,0),24 },
+       { IPv4(207,153,0,0),18 },
+       { IPv4(207,153,64,0),18 },
+       { IPv4(207,153,67,0),24 },
+       { IPv4(207,153,68,0),24 },
+       { IPv4(207,153,90,0),24 },
+       { IPv4(207,153,92,0),24 },
+       { IPv4(207,153,93,0),24 },
+       { IPv4(207,153,104,0),24 },
+       { IPv4(207,153,107,0),24 },
+       { IPv4(207,153,108,0),24 },
+       { IPv4(207,153,110,0),24 },
+       { IPv4(207,153,111,0),24 },
+       { IPv4(207,153,112,0),24 },
+       { IPv4(207,153,115,0),24 },
+       { IPv4(207,153,122,0),24 },
+       { IPv4(207,153,123,0),24 },
+       { IPv4(207,153,127,0),24 },
+       { IPv4(207,153,128,0),18 },
+       { IPv4(207,153,192,0),18 },
+       { IPv4(207,154,0,0),18 },
+       { IPv4(207,155,0,0),16 },
+       { IPv4(207,155,0,0),17 },
+       { IPv4(207,155,128,0),17 },
+       { IPv4(207,155,140,0),22 },
+       { IPv4(207,156,128,0),17 },
+       { IPv4(207,157,128,0),17 },
+       { IPv4(207,158,0,0),18 },
+       { IPv4(207,158,192,0),18 },
+       { IPv4(207,159,0,0),18 },
+       { IPv4(207,159,192,0),18 },
+       { IPv4(207,160,0,0),17 },
+       { IPv4(207,160,128,0),18 },
+       { IPv4(207,160,192,0),19 },
+       { IPv4(207,160,224,0),19 },
+       { IPv4(207,161,6,0),24 },
+       { IPv4(207,161,67,0),24 },
+       { IPv4(207,161,84,0),24 },
+       { IPv4(207,161,104,0),24 },
+       { IPv4(207,161,106,0),24 },
+       { IPv4(207,161,108,0),24 },
+       { IPv4(207,161,149,0),24 },
+       { IPv4(207,161,150,0),24 },
+       { IPv4(207,161,188,0),22 },
+       { IPv4(207,161,224,0),21 },
+       { IPv4(207,161,241,0),24 },
+       { IPv4(207,165,0,0),16 },
+       { IPv4(207,167,130,0),23 },
+       { IPv4(207,168,128,0),19 },
+       { IPv4(207,170,128,0),18 },
+       { IPv4(207,170,192,0),21 },
+       { IPv4(207,170,200,0),21 },
+       { IPv4(207,170,208,0),21 },
+       { IPv4(207,170,216,0),21 },
+       { IPv4(207,170,216,0),23 },
+       { IPv4(207,170,218,0),24 },
+       { IPv4(207,170,222,0),24 },
+       { IPv4(207,170,224,0),21 },
+       { IPv4(207,170,227,0),24 },
+       { IPv4(207,170,228,0),24 },
+       { IPv4(207,170,229,0),24 },
+       { IPv4(207,170,230,0),23 },
+       { IPv4(207,170,230,0),24 },
+       { IPv4(207,170,232,0),21 },
+       { IPv4(207,170,232,0),24 },
+       { IPv4(207,170,237,0),24 },
+       { IPv4(207,170,239,0),24 },
+       { IPv4(207,170,240,0),21 },
+       { IPv4(207,170,243,0),24 },
+       { IPv4(207,170,248,0),21 },
+       { IPv4(207,171,64,0),18 },
+       { IPv4(207,171,160,0),20 },
+       { IPv4(207,171,176,0),20 },
+       { IPv4(207,172,0,0),16 },
+       { IPv4(207,173,128,0),20 },
+       { IPv4(207,173,224,0),24 },
+       { IPv4(207,173,229,0),24 },
+       { IPv4(207,173,230,0),24 },
+       { IPv4(207,174,0,0),17 },
+       { IPv4(207,174,6,0),24 },
+       { IPv4(207,174,69,0),24 },
+       { IPv4(207,174,72,0),24 },
+       { IPv4(207,174,103,0),24 },
+       { IPv4(207,174,128,0),18 },
+       { IPv4(207,174,134,0),24 },
+       { IPv4(207,174,137,0),24 },
+       { IPv4(207,174,138,0),24 },
+       { IPv4(207,174,140,0),24 },
+       { IPv4(207,174,142,0),24 },
+       { IPv4(207,174,143,0),24 },
+       { IPv4(207,174,157,0),24 },
+       { IPv4(207,174,172,0),24 },
+       { IPv4(207,174,175,0),24 },
+       { IPv4(207,174,178,0),24 },
+       { IPv4(207,174,183,0),24 },
+       { IPv4(207,174,184,0),24 },
+       { IPv4(207,174,192,0),18 },
+       { IPv4(207,174,201,0),24 },
+       { IPv4(207,174,203,0),24 },
+       { IPv4(207,174,204,0),24 },
+       { IPv4(207,175,132,0),24 },
+       { IPv4(207,175,138,0),23 },
+       { IPv4(207,175,140,0),23 },
+       { IPv4(207,175,157,0),24 },
+       { IPv4(207,175,200,0),23 },
+       { IPv4(207,175,209,0),24 },
+       { IPv4(207,175,214,0),23 },
+       { IPv4(207,175,216,0),22 },
+       { IPv4(207,175,220,0),23 },
+       { IPv4(207,176,8,0),21 },
+       { IPv4(207,177,0,0),17 },
+       { IPv4(207,178,42,0),23 },
+       { IPv4(207,178,156,0),22 },
+       { IPv4(207,179,70,0),24 },
+       { IPv4(207,179,71,0),24 },
+       { IPv4(207,179,72,0),24 },
+       { IPv4(207,179,73,0),24 },
+       { IPv4(207,179,74,0),24 },
+       { IPv4(207,179,75,0),24 },
+       { IPv4(207,179,76,0),24 },
+       { IPv4(207,179,91,0),24 },
+       { IPv4(207,179,96,0),20 },
+       { IPv4(207,179,96,0),24 },
+       { IPv4(207,179,174,0),24 },
+       { IPv4(207,180,64,0),19 },
+       { IPv4(207,180,124,0),24 },
+       { IPv4(207,180,128,0),18 },
+       { IPv4(207,180,192,0),18 },
+       { IPv4(207,180,206,0),24 },
+       { IPv4(207,181,64,0),19 },
+       { IPv4(207,181,64,0),18 },
+       { IPv4(207,181,69,0),24 },
+       { IPv4(207,181,70,0),24 },
+       { IPv4(207,181,76,0),22 },
+       { IPv4(207,181,80,0),20 },
+       { IPv4(207,181,96,0),19 },
+       { IPv4(207,181,96,0),21 },
+       { IPv4(207,181,104,0),22 },
+       { IPv4(207,181,108,0),23 },
+       { IPv4(207,181,112,0),20 },
+       { IPv4(207,181,113,0),24 },
+       { IPv4(207,181,114,0),24 },
+       { IPv4(207,181,118,0),24 },
+       { IPv4(207,181,126,0),24 },
+       { IPv4(207,181,192,0),18 },
+       { IPv4(207,182,101,0),24 },
+       { IPv4(207,182,103,0),24 },
+       { IPv4(207,182,114,0),24 },
+       { IPv4(207,182,116,0),24 },
+       { IPv4(207,182,118,0),24 },
+       { IPv4(207,182,119,0),24 },
+       { IPv4(207,182,120,0),24 },
+       { IPv4(207,182,123,0),24 },
+       { IPv4(207,182,160,0),19 },
+       { IPv4(207,182,192,0),24 },
+       { IPv4(207,182,207,0),24 },
+       { IPv4(207,182,224,0),19 },
+       { IPv4(207,183,32,0),19 },
+       { IPv4(207,183,32,0),24 },
+       { IPv4(207,183,96,0),20 },
+       { IPv4(207,183,116,0),24 },
+       { IPv4(207,183,117,0),24 },
+       { IPv4(207,188,0,0),19 },
+       { IPv4(207,188,192,0),19 },
+       { IPv4(207,189,64,0),19 },
+       { IPv4(207,189,143,0),24 },
+       { IPv4(207,189,192,0),19 },
+       { IPv4(207,190,128,0),19 },
+       { IPv4(207,192,192,0),19 },
+       { IPv4(207,196,0,0),17 },
+       { IPv4(207,196,28,0),24 },
+       { IPv4(207,196,81,0),24 },
+       { IPv4(207,197,128,0),17 },
+       { IPv4(207,198,11,0),24 },
+       { IPv4(207,198,12,0),23 },
+       { IPv4(207,198,14,0),24 },
+       { IPv4(207,198,16,0),22 },
+       { IPv4(207,198,21,0),24 },
+       { IPv4(207,198,22,0),23 },
+       { IPv4(207,198,39,0),24 },
+       { IPv4(207,198,40,0),22 },
+       { IPv4(207,198,44,0),23 },
+       { IPv4(207,198,128,0),17 },
+       { IPv4(207,199,0,0),17 },
+       { IPv4(207,199,33,0),24 },
+       { IPv4(207,199,128,0),18 },
+       { IPv4(207,199,252,0),23 },
+       { IPv4(207,200,64,0),19 },
+       { IPv4(207,200,132,0),22 },
+       { IPv4(207,200,136,0),22 },
+       { IPv4(207,200,142,0),24 },
+       { IPv4(207,201,61,0),24 },
+       { IPv4(207,201,62,0),24 },
+       { IPv4(207,201,128,0),18 },
+       { IPv4(207,201,192,0),18 },
+       { IPv4(207,202,0,0),17 },
+       { IPv4(207,204,8,0),21 },
+       { IPv4(207,204,166,0),24 },
+       { IPv4(207,204,168,0),24 },
+       { IPv4(207,204,169,0),24 },
+       { IPv4(207,204,170,0),24 },
+       { IPv4(207,204,192,0),20 },
+       { IPv4(207,204,194,0),24 },
+       { IPv4(207,204,195,0),24 },
+       { IPv4(207,204,198,0),24 },
+       { IPv4(207,204,202,0),24 },
+       { IPv4(207,204,222,0),24 },
+       { IPv4(207,204,248,0),24 },
+       { IPv4(207,206,0,0),17 },
+       { IPv4(207,207,0,0),18 },
+       { IPv4(207,207,128,0),18 },
+       { IPv4(207,207,160,0),21 },
+       { IPv4(207,207,192,0),18 },
+       { IPv4(207,208,0,0),16 },
+       { IPv4(207,208,32,0),24 },
+       { IPv4(207,209,17,0),24 },
+       { IPv4(207,209,40,0),24 },
+       { IPv4(207,209,88,0),24 },
+       { IPv4(207,209,96,0),24 },
+       { IPv4(207,209,98,0),24 },
+       { IPv4(207,209,181,0),24 },
+       { IPv4(207,209,182,0),24 },
+       { IPv4(207,209,250,0),24 },
+       { IPv4(207,211,0,0),16 },
+       { IPv4(207,211,1,0),24 },
+       { IPv4(207,211,2,0),24 },
+       { IPv4(207,211,4,0),24 },
+       { IPv4(207,211,22,0),23 },
+       { IPv4(207,211,35,0),24 },
+       { IPv4(207,211,36,0),24 },
+       { IPv4(207,211,79,0),24 },
+       { IPv4(207,211,80,0),20 },
+       { IPv4(207,211,107,0),24 },
+       { IPv4(207,211,150,0),24 },
+       { IPv4(207,211,152,0),24 },
+       { IPv4(207,211,160,0),24 },
+       { IPv4(207,211,188,0),24 },
+       { IPv4(207,211,208,0),21 },
+       { IPv4(207,211,220,0),24 },
+       { IPv4(207,211,221,0),24 },
+       { IPv4(207,211,222,0),24 },
+       { IPv4(207,211,223,0),24 },
+       { IPv4(207,211,228,0),23 },
+       { IPv4(207,211,230,0),24 },
+       { IPv4(207,211,243,0),24 },
+       { IPv4(207,211,248,0),24 },
+       { IPv4(207,212,3,0),24 },
+       { IPv4(207,212,112,0),21 },
+       { IPv4(207,212,169,0),24 },
+       { IPv4(207,213,32,0),23 },
+       { IPv4(207,213,160,0),20 },
+       { IPv4(207,213,246,0),24 },
+       { IPv4(207,214,60,0),23 },
+       { IPv4(207,217,0,0),16 },
+       { IPv4(207,217,15,0),24 },
+       { IPv4(207,218,46,0),24 },
+       { IPv4(207,220,0,0),14 },
+       { IPv4(207,220,23,0),24 },
+       { IPv4(207,221,18,0),24 },
+       { IPv4(207,221,22,0),23 },
+       { IPv4(207,222,18,0),23 },
+       { IPv4(207,222,44,0),24 },
+       { IPv4(207,222,45,0),24 },
+       { IPv4(207,222,46,0),24 },
+       { IPv4(207,222,83,0),24 },
+       { IPv4(207,222,124,0),24 },
+       { IPv4(207,222,159,0),24 },
+       { IPv4(207,222,161,0),24 },
+       { IPv4(207,222,169,0),24 },
+       { IPv4(207,222,175,0),24 },
+       { IPv4(207,223,98,0),24 },
+       { IPv4(207,223,154,0),24 },
+       { IPv4(207,223,160,0),24 },
+       { IPv4(207,223,208,0),24 },
+       { IPv4(207,224,223,0),24 },
+       { IPv4(207,225,253,0),24 },
+       { IPv4(207,226,140,0),22 },
+       { IPv4(207,226,160,0),19 },
+       { IPv4(207,227,24,0),21 },
+       { IPv4(207,227,81,0),24 },
+       { IPv4(207,227,96,0),21 },
+       { IPv4(207,227,192,0),21 },
+       { IPv4(207,227,204,0),22 },
+       { IPv4(207,227,204,0),24 },
+       { IPv4(207,227,206,0),24 },
+       { IPv4(207,228,224,0),19 },
+       { IPv4(207,229,64,0),18 },
+       { IPv4(207,229,71,0),24 },
+       { IPv4(207,229,72,0),22 },
+       { IPv4(207,229,128,0),18 },
+       { IPv4(207,229,188,0),22 },
+       { IPv4(207,229,192,0),18 },
+       { IPv4(207,230,20,0),22 },
+       { IPv4(207,230,24,0),22 },
+       { IPv4(207,230,32,0),19 },
+       { IPv4(207,230,56,0),24 },
+       { IPv4(207,230,141,0),24 },
+       { IPv4(207,230,143,0),24 },
+       { IPv4(207,230,144,0),24 },
+       { IPv4(207,230,148,0),24 },
+       { IPv4(207,230,150,0),24 },
+       { IPv4(207,230,151,0),24 },
+       { IPv4(207,230,152,0),24 },
+       { IPv4(207,230,153,0),24 },
+       { IPv4(207,230,154,0),24 },
+       { IPv4(207,230,155,0),24 },
+       { IPv4(207,230,156,0),24 },
+       { IPv4(207,230,157,0),24 },
+       { IPv4(207,230,158,0),24 },
+       { IPv4(207,230,159,0),24 },
+       { IPv4(207,230,160,0),19 },
+       { IPv4(207,230,224,0),19 },
+       { IPv4(207,231,96,0),19 },
+       { IPv4(207,231,128,0),19 },
+       { IPv4(207,232,128,0),17 },
+       { IPv4(207,233,0,0),17 },
+       { IPv4(207,234,128,0),17 },
+       { IPv4(207,234,138,0),23 },
+       { IPv4(207,234,140,0),23 },
+       { IPv4(207,234,142,0),24 },
+       { IPv4(207,234,163,0),24 },
+       { IPv4(207,234,169,0),24 },
+       { IPv4(207,234,171,0),24 },
+       { IPv4(207,234,173,0),24 },
+       { IPv4(207,234,174,0),23 },
+       { IPv4(207,234,176,0),24 },
+       { IPv4(207,234,178,0),23 },
+       { IPv4(207,234,180,0),24 },
+       { IPv4(207,234,199,0),24 },
+       { IPv4(207,234,223,0),24 },
+       { IPv4(207,234,232,0),23 },
+       { IPv4(207,234,248,0),24 },
+       { IPv4(207,234,252,0),24 },
+       { IPv4(207,235,4,0),22 },
+       { IPv4(207,235,16,0),23 },
+       { IPv4(207,237,0,0),16 },
+       { IPv4(207,239,118,0),24 },
+       { IPv4(207,239,150,0),24 },
+       { IPv4(207,239,204,0),22 },
+       { IPv4(207,239,212,0),22 },
+       { IPv4(207,239,220,0),23 },
+       { IPv4(207,239,220,0),22 },
+       { IPv4(207,239,222,0),23 },
+       { IPv4(207,239,226,0),24 },
+       { IPv4(207,239,230,0),24 },
+       { IPv4(207,239,240,0),21 },
+       { IPv4(207,241,0,0),17 },
+       { IPv4(207,241,20,0),24 },
+       { IPv4(207,241,160,0),19 },
+       { IPv4(207,241,192,0),24 },
+       { IPv4(207,241,193,0),24 },
+       { IPv4(207,241,194,0),24 },
+       { IPv4(207,241,195,0),24 },
+       { IPv4(207,241,196,0),24 },
+       { IPv4(207,241,198,0),24 },
+       { IPv4(207,241,200,0),24 },
+       { IPv4(207,241,201,0),24 },
+       { IPv4(207,241,202,0),24 },
+       { IPv4(207,241,203,0),24 },
+       { IPv4(207,241,204,0),24 },
+       { IPv4(207,241,205,0),24 },
+       { IPv4(207,241,206,0),24 },
+       { IPv4(207,241,207,0),24 },
+       { IPv4(207,241,240,0),20 },
+       { IPv4(207,242,0,0),15 },
+       { IPv4(207,242,16,0),20 },
+       { IPv4(207,242,205,0),24 },
+       { IPv4(207,242,244,0),24 },
+       { IPv4(207,243,23,0),24 },
+       { IPv4(207,243,58,0),24 },
+       { IPv4(207,243,59,0),24 },
+       { IPv4(207,243,120,0),22 },
+       { IPv4(207,243,145,0),24 },
+       { IPv4(207,243,146,0),24 },
+       { IPv4(207,243,245,0),24 },
+       { IPv4(207,244,0,0),24 },
+       { IPv4(207,244,0,0),18 },
+       { IPv4(207,244,1,0),24 },
+       { IPv4(207,244,2,0),24 },
+       { IPv4(207,244,5,0),24 },
+       { IPv4(207,244,7,0),24 },
+       { IPv4(207,244,8,0),24 },
+       { IPv4(207,244,10,0),24 },
+       { IPv4(207,244,11,0),24 },
+       { IPv4(207,244,12,0),24 },
+       { IPv4(207,244,13,0),24 },
+       { IPv4(207,244,14,0),24 },
+       { IPv4(207,244,15,0),24 },
+       { IPv4(207,244,18,0),24 },
+       { IPv4(207,244,20,0),24 },
+       { IPv4(207,244,23,0),24 },
+       { IPv4(207,244,25,0),24 },
+       { IPv4(207,244,32,0),24 },
+       { IPv4(207,244,33,0),24 },
+       { IPv4(207,244,34,0),24 },
+       { IPv4(207,244,35,0),24 },
+       { IPv4(207,244,36,0),24 },
+       { IPv4(207,244,37,0),24 },
+       { IPv4(207,244,38,0),24 },
+       { IPv4(207,244,39,0),24 },
+       { IPv4(207,244,40,0),24 },
+       { IPv4(207,244,42,0),24 },
+       { IPv4(207,244,46,0),24 },
+       { IPv4(207,244,47,0),24 },
+       { IPv4(207,244,49,0),24 },
+       { IPv4(207,244,56,0),24 },
+       { IPv4(207,244,57,0),24 },
+       { IPv4(207,244,58,0),24 },
+       { IPv4(207,244,59,0),24 },
+       { IPv4(207,244,60,0),24 },
+       { IPv4(207,244,61,0),24 },
+       { IPv4(207,244,62,0),24 },
+       { IPv4(207,244,63,0),24 },
+       { IPv4(207,245,0,0),18 },
+       { IPv4(207,245,64,0),18 },
+       { IPv4(207,245,136,0),24 },
+       { IPv4(207,245,138,0),24 },
+       { IPv4(207,245,192,0),18 },
+       { IPv4(207,245,216,0),24 },
+       { IPv4(207,245,243,0),24 },
+       { IPv4(207,245,244,0),23 },
+       { IPv4(207,246,0,0),18 },
+       { IPv4(207,246,0,0),19 },
+       { IPv4(207,246,64,0),18 },
+       { IPv4(207,246,160,0),19 },
+       { IPv4(207,246,208,0),24 },
+       { IPv4(207,246,209,0),24 },
+       { IPv4(207,246,224,0),19 },
+       { IPv4(207,248,0,0),19 },
+       { IPv4(207,248,40,0),21 },
+       { IPv4(207,248,56,0),21 },
+       { IPv4(207,248,66,0),24 },
+       { IPv4(207,248,88,0),22 },
+       { IPv4(207,248,88,0),24 },
+       { IPv4(207,248,89,0),24 },
+       { IPv4(207,248,90,0),24 },
+       { IPv4(207,248,91,0),24 },
+       { IPv4(207,248,96,0),24 },
+       { IPv4(207,248,118,0),24 },
+       { IPv4(207,248,128,0),19 },
+       { IPv4(207,248,144,0),24 },
+       { IPv4(207,248,178,0),24 },
+       { IPv4(207,248,197,0),24 },
+       { IPv4(207,248,224,0),20 },
+       { IPv4(207,248,224,0),19 },
+       { IPv4(207,248,240,0),20 },
+       { IPv4(207,249,32,0),19 },
+       { IPv4(207,249,64,0),19 },
+       { IPv4(207,250,0,0),17 },
+       { IPv4(207,250,13,0),24 },
+       { IPv4(207,250,49,0),24 },
+       { IPv4(207,250,74,0),24 },
+       { IPv4(207,250,128,0),17 },
+       { IPv4(207,250,143,0),24 },
+       { IPv4(207,250,163,0),24 },
+       { IPv4(207,250,166,0),24 },
+       { IPv4(207,250,169,0),24 },
+       { IPv4(207,250,177,0),24 },
+       { IPv4(207,250,185,0),24 },
+       { IPv4(207,250,191,0),24 },
+       { IPv4(207,250,209,0),24 },
+       { IPv4(207,251,112,0),23 },
+       { IPv4(207,251,120,0),23 },
+       { IPv4(207,252,0,0),16 },
+       { IPv4(207,252,0,0),22 },
+       { IPv4(207,252,5,0),24 },
+       { IPv4(207,252,20,0),23 },
+       { IPv4(207,252,22,0),24 },
+       { IPv4(207,252,28,0),24 },
+       { IPv4(207,252,60,0),22 },
+       { IPv4(207,252,72,0),21 },
+       { IPv4(207,252,96,0),24 },
+       { IPv4(207,252,104,0),23 },
+       { IPv4(207,252,121,0),24 },
+       { IPv4(207,252,127,0),24 },
+       { IPv4(207,252,144,0),22 },
+       { IPv4(207,252,152,0),21 },
+       { IPv4(207,252,152,0),24 },
+       { IPv4(207,252,153,0),24 },
+       { IPv4(207,252,154,0),24 },
+       { IPv4(207,252,155,0),24 },
+       { IPv4(207,252,156,0),24 },
+       { IPv4(207,252,157,0),24 },
+       { IPv4(207,252,158,0),24 },
+       { IPv4(207,252,159,0),24 },
+       { IPv4(207,252,161,0),24 },
+       { IPv4(207,252,165,0),24 },
+       { IPv4(207,252,196,0),22 },
+       { IPv4(207,252,208,0),21 },
+       { IPv4(207,253,0,0),16 },
+       { IPv4(207,254,0,0),17 },
+       { IPv4(207,254,112,0),20 },
+       { IPv4(208,0,8,0),24 },
+       { IPv4(208,0,32,0),20 },
+       { IPv4(208,0,48,0),24 },
+       { IPv4(208,0,80,0),20 },
+       { IPv4(208,0,96,0),21 },
+       { IPv4(208,0,192,0),20 },
+       { IPv4(208,1,7,0),24 },
+       { IPv4(208,1,56,0),23 },
+       { IPv4(208,1,127,0),24 },
+       { IPv4(208,1,140,0),22 },
+       { IPv4(208,1,190,0),24 },
+       { IPv4(208,1,224,0),24 },
+       { IPv4(208,1,232,0),24 },
+       { IPv4(208,2,96,0),20 },
+       { IPv4(208,2,153,0),24 },
+       { IPv4(208,2,182,0),24 },
+       { IPv4(208,2,204,0),24 },
+       { IPv4(208,2,250,0),23 },
+       { IPv4(208,3,32,0),24 },
+       { IPv4(208,3,34,0),23 },
+       { IPv4(208,3,38,0),24 },
+       { IPv4(208,3,45,0),24 },
+       { IPv4(208,3,134,0),24 },
+       { IPv4(208,3,164,0),22 },
+       { IPv4(208,3,212,0),24 },
+       { IPv4(208,3,248,0),22 },
+       { IPv4(208,4,8,0),23 },
+       { IPv4(208,4,48,0),24 },
+       { IPv4(208,4,64,0),23 },
+       { IPv4(208,4,179,0),24 },
+       { IPv4(208,4,240,0),20 },
+       { IPv4(208,5,24,0),21 },
+       { IPv4(208,5,34,0),24 },
+       { IPv4(208,5,35,0),24 },
+       { IPv4(208,5,36,0),24 },
+       { IPv4(208,5,48,0),22 },
+       { IPv4(208,5,176,0),20 },
+       { IPv4(208,5,192,0),24 },
+       { IPv4(208,5,235,0),24 },
+       { IPv4(208,6,68,0),22 },
+       { IPv4(208,6,128,0),19 },
+       { IPv4(208,7,62,0),24 },
+       { IPv4(208,7,208,0),22 },
+       { IPv4(208,8,16,0),21 },
+       { IPv4(208,8,49,0),24 },
+       { IPv4(208,8,80,0),24 },
+       { IPv4(208,9,194,0),24 },
+       { IPv4(208,9,196,0),24 },
+       { IPv4(208,9,212,0),24 },
+       { IPv4(208,10,25,0),24 },
+       { IPv4(208,10,235,0),24 },
+       { IPv4(208,10,236,0),23 },
+       { IPv4(208,10,246,0),23 },
+       { IPv4(208,11,24,0),21 },
+       { IPv4(208,11,224,0),24 },
+       { IPv4(208,12,0,0),19 },
+       { IPv4(208,12,32,0),19 },
+       { IPv4(208,12,104,0),24 },
+       { IPv4(208,12,166,0),24 },
+       { IPv4(208,12,168,0),22 },
+       { IPv4(208,13,0,0),19 },
+       { IPv4(208,13,36,0),23 },
+       { IPv4(208,13,96,0),19 },
+       { IPv4(208,13,146,0),24 },
+       { IPv4(208,13,157,0),24 },
+       { IPv4(208,13,174,0),24 },
+       { IPv4(208,14,219,0),24 },
+       { IPv4(208,15,192,0),19 },
+       { IPv4(208,16,208,0),21 },
+       { IPv4(208,17,9,0),24 },
+       { IPv4(208,17,12,0),23 },
+       { IPv4(208,17,184,0),24 },
+       { IPv4(208,17,185,0),24 },
+       { IPv4(208,17,215,0),24 },
+       { IPv4(208,18,52,0),24 },
+       { IPv4(208,18,53,0),24 },
+       { IPv4(208,19,81,0),24 },
+       { IPv4(208,19,140,0),23 },
+       { IPv4(208,19,155,0),24 },
+       { IPv4(208,20,148,0),24 },
+       { IPv4(208,20,151,0),24 },
+       { IPv4(208,20,232,0),24 },
+       { IPv4(208,20,236,0),24 },
+       { IPv4(208,21,22,0),24 },
+       { IPv4(208,21,24,0),23 },
+       { IPv4(208,21,42,0),24 },
+       { IPv4(208,21,100,0),22 },
+       { IPv4(208,21,104,0),21 },
+       { IPv4(208,22,59,0),24 },
+       { IPv4(208,22,176,0),24 },
+       { IPv4(208,23,32,0),19 },
+       { IPv4(208,23,167,0),24 },
+       { IPv4(208,23,200,0),21 },
+       { IPv4(208,24,134,128),25 },
+       { IPv4(208,24,136,0),23 },
+       { IPv4(208,25,12,0),24 },
+       { IPv4(208,25,64,0),19 },
+       { IPv4(208,25,98,0),24 },
+       { IPv4(208,25,168,0),24 },
+       { IPv4(208,25,212,0),24 },
+       { IPv4(208,25,244,0),24 },
+       { IPv4(208,25,251,0),24 },
+       { IPv4(208,25,252,0),23 },
+       { IPv4(208,26,8,0),21 },
+       { IPv4(208,26,98,0),24 },
+       { IPv4(208,26,152,0),24 },
+       { IPv4(208,27,20,0),22 },
+       { IPv4(208,27,24,0),22 },
+       { IPv4(208,27,56,0),24 },
+       { IPv4(208,27,86,0),24 },
+       { IPv4(208,27,132,0),24 },
+       { IPv4(208,27,190,0),24 },
+       { IPv4(208,27,191,0),24 },
+       { IPv4(208,27,200,0),24 },
+       { IPv4(208,28,64,0),24 },
+       { IPv4(208,28,112,0),24 },
+       { IPv4(208,29,28,0),22 },
+       { IPv4(208,29,218,0),24 },
+       { IPv4(208,30,64,0),21 },
+       { IPv4(208,30,129,0),24 },
+       { IPv4(208,30,132,0),22 },
+       { IPv4(208,30,152,0),21 },
+       { IPv4(208,31,144,0),24 },
+       { IPv4(208,31,149,0),24 },
+       { IPv4(208,31,156,0),23 },
+       { IPv4(208,32,65,0),24 },
+       { IPv4(208,32,67,0),24 },
+       { IPv4(208,32,226,0),24 },
+       { IPv4(208,33,36,0),24 },
+       { IPv4(208,33,38,0),23 },
+       { IPv4(208,33,40,0),24 },
+       { IPv4(208,33,92,0),24 },
+       { IPv4(208,33,182,0),24 },
+       { IPv4(208,33,216,0),22 },
+       { IPv4(208,34,8,0),21 },
+       { IPv4(208,34,42,0),24 },
+       { IPv4(208,34,62,0),24 },
+       { IPv4(208,34,80,0),21 },
+       { IPv4(208,34,96,0),20 },
+       { IPv4(208,34,241,0),24 },
+       { IPv4(208,35,201,0),24 },
+       { IPv4(208,36,0,0),15 },
+       { IPv4(208,36,9,0),24 },
+       { IPv4(208,36,10,0),24 },
+       { IPv4(208,36,49,0),24 },
+       { IPv4(208,36,50,0),23 },
+       { IPv4(208,36,53,0),24 },
+       { IPv4(208,36,54,0),23 },
+       { IPv4(208,36,96,0),23 },
+       { IPv4(208,36,99,0),24 },
+       { IPv4(208,36,100,0),24 },
+       { IPv4(208,36,102,0),24 },
+       { IPv4(208,36,103,0),24 },
+       { IPv4(208,36,115,0),24 },
+       { IPv4(208,36,144,0),23 },
+       { IPv4(208,36,148,0),24 },
+       { IPv4(208,36,176,0),24 },
+       { IPv4(208,36,177,0),24 },
+       { IPv4(208,36,178,0),24 },
+       { IPv4(208,36,179,0),24 },
+       { IPv4(208,36,200,0),21 },
+       { IPv4(208,36,212,0),22 },
+       { IPv4(208,36,217,0),24 },
+       { IPv4(208,36,224,0),24 },
+       { IPv4(208,36,230,0),24 },
+       { IPv4(208,36,231,0),24 },
+       { IPv4(208,37,49,0),24 },
+       { IPv4(208,37,50,0),24 },
+       { IPv4(208,37,80,0),23 },
+       { IPv4(208,37,88,0),23 },
+       { IPv4(208,37,120,0),22 },
+       { IPv4(208,37,138,0),23 },
+       { IPv4(208,37,148,0),22 },
+       { IPv4(208,37,172,0),24 },
+       { IPv4(208,37,200,0),24 },
+       { IPv4(208,37,207,0),24 },
+       { IPv4(208,39,20,0),22 },
+       { IPv4(208,39,52,0),22 },
+       { IPv4(208,39,208,0),22 },
+       { IPv4(208,40,128,0),18 },
+       { IPv4(208,40,192,0),20 },
+       { IPv4(208,42,0,0),17 },
+       { IPv4(208,42,10,0),24 },
+       { IPv4(208,42,11,0),24 },
+       { IPv4(208,42,128,0),18 },
+       { IPv4(208,43,0,0),16 },
+       { IPv4(208,44,45,0),24 },
+       { IPv4(208,44,47,0),24 },
+       { IPv4(208,44,50,0),24 },
+       { IPv4(208,44,70,0),23 },
+       { IPv4(208,44,72,0),21 },
+       { IPv4(208,44,82,0),23 },
+       { IPv4(208,44,110,0),24 },
+       { IPv4(208,44,119,0),24 },
+       { IPv4(208,44,140,0),22 },
+       { IPv4(208,44,144,0),24 },
+       { IPv4(208,44,151,0),24 },
+       { IPv4(208,44,168,0),23 },
+       { IPv4(208,44,212,0),24 },
+       { IPv4(208,44,217,0),24 },
+       { IPv4(208,44,218,0),24 },
+       { IPv4(208,44,241,0),24 },
+       { IPv4(208,44,252,0),24 },
+       { IPv4(208,44,253,0),24 },
+       { IPv4(208,45,32,0),24 },
+       { IPv4(208,45,33,0),24 },
+       { IPv4(208,45,36,0),24 },
+       { IPv4(208,45,37,0),24 },
+       { IPv4(208,45,52,0),22 },
+       { IPv4(208,45,148,0),24 },
+       { IPv4(208,45,171,0),24 },
+       { IPv4(208,45,172,0),22 },
+       { IPv4(208,45,205,0),24 },
+       { IPv4(208,45,252,0),23 },
+       { IPv4(208,46,49,0),24 },
+       { IPv4(208,46,64,0),24 },
+       { IPv4(208,46,66,0),24 },
+       { IPv4(208,46,68,0),24 },
+       { IPv4(208,46,69,0),24 },
+       { IPv4(208,46,71,0),24 },
+       { IPv4(208,46,75,0),24 },
+       { IPv4(208,46,78,0),24 },
+       { IPv4(208,46,79,0),24 },
+       { IPv4(208,46,84,0),24 },
+       { IPv4(208,46,86,0),24 },
+       { IPv4(208,46,102,0),24 },
+       { IPv4(208,46,169,0),24 },
+       { IPv4(208,46,170,0),24 },
+       { IPv4(208,46,224,0),24 },
+       { IPv4(208,46,228,0),24 },
+       { IPv4(208,46,229,0),24 },
+       { IPv4(208,46,230,0),24 },
+       { IPv4(208,46,235,0),24 },
+       { IPv4(208,46,236,0),24 },
+       { IPv4(208,46,237,0),24 },
+       { IPv4(208,46,238,0),24 },
+       { IPv4(208,47,64,0),24 },
+       { IPv4(208,47,65,0),24 },
+       { IPv4(208,47,66,0),24 },
+       { IPv4(208,47,67,0),24 },
+       { IPv4(208,47,80,0),23 },
+       { IPv4(208,47,96,0),23 },
+       { IPv4(208,47,128,0),21 },
+       { IPv4(208,47,174,0),24 },
+       { IPv4(208,47,216,0),24 },
+       { IPv4(208,47,218,0),24 },
+       { IPv4(208,47,219,0),24 },
+       { IPv4(208,47,231,0),24 },
+       { IPv4(208,47,232,0),22 },
+       { IPv4(208,47,244,0),22 },
+       { IPv4(208,48,140,0),22 },
+       { IPv4(208,48,247,0),24 },
+       { IPv4(208,49,48,0),24 },
+       { IPv4(208,49,75,0),24 },
+       { IPv4(208,49,147,0),24 },
+       { IPv4(208,49,172,0),24 },
+       { IPv4(208,50,54,0),24 },
+       { IPv4(208,50,55,0),24 },
+       { IPv4(208,50,100,0),24 },
+       { IPv4(208,50,127,0),24 },
+       { IPv4(208,50,231,0),24 },
+       { IPv4(208,51,152,0),24 },
+       { IPv4(208,55,0,0),16 },
+       { IPv4(208,58,0,0),15 },
+       { IPv4(208,58,121,0),24 },
+       { IPv4(208,59,215,0),24 },
+       { IPv4(208,62,114,0),24 },
+       { IPv4(208,63,39,0),24 },
+       { IPv4(208,63,49,0),24 },
+       { IPv4(208,63,50,0),23 },
+       { IPv4(208,63,57,0),24 },
+       { IPv4(208,63,58,0),23 },
+       { IPv4(208,128,64,0),20 },
+       { IPv4(208,129,96,0),19 },
+       { IPv4(208,129,168,0),22 },
+       { IPv4(208,129,249,0),24 },
+       { IPv4(208,129,250,0),24 },
+       { IPv4(208,129,251,0),24 },
+       { IPv4(208,129,252,0),24 },
+       { IPv4(208,129,253,0),24 },
+       { IPv4(208,130,200,0),24 },
+       { IPv4(208,131,192,0),20 },
+       { IPv4(208,131,208,0),22 },
+       { IPv4(208,131,222,0),24 },
+       { IPv4(208,132,32,0),20 },
+       { IPv4(208,132,95,0),24 },
+       { IPv4(208,132,160,0),19 },
+       { IPv4(208,132,196,0),24 },
+       { IPv4(208,132,245,0),24 },
+       { IPv4(208,132,246,0),24 },
+       { IPv4(208,133,5,0),24 },
+       { IPv4(208,133,35,0),24 },
+       { IPv4(208,133,105,0),24 },
+       { IPv4(208,133,160,0),21 },
+       { IPv4(208,133,168,0),22 },
+       { IPv4(208,133,172,0),23 },
+       { IPv4(208,133,174,0),24 },
+       { IPv4(208,133,177,0),24 },
+       { IPv4(208,133,193,0),24 },
+       { IPv4(208,133,204,0),23 },
+       { IPv4(208,133,216,0),22 },
+       { IPv4(208,133,220,0),23 },
+       { IPv4(208,134,16,0),21 },
+       { IPv4(208,134,24,0),23 },
+       { IPv4(208,134,128,0),23 },
+       { IPv4(208,134,130,0),24 },
+       { IPv4(208,134,131,0),24 },
+       { IPv4(208,134,132,0),23 },
+       { IPv4(208,134,134,0),24 },
+       { IPv4(208,134,135,0),24 },
+       { IPv4(208,134,136,0),23 },
+       { IPv4(208,134,138,0),24 },
+       { IPv4(208,134,144,0),22 },
+       { IPv4(208,134,148,0),24 },
+       { IPv4(208,134,151,0),24 },
+       { IPv4(208,136,0,0),19 },
+       { IPv4(208,136,102,0),24 },
+       { IPv4(208,136,252,0),22 },
+       { IPv4(208,137,1,0),24 },
+       { IPv4(208,137,6,0),24 },
+       { IPv4(208,137,12,0),24 },
+       { IPv4(208,137,24,0),21 },
+       { IPv4(208,137,36,0),24 },
+       { IPv4(208,137,160,0),23 },
+       { IPv4(208,137,183,0),24 },
+       { IPv4(208,138,20,0),24 },
+       { IPv4(208,138,48,0),20 },
+       { IPv4(208,138,86,0),24 },
+       { IPv4(208,138,204,0),22 },
+       { IPv4(208,138,224,0),24 },
+       { IPv4(208,138,254,0),24 },
+       { IPv4(208,139,82,0),24 },
+       { IPv4(208,139,138,0),23 },
+       { IPv4(208,139,195,0),24 },
+       { IPv4(208,140,72,0),21 },
+       { IPv4(208,141,160,0),19 },
+       { IPv4(208,141,228,0),24 },
+       { IPv4(208,142,106,0),23 },
+       { IPv4(208,142,111,0),24 },
+       { IPv4(208,142,112,0),24 },
+       { IPv4(208,142,113,0),24 },
+       { IPv4(208,142,114,0),24 },
+       { IPv4(208,142,115,0),24 },
+       { IPv4(208,142,116,0),24 },
+       { IPv4(208,142,117,0),24 },
+       { IPv4(208,142,118,0),24 },
+       { IPv4(208,142,119,0),24 },
+       { IPv4(208,142,120,0),24 },
+       { IPv4(208,142,121,0),24 },
+       { IPv4(208,142,136,0),21 },
+       { IPv4(208,142,142,0),24 },
+       { IPv4(208,142,144,0),21 },
+       { IPv4(208,143,22,0),24 },
+       { IPv4(208,143,23,0),24 },
+       { IPv4(208,143,33,0),24 },
+       { IPv4(208,143,38,0),24 },
+       { IPv4(208,143,104,0),24 },
+       { IPv4(208,143,108,0),24 },
+       { IPv4(208,143,109,0),24 },
+       { IPv4(208,144,90,0),24 },
+       { IPv4(208,144,112,0),21 },
+       { IPv4(208,144,196,0),24 },
+       { IPv4(208,144,224,0),24 },
+       { IPv4(208,144,228,0),24 },
+       { IPv4(208,144,230,0),24 },
+       { IPv4(208,144,235,0),24 },
+       { IPv4(208,145,0,0),24 },
+       { IPv4(208,145,18,0),24 },
+       { IPv4(208,145,120,0),24 },
+       { IPv4(208,145,121,0),24 },
+       { IPv4(208,145,122,0),24 },
+       { IPv4(208,145,126,0),24 },
+       { IPv4(208,145,127,0),24 },
+       { IPv4(208,146,32,0),20 },
+       { IPv4(208,146,40,0),23 },
+       { IPv4(208,146,142,0),24 },
+       { IPv4(208,146,208,0),22 },
+       { IPv4(208,146,248,0),21 },
+       { IPv4(208,147,18,0),24 },
+       { IPv4(208,147,64,0),24 },
+       { IPv4(208,147,88,0),21 },
+       { IPv4(208,147,144,0),21 },
+       { IPv4(208,147,152,0),22 },
+       { IPv4(208,148,32,0),24 },
+       { IPv4(208,148,34,0),24 },
+       { IPv4(208,148,35,0),24 },
+       { IPv4(208,148,37,0),24 },
+       { IPv4(208,148,38,0),24 },
+       { IPv4(208,148,39,0),24 },
+       { IPv4(208,148,50,0),23 },
+       { IPv4(208,148,52,0),23 },
+       { IPv4(208,148,74,0),24 },
+       { IPv4(208,148,76,0),22 },
+       { IPv4(208,149,80,0),20 },
+       { IPv4(208,149,169,0),24 },
+       { IPv4(208,151,96,0),22 },
+       { IPv4(208,151,96,0),19 },
+       { IPv4(208,151,100,0),22 },
+       { IPv4(208,151,104,0),21 },
+       { IPv4(208,151,112,0),21 },
+       { IPv4(208,151,120,0),23 },
+       { IPv4(208,151,122,0),23 },
+       { IPv4(208,151,124,0),22 },
+       { IPv4(208,151,216,0),22 },
+       { IPv4(208,152,0,0),22 },
+       { IPv4(208,152,4,0),22 },
+       { IPv4(208,152,8,0),23 },
+       { IPv4(208,152,28,0),22 },
+       { IPv4(208,152,32,0),19 },
+       { IPv4(208,152,112,0),22 },
+       { IPv4(208,152,116,0),23 },
+       { IPv4(208,152,118,0),24 },
+       { IPv4(208,152,120,0),21 },
+       { IPv4(208,152,153,0),24 },
+       { IPv4(208,152,184,0),21 },
+       { IPv4(208,152,204,0),22 },
+       { IPv4(208,153,32,0),21 },
+       { IPv4(208,153,75,0),24 },
+       { IPv4(208,153,82,0),23 },
+       { IPv4(208,153,84,0),24 },
+       { IPv4(208,153,136,0),24 },
+       { IPv4(208,153,137,0),24 },
+       { IPv4(208,153,138,0),24 },
+       { IPv4(208,153,140,0),24 },
+       { IPv4(208,153,141,0),24 },
+       { IPv4(208,153,228,0),24 },
+       { IPv4(208,154,0,0),22 },
+       { IPv4(208,154,56,0),24 },
+       { IPv4(208,154,80,0),23 },
+       { IPv4(208,154,96,0),19 },
+       { IPv4(208,154,154,0),24 },
+       { IPv4(208,154,199,0),24 },
+       { IPv4(208,155,180,0),24 },
+       { IPv4(208,155,181,0),24 },
+       { IPv4(208,155,182,0),24 },
+       { IPv4(208,155,183,0),24 },
+       { IPv4(208,155,184,0),24 },
+       { IPv4(208,155,185,0),24 },
+       { IPv4(208,155,186,0),24 },
+       { IPv4(208,155,187,0),24 },
+       { IPv4(208,155,188,0),24 },
+       { IPv4(208,155,189,0),24 },
+       { IPv4(208,155,190,0),24 },
+       { IPv4(208,156,21,0),24 },
+       { IPv4(208,156,22,0),23 },
+       { IPv4(208,157,120,0),24 },
+       { IPv4(208,157,122,0),23 },
+       { IPv4(208,157,136,0),24 },
+       { IPv4(208,157,255,0),24 },
+       { IPv4(208,158,227,0),24 },
+       { IPv4(208,159,4,0),23 },
+       { IPv4(208,159,61,0),24 },
+       { IPv4(208,159,176,0),21 },
+       { IPv4(208,159,224,0),21 },
+       { IPv4(208,159,232,0),21 },
+       { IPv4(208,159,240,0),24 },
+       { IPv4(208,159,240,0),22 },
+       { IPv4(208,159,244,0),22 },
+       { IPv4(208,159,245,0),24 },
+       { IPv4(208,159,248,0),21 },
+       { IPv4(208,160,10,0),23 },
+       { IPv4(208,160,14,0),23 },
+       { IPv4(208,160,32,0),20 },
+       { IPv4(208,160,85,0),24 },
+       { IPv4(208,160,104,0),21 },
+       { IPv4(208,160,151,0),24 },
+       { IPv4(208,160,218,0),24 },
+       { IPv4(208,160,224,0),19 },
+       { IPv4(208,161,224,0),21 },
+       { IPv4(208,161,232,0),21 },
+       { IPv4(208,161,240,0),21 },
+       { IPv4(208,161,240,0),22 },
+       { IPv4(208,161,244,0),22 },
+       { IPv4(208,161,248,0),24 },
+       { IPv4(208,162,36,0),22 },
+       { IPv4(208,162,64,0),19 },
+       { IPv4(208,162,120,0),22 },
+       { IPv4(208,162,124,0),23 },
+       { IPv4(208,162,126,0),24 },
+       { IPv4(208,162,240,0),21 },
+       { IPv4(208,162,248,0),22 },
+       { IPv4(208,162,252,0),24 },
+       { IPv4(208,162,254,0),24 },
+       { IPv4(208,163,72,0),24 },
+       { IPv4(208,163,73,0),24 },
+       { IPv4(208,163,74,0),24 },
+       { IPv4(208,163,75,0),24 },
+       { IPv4(208,163,76,0),24 },
+       { IPv4(208,163,77,0),24 },
+       { IPv4(208,163,78,0),24 },
+       { IPv4(208,163,79,0),24 },
+       { IPv4(208,163,96,0),21 },
+       { IPv4(208,163,104,0),21 },
+       { IPv4(208,163,112,0),20 },
+       { IPv4(208,163,164,0),24 },
+       { IPv4(208,163,192,0),22 },
+       { IPv4(208,163,196,0),22 },
+       { IPv4(208,163,200,0),22 },
+       { IPv4(208,163,204,0),22 },
+       { IPv4(208,163,208,0),22 },
+       { IPv4(208,163,208,0),24 },
+       { IPv4(208,163,216,0),22 },
+       { IPv4(208,163,224,0),22 },
+       { IPv4(208,163,228,0),22 },
+       { IPv4(208,163,232,0),22 },
+       { IPv4(208,163,236,0),22 },
+       { IPv4(208,163,240,0),21 },
+       { IPv4(208,163,248,0),21 },
+       { IPv4(208,164,0,0),21 },
+       { IPv4(208,164,6,0),23 },
+       { IPv4(208,164,8,0),21 },
+       { IPv4(208,164,16,0),24 },
+       { IPv4(208,164,16,0),21 },
+       { IPv4(208,164,24,0),21 },
+       { IPv4(208,164,232,0),24 },
+       { IPv4(208,164,233,0),24 },
+       { IPv4(208,164,234,0),24 },
+       { IPv4(208,165,16,0),24 },
+       { IPv4(208,165,32,0),21 },
+       { IPv4(208,165,32,0),20 },
+       { IPv4(208,165,64,0),20 },
+       { IPv4(208,165,80,0),21 },
+       { IPv4(208,165,88,0),21 },
+       { IPv4(208,165,112,0),22 },
+       { IPv4(208,165,116,0),23 },
+       { IPv4(208,165,144,0),20 },
+       { IPv4(208,166,64,0),21 },
+       { IPv4(208,166,120,0),21 },
+       { IPv4(208,166,153,0),24 },
+       { IPv4(208,166,176,0),20 },
+       { IPv4(208,166,235,0),24 },
+       { IPv4(208,166,237,0),24 },
+       { IPv4(208,167,72,0),21 },
+       { IPv4(208,167,128,0),20 },
+       { IPv4(208,167,184,0),22 },
+       { IPv4(208,168,4,0),22 },
+       { IPv4(208,168,16,0),24 },
+       { IPv4(208,168,17,0),24 },
+       { IPv4(208,168,18,0),24 },
+       { IPv4(208,168,19,0),24 },
+       { IPv4(208,168,120,0),21 },
+       { IPv4(208,168,152,0),22 },
+       { IPv4(208,168,174,0),23 },
+       { IPv4(208,168,176,0),22 },
+       { IPv4(208,168,177,0),24 },
+       { IPv4(208,168,211,0),24 },
+       { IPv4(208,168,213,0),24 },
+       { IPv4(208,168,215,0),24 },
+       { IPv4(208,169,0,0),24 },
+       { IPv4(208,169,4,0),22 },
+       { IPv4(208,169,16,0),21 },
+       { IPv4(208,169,32,0),22 },
+       { IPv4(208,170,96,0),20 },
+       { IPv4(208,170,112,0),20 },
+       { IPv4(208,170,152,0),22 },
+       { IPv4(208,170,156,0),23 },
+       { IPv4(208,170,168,0),21 },
+       { IPv4(208,170,170,0),24 },
+       { IPv4(208,170,240,0),22 },
+       { IPv4(208,171,80,0),22 },
+       { IPv4(208,171,120,0),21 },
+       { IPv4(208,171,146,0),24 },
+       { IPv4(208,171,147,0),24 },
+       { IPv4(208,171,151,0),24 },
+       { IPv4(208,171,152,0),24 },
+       { IPv4(208,171,153,0),24 },
+       { IPv4(208,171,154,0),24 },
+       { IPv4(208,171,155,0),24 },
+       { IPv4(208,171,157,0),24 },
+       { IPv4(208,171,158,0),24 },
+       { IPv4(208,171,159,0),24 },
+       { IPv4(208,171,212,0),24 },
+       { IPv4(208,176,0,0),15 },
+       { IPv4(208,176,50,0),24 },
+       { IPv4(208,176,58,0),23 },
+       { IPv4(208,176,72,0),22 },
+       { IPv4(208,176,80,0),20 },
+       { IPv4(208,176,99,0),24 },
+       { IPv4(208,176,100,0),24 },
+       { IPv4(208,176,105,0),24 },
+       { IPv4(208,176,106,0),23 },
+       { IPv4(208,176,161,0),24 },
+       { IPv4(208,176,162,0),23 },
+       { IPv4(208,176,224,0),21 },
+       { IPv4(208,176,232,0),22 },
+       { IPv4(208,176,236,0),22 },
+       { IPv4(208,177,1,0),24 },
+       { IPv4(208,177,25,0),24 },
+       { IPv4(208,177,27,0),24 },
+       { IPv4(208,177,32,0),22 },
+       { IPv4(208,177,40,0),22 },
+       { IPv4(208,177,44,0),24 },
+       { IPv4(208,177,67,0),24 },
+       { IPv4(208,177,74,0),23 },
+       { IPv4(208,177,76,0),23 },
+       { IPv4(208,177,177,0),24 },
+       { IPv4(208,177,192,0),21 },
+       { IPv4(208,177,200,0),22 },
+       { IPv4(208,177,245,0),24 },
+       { IPv4(208,177,248,0),22 },
+       { IPv4(208,178,52,0),23 },
+       { IPv4(208,178,74,0),23 },
+       { IPv4(208,178,130,0),24 },
+       { IPv4(208,178,237,0),24 },
+       { IPv4(208,179,0,0),16 },
+       { IPv4(208,179,16,0),24 },
+       { IPv4(208,179,60,0),24 },
+       { IPv4(208,179,91,0),24 },
+       { IPv4(208,179,193,0),24 },
+       { IPv4(208,179,194,0),24 },
+       { IPv4(208,180,0,0),19 },
+       { IPv4(208,180,32,0),20 },
+       { IPv4(208,180,48,0),21 },
+       { IPv4(208,180,56,0),22 },
+       { IPv4(208,180,60,0),22 },
+       { IPv4(208,180,64,0),21 },
+       { IPv4(208,180,72,0),24 },
+       { IPv4(208,180,73,0),24 },
+       { IPv4(208,180,74,0),23 },
+       { IPv4(208,180,76,0),24 },
+       { IPv4(208,180,77,0),24 },
+       { IPv4(208,180,78,0),24 },
+       { IPv4(208,180,79,0),24 },
+       { IPv4(208,180,80,0),20 },
+       { IPv4(208,180,96,0),20 },
+       { IPv4(208,180,112,0),21 },
+       { IPv4(208,180,120,0),21 },
+       { IPv4(208,180,128,0),20 },
+       { IPv4(208,180,144,0),21 },
+       { IPv4(208,180,152,0),21 },
+       { IPv4(208,180,160,0),20 },
+       { IPv4(208,180,176,0),21 },
+       { IPv4(208,180,184,0),22 },
+       { IPv4(208,180,188,0),22 },
+       { IPv4(208,180,192,0),21 },
+       { IPv4(208,180,200,0),22 },
+       { IPv4(208,180,204,0),22 },
+       { IPv4(208,180,208,0),21 },
+       { IPv4(208,180,216,0),21 },
+       { IPv4(208,180,224,0),20 },
+       { IPv4(208,180,240,0),21 },
+       { IPv4(208,182,0,0),17 },
+       { IPv4(208,182,128,0),19 },
+       { IPv4(208,182,160,0),20 },
+       { IPv4(208,182,176,0),22 },
+       { IPv4(208,182,180,0),23 },
+       { IPv4(208,182,182,0),23 },
+       { IPv4(208,182,184,0),21 },
+       { IPv4(208,182,192,0),18 },
+       { IPv4(208,183,0,0),17 },
+       { IPv4(208,183,128,0),19 },
+       { IPv4(208,183,160,0),19 },
+       { IPv4(208,183,192,0),19 },
+       { IPv4(208,183,224,0),20 },
+       { IPv4(208,183,240,0),20 },
+       { IPv4(208,184,0,0),15 },
+       { IPv4(208,184,7,0),24 },
+       { IPv4(208,184,29,0),24 },
+       { IPv4(208,184,40,0),22 },
+       { IPv4(208,184,40,0),24 },
+       { IPv4(208,184,41,0),24 },
+       { IPv4(208,184,42,0),24 },
+       { IPv4(208,184,43,0),24 },
+       { IPv4(208,184,46,0),24 },
+       { IPv4(208,184,153,0),24 },
+       { IPv4(208,184,189,0),24 },
+       { IPv4(208,184,190,0),23 },
+       { IPv4(208,184,216,0),24 },
+       { IPv4(208,184,219,0),24 },
+       { IPv4(208,184,227,0),24 },
+       { IPv4(208,184,252,0),22 },
+       { IPv4(208,185,7,0),24 },
+       { IPv4(208,185,33,0),24 },
+       { IPv4(208,185,35,0),24 },
+       { IPv4(208,185,43,0),24 },
+       { IPv4(208,185,44,0),24 },
+       { IPv4(208,185,45,0),24 },
+       { IPv4(208,185,48,0),23 },
+       { IPv4(208,185,74,0),24 },
+       { IPv4(208,185,98,0),23 },
+       { IPv4(208,185,109,0),24 },
+       { IPv4(208,185,112,0),24 },
+       { IPv4(208,185,114,0),23 },
+       { IPv4(208,185,129,0),24 },
+       { IPv4(208,185,130,0),24 },
+       { IPv4(208,185,143,0),24 },
+       { IPv4(208,185,169,0),24 },
+       { IPv4(208,185,186,0),24 },
+       { IPv4(208,185,204,0),24 },
+       { IPv4(208,185,205,0),24 },
+       { IPv4(208,185,220,0),24 },
+       { IPv4(208,185,221,0),24 },
+       { IPv4(208,185,222,0),23 },
+       { IPv4(208,185,224,0),23 },
+       { IPv4(208,186,104,0),21 },
+       { IPv4(208,186,108,0),24 },
+       { IPv4(208,186,109,0),24 },
+       { IPv4(208,186,110,0),24 },
+       { IPv4(208,186,111,0),24 },
+       { IPv4(208,186,224,0),24 },
+       { IPv4(208,187,190,0),24 },
+       { IPv4(208,187,194,0),24 },
+       { IPv4(208,187,214,0),23 },
+       { IPv4(208,187,218,0),24 },
+       { IPv4(208,187,219,0),24 },
+       { IPv4(208,187,224,0),22 },
+       { IPv4(208,188,184,0),21 },
+       { IPv4(208,189,32,0),20 },
+       { IPv4(208,189,96,0),20 },
+       { IPv4(208,189,103,0),24 },
+       { IPv4(208,189,208,0),21 },
+       { IPv4(208,189,210,0),24 },
+       { IPv4(208,189,216,0),21 },
+       { IPv4(208,191,32,0),20 },
+       { IPv4(208,191,48,0),20 },
+       { IPv4(208,191,62,0),24 },
+       { IPv4(208,191,128,0),20 },
+       { IPv4(208,192,3,0),24 },
+       { IPv4(208,192,14,0),24 },
+       { IPv4(208,192,32,0),21 },
+       { IPv4(208,192,86,0),24 },
+       { IPv4(208,192,120,0),24 },
+       { IPv4(208,192,208,0),22 },
+       { IPv4(208,193,14,0),24 },
+       { IPv4(208,193,15,0),24 },
+       { IPv4(208,193,53,0),24 },
+       { IPv4(208,193,120,0),21 },
+       { IPv4(208,193,132,0),24 },
+       { IPv4(208,194,74,0),23 },
+       { IPv4(208,194,97,0),24 },
+       { IPv4(208,194,157,0),24 },
+       { IPv4(208,195,104,0),21 },
+       { IPv4(208,195,255,0),24 },
+       { IPv4(208,196,96,0),19 },
+       { IPv4(208,196,168,0),24 },
+       { IPv4(208,197,4,0),24 },
+       { IPv4(208,197,8,0),24 },
+       { IPv4(208,197,35,0),24 },
+       { IPv4(208,197,70,0),24 },
+       { IPv4(208,197,116,0),23 },
+       { IPv4(208,197,246,0),24 },
+       { IPv4(208,197,247,0),24 },
+       { IPv4(208,198,0,0),22 },
+       { IPv4(208,198,4,0),22 },
+       { IPv4(208,198,224,0),24 },
+       { IPv4(208,198,224,0),21 },
+       { IPv4(208,198,225,0),24 },
+       { IPv4(208,198,226,0),24 },
+       { IPv4(208,198,240,0),22 },
+       { IPv4(208,199,168,0),21 },
+       { IPv4(208,200,136,0),21 },
+       { IPv4(208,200,180,0),24 },
+       { IPv4(208,200,181,0),24 },
+       { IPv4(208,200,185,0),24 },
+       { IPv4(208,200,214,0),24 },
+       { IPv4(208,201,44,0),22 },
+       { IPv4(208,201,64,0),21 },
+       { IPv4(208,201,73,0),24 },
+       { IPv4(208,201,108,0),24 },
+       { IPv4(208,201,179,0),24 },
+       { IPv4(208,201,200,0),21 },
+       { IPv4(208,202,77,0),24 },
+       { IPv4(208,202,104,0),24 },
+       { IPv4(208,202,107,0),24 },
+       { IPv4(208,202,128,0),21 },
+       { IPv4(208,202,218,0),23 },
+       { IPv4(208,203,47,0),24 },
+       { IPv4(208,203,56,0),21 },
+       { IPv4(208,203,56,0),22 },
+       { IPv4(208,203,112,0),23 },
+       { IPv4(208,203,201,0),24 },
+       { IPv4(208,205,104,0),21 },
+       { IPv4(208,205,120,0),24 },
+       { IPv4(208,205,124,0),24 },
+       { IPv4(208,205,125,0),24 },
+       { IPv4(208,205,126,0),24 },
+       { IPv4(208,205,127,0),24 },
+       { IPv4(208,205,136,0),24 },
+       { IPv4(208,205,144,0),20 },
+       { IPv4(208,205,160,0),20 },
+       { IPv4(208,205,176,0),24 },
+       { IPv4(208,205,240,0),22 },
+       { IPv4(208,205,244,0),22 },
+       { IPv4(208,206,84,0),24 },
+       { IPv4(208,206,85,0),24 },
+       { IPv4(208,206,229,0),24 },
+       { IPv4(208,206,231,0),24 },
+       { IPv4(208,207,64,0),24 },
+       { IPv4(208,208,91,0),24 },
+       { IPv4(208,208,104,0),21 },
+       { IPv4(208,209,28,0),24 },
+       { IPv4(208,209,29,0),24 },
+       { IPv4(208,209,30,0),24 },
+       { IPv4(208,209,31,0),24 },
+       { IPv4(208,209,38,0),24 },
+       { IPv4(208,209,160,0),22 },
+       { IPv4(208,209,210,0),24 },
+       { IPv4(208,209,232,0),24 },
+       { IPv4(208,209,252,0),22 },
+       { IPv4(208,211,80,0),24 },
+       { IPv4(208,211,104,0),21 },
+       { IPv4(208,212,64,0),20 },
+       { IPv4(208,213,56,0),21 },
+       { IPv4(208,213,126,0),24 },
+       { IPv4(208,213,144,0),20 },
+       { IPv4(208,213,229,0),24 },
+       { IPv4(208,214,18,0),24 },
+       { IPv4(208,214,40,0),22 },
+       { IPv4(208,214,216,0),21 },
+       { IPv4(208,215,200,0),24 },
+       { IPv4(208,215,201,0),24 },
+       { IPv4(208,215,236,0),24 },
+       { IPv4(208,216,39,0),24 },
+       { IPv4(208,216,80,0),21 },
+       { IPv4(208,216,91,0),24 },
+       { IPv4(208,216,180,0),22 },
+       { IPv4(208,217,21,0),24 },
+       { IPv4(208,217,71,0),24 },
+       { IPv4(208,217,74,0),24 },
+       { IPv4(208,217,166,0),24 },
+       { IPv4(208,217,167,0),24 },
+       { IPv4(208,217,198,0),24 },
+       { IPv4(208,217,208,0),20 },
+       { IPv4(208,217,238,0),24 },
+       { IPv4(208,218,122,0),24 },
+       { IPv4(208,218,128,0),21 },
+       { IPv4(208,218,210,0),24 },
+       { IPv4(208,218,214,0),24 },
+       { IPv4(208,218,215,0),24 },
+       { IPv4(208,219,48,0),24 },
+       { IPv4(208,219,49,0),24 },
+       { IPv4(208,219,74,0),24 },
+       { IPv4(208,219,112,0),20 },
+       { IPv4(208,219,128,0),19 },
+       { IPv4(208,219,220,0),24 },
+       { IPv4(208,220,100,0),24 },
+       { IPv4(208,220,180,0),24 },
+       { IPv4(208,220,181,0),24 },
+       { IPv4(208,220,192,0),19 },
+       { IPv4(208,221,72,0),21 },
+       { IPv4(208,221,192,0),24 },
+       { IPv4(208,221,193,0),24 },
+       { IPv4(208,221,194,0),24 },
+       { IPv4(208,221,195,0),24 },
+       { IPv4(208,222,120,0),21 },
+       { IPv4(208,222,150,0),23 },
+       { IPv4(208,222,244,0),24 },
+       { IPv4(208,222,245,0),24 },
+       { IPv4(208,222,252,0),24 },
+       { IPv4(208,222,253,0),24 },
+       { IPv4(208,223,76,0),24 },
+       { IPv4(208,223,208,0),23 },
+       { IPv4(208,224,87,0),24 },
+       { IPv4(208,224,122,0),24 },
+       { IPv4(208,224,224,0),24 },
+       { IPv4(208,224,225,0),24 },
+       { IPv4(208,225,40,0),24 },
+       { IPv4(208,225,187,0),24 },
+       { IPv4(208,225,239,0),24 },
+       { IPv4(208,226,36,0),24 },
+       { IPv4(208,226,37,0),24 },
+       { IPv4(208,226,38,0),24 },
+       { IPv4(208,226,39,0),24 },
+       { IPv4(208,226,120,0),22 },
+       { IPv4(208,226,130,0),23 },
+       { IPv4(208,228,160,0),20 },
+       { IPv4(208,229,54,0),24 },
+       { IPv4(208,229,121,0),24 },
+       { IPv4(208,229,240,0),24 },
+       { IPv4(208,230,56,0),24 },
+       { IPv4(208,230,58,0),23 },
+       { IPv4(208,230,128,0),20 },
+       { IPv4(208,230,194,0),24 },
+       { IPv4(208,230,196,0),24 },
+       { IPv4(208,230,197,0),24 },
+       { IPv4(208,230,244,0),24 },
+       { IPv4(208,230,250,0),24 },
+       { IPv4(208,230,251,0),24 },
+       { IPv4(208,231,60,0),24 },
+       { IPv4(208,231,128,0),22 },
+       { IPv4(208,231,162,0),24 },
+       { IPv4(208,232,142,0),24 },
+       { IPv4(208,232,142,0),23 },
+       { IPv4(208,232,245,0),24 },
+       { IPv4(208,233,88,0),21 },
+       { IPv4(208,233,112,0),21 },
+       { IPv4(208,233,124,0),23 },
+       { IPv4(208,234,0,0),19 },
+       { IPv4(208,234,120,0),22 },
+       { IPv4(208,234,168,0),24 },
+       { IPv4(208,234,169,0),24 },
+       { IPv4(208,234,192,0),23 },
+       { IPv4(208,234,218,0),23 },
+       { IPv4(208,234,252,0),24 },
+       { IPv4(208,236,170,0),24 },
+       { IPv4(208,237,33,0),24 },
+       { IPv4(208,237,56,0),22 },
+       { IPv4(208,237,80,0),22 },
+       { IPv4(208,237,88,0),23 },
+       { IPv4(208,238,43,0),24 },
+       { IPv4(208,238,44,0),24 },
+       { IPv4(208,238,45,0),24 },
+       { IPv4(208,238,46,0),24 },
+       { IPv4(208,238,47,0),24 },
+       { IPv4(208,238,126,0),23 },
+       { IPv4(208,238,144,0),21 },
+       { IPv4(208,238,228,0),24 },
+       { IPv4(208,238,229,0),24 },
+       { IPv4(208,238,230,0),24 },
+       { IPv4(208,239,116,0),22 },
+       { IPv4(208,239,159,0),24 },
+       { IPv4(208,239,169,0),24 },
+       { IPv4(208,239,172,0),22 },
+       { IPv4(208,240,76,0),23 },
+       { IPv4(208,240,128,0),21 },
+       { IPv4(208,240,240,0),22 },
+       { IPv4(208,240,252,0),22 },
+       { IPv4(208,241,0,0),22 },
+       { IPv4(208,241,48,0),22 },
+       { IPv4(208,241,152,0),21 },
+       { IPv4(208,241,166,0),23 },
+       { IPv4(208,241,190,0),24 },
+       { IPv4(208,241,191,0),24 },
+       { IPv4(208,242,0,0),24 },
+       { IPv4(208,242,1,0),24 },
+       { IPv4(208,242,62,0),24 },
+       { IPv4(208,242,63,0),24 },
+       { IPv4(208,242,114,0),23 },
+       { IPv4(208,243,4,0),22 },
+       { IPv4(208,243,98,0),23 },
+       { IPv4(208,243,100,0),22 },
+       { IPv4(208,244,39,0),24 },
+       { IPv4(208,244,82,0),23 },
+       { IPv4(208,244,88,0),21 },
+       { IPv4(208,244,111,0),24 },
+       { IPv4(208,244,118,0),23 },
+       { IPv4(208,244,140,0),22 },
+       { IPv4(208,244,174,0),23 },
+       { IPv4(208,244,246,0),23 },
+       { IPv4(208,245,36,0),23 },
+       { IPv4(208,245,86,0),24 },
+       { IPv4(208,245,128,0),24 },
+       { IPv4(208,245,132,0),22 },
+       { IPv4(208,245,232,0),24 },
+       { IPv4(208,245,248,0),21 },
+       { IPv4(208,246,83,0),24 },
+       { IPv4(208,246,134,0),24 },
+       { IPv4(208,246,144,224),27 },
+       { IPv4(208,246,164,0),23 },
+       { IPv4(208,246,215,0),24 },
+       { IPv4(208,247,17,0),24 },
+       { IPv4(208,247,100,0),24 },
+       { IPv4(208,247,121,0),24 },
+       { IPv4(208,247,129,0),24 },
+       { IPv4(208,247,208,0),24 },
+       { IPv4(208,247,248,0),22 },
+       { IPv4(208,248,33,0),24 },
+       { IPv4(208,248,77,0),24 },
+       { IPv4(208,248,108,0),23 },
+       { IPv4(208,248,128,0),20 },
+       { IPv4(208,248,186,0),23 },
+       { IPv4(208,248,192,0),24 },
+       { IPv4(208,248,193,0),24 },
+       { IPv4(208,248,194,0),24 },
+       { IPv4(208,248,195,0),24 },
+       { IPv4(208,248,242,0),24 },
+       { IPv4(208,249,36,0),24 },
+       { IPv4(208,249,116,0),24 },
+       { IPv4(208,249,117,0),24 },
+       { IPv4(208,249,206,0),24 },
+       { IPv4(208,251,67,0),24 },
+       { IPv4(208,251,90,0),23 },
+       { IPv4(208,251,159,0),24 },
+       { IPv4(208,252,24,0),24 },
+       { IPv4(208,252,201,0),24 },
+       { IPv4(208,253,72,0),21 },
+       { IPv4(208,254,155,0),24 },
+       { IPv4(208,255,140,0),24 },
+       { IPv4(208,255,152,0),21 },
+       { IPv4(208,255,181,0),24 },
+       { IPv4(208,255,225,0),24 },
+       { IPv4(209,1,23,0),24 },
+       { IPv4(209,1,108,0),22 },
+       { IPv4(209,1,112,0),24 },
+       { IPv4(209,1,113,0),24 },
+       { IPv4(209,1,128,0),24 },
+       { IPv4(209,2,0,0),16 },
+       { IPv4(209,2,36,0),22 },
+       { IPv4(209,2,40,0),24 },
+       { IPv4(209,2,47,0),24 },
+       { IPv4(209,2,48,0),24 },
+       { IPv4(209,2,49,0),24 },
+       { IPv4(209,2,50,0),24 },
+       { IPv4(209,2,51,0),24 },
+       { IPv4(209,2,68,0),22 },
+       { IPv4(209,2,90,0),24 },
+       { IPv4(209,2,92,0),24 },
+       { IPv4(209,2,93,0),24 },
+       { IPv4(209,2,105,0),24 },
+       { IPv4(209,2,125,0),24 },
+       { IPv4(209,2,128,0),24 },
+       { IPv4(209,2,129,0),24 },
+       { IPv4(209,2,130,0),24 },
+       { IPv4(209,2,131,0),24 },
+       { IPv4(209,2,132,0),24 },
+       { IPv4(209,2,133,0),24 },
+       { IPv4(209,2,138,0),24 },
+       { IPv4(209,2,139,0),24 },
+       { IPv4(209,2,143,0),24 },
+       { IPv4(209,2,146,0),24 },
+       { IPv4(209,2,156,0),24 },
+       { IPv4(209,2,160,0),21 },
+       { IPv4(209,2,185,0),24 },
+       { IPv4(209,2,187,0),24 },
+       { IPv4(209,2,208,0),24 },
+       { IPv4(209,2,209,0),24 },
+       { IPv4(209,2,210,0),24 },
+       { IPv4(209,2,211,0),24 },
+       { IPv4(209,2,212,0),24 },
+       { IPv4(209,2,213,0),24 },
+       { IPv4(209,2,216,0),24 },
+       { IPv4(209,2,217,0),24 },
+       { IPv4(209,2,218,0),24 },
+       { IPv4(209,2,219,0),24 },
+       { IPv4(209,2,220,0),24 },
+       { IPv4(209,2,221,0),24 },
+       { IPv4(209,2,222,0),24 },
+       { IPv4(209,2,223,0),24 },
+       { IPv4(209,2,224,0),24 },
+       { IPv4(209,2,225,0),24 },
+       { IPv4(209,2,226,0),24 },
+       { IPv4(209,2,227,0),24 },
+       { IPv4(209,2,228,0),24 },
+       { IPv4(209,2,229,0),24 },
+       { IPv4(209,2,230,0),24 },
+       { IPv4(209,2,231,0),24 },
+       { IPv4(209,2,232,0),24 },
+       { IPv4(209,2,233,0),24 },
+       { IPv4(209,2,234,0),24 },
+       { IPv4(209,2,235,0),24 },
+       { IPv4(209,2,236,0),24 },
+       { IPv4(209,2,237,0),24 },
+       { IPv4(209,2,238,0),24 },
+       { IPv4(209,2,239,0),24 },
+       { IPv4(209,2,253,0),24 },
+       { IPv4(209,2,254,0),24 },
+       { IPv4(209,3,118,0),24 },
+       { IPv4(209,3,198,0),24 },
+       { IPv4(209,4,228,0),24 },
+       { IPv4(209,4,250,0),23 },
+       { IPv4(209,4,252,0),23 },
+       { IPv4(209,4,254,0),23 },
+       { IPv4(209,6,0,0),16 },
+       { IPv4(209,6,161,0),24 },
+       { IPv4(209,7,0,0),16 },
+       { IPv4(209,8,48,0),22 },
+       { IPv4(209,8,80,0),23 },
+       { IPv4(209,8,192,0),22 },
+       { IPv4(209,10,0,0),16 },
+       { IPv4(209,10,0,0),19 },
+       { IPv4(209,10,14,0),23 },
+       { IPv4(209,10,16,0),22 },
+       { IPv4(209,10,16,0),24 },
+       { IPv4(209,10,24,0),21 },
+       { IPv4(209,10,32,0),20 },
+       { IPv4(209,10,42,128),25 },
+       { IPv4(209,10,48,0),21 },
+       { IPv4(209,10,51,0),24 },
+       { IPv4(209,10,56,0),21 },
+       { IPv4(209,10,64,0),19 },
+       { IPv4(209,10,94,0),24 },
+       { IPv4(209,10,96,0),19 },
+       { IPv4(209,10,123,0),24 },
+       { IPv4(209,10,125,0),24 },
+       { IPv4(209,10,128,0),20 },
+       { IPv4(209,10,128,0),23 },
+       { IPv4(209,10,130,0),23 },
+       { IPv4(209,10,144,0),21 },
+       { IPv4(209,10,146,0),24 },
+       { IPv4(209,10,152,0),22 },
+       { IPv4(209,10,156,0),22 },
+       { IPv4(209,10,160,0),21 },
+       { IPv4(209,10,168,0),21 },
+       { IPv4(209,10,176,0),20 },
+       { IPv4(209,10,180,0),24 },
+       { IPv4(209,10,192,0),21 },
+       { IPv4(209,10,200,0),22 },
+       { IPv4(209,10,204,0),22 },
+       { IPv4(209,10,208,0),20 },
+       { IPv4(209,10,214,0),24 },
+       { IPv4(209,10,224,0),20 },
+       { IPv4(209,10,228,128),25 },
+       { IPv4(209,10,240,0),20 },
+       { IPv4(209,10,244,0),23 },
+       { IPv4(209,10,252,0),24 },
+       { IPv4(209,11,0,0),22 },
+       { IPv4(209,11,0,0),17 },
+       { IPv4(209,11,4,0),22 },
+       { IPv4(209,11,5,128),25 },
+       { IPv4(209,11,8,0),21 },
+       { IPv4(209,11,16,0),20 },
+       { IPv4(209,11,32,0),19 },
+       { IPv4(209,11,56,0),24 },
+       { IPv4(209,11,64,0),19 },
+       { IPv4(209,11,81,0),24 },
+       { IPv4(209,11,96,0),20 },
+       { IPv4(209,11,98,0),24 },
+       { IPv4(209,11,112,0),20 },
+       { IPv4(209,11,121,0),24 },
+       { IPv4(209,11,122,0),24 },
+       { IPv4(209,11,128,0),19 },
+       { IPv4(209,11,135,0),24 },
+       { IPv4(209,11,160,0),21 },
+       { IPv4(209,11,160,0),19 },
+       { IPv4(209,11,176,0),20 },
+       { IPv4(209,11,192,0),19 },
+       { IPv4(209,11,216,0),21 },
+       { IPv4(209,12,0,0),16 },
+       { IPv4(209,12,38,0),24 },
+       { IPv4(209,12,61,0),24 },
+       { IPv4(209,12,62,0),23 },
+       { IPv4(209,12,65,0),24 },
+       { IPv4(209,12,74,0),24 },
+       { IPv4(209,12,75,0),24 },
+       { IPv4(209,12,118,0),24 },
+       { IPv4(209,12,138,0),23 },
+       { IPv4(209,12,140,0),22 },
+       { IPv4(209,12,144,0),21 },
+       { IPv4(209,12,152,0),24 },
+       { IPv4(209,12,183,0),24 },
+       { IPv4(209,13,0,0),16 },
+       { IPv4(209,14,136,0),24 },
+       { IPv4(209,16,128,0),18 },
+       { IPv4(209,17,64,0),19 },
+       { IPv4(209,17,96,0),24 },
+       { IPv4(209,17,192,0),19 },
+       { IPv4(209,18,128,0),17 },
+       { IPv4(209,19,0,0),17 },
+       { IPv4(209,19,4,0),24 },
+       { IPv4(209,19,5,0),24 },
+       { IPv4(209,19,68,0),24 },
+       { IPv4(209,19,75,0),24 },
+       { IPv4(209,19,84,0),24 },
+       { IPv4(209,19,139,0),24 },
+       { IPv4(209,19,192,0),18 },
+       { IPv4(209,19,212,0),24 },
+       { IPv4(209,20,64,0),19 },
+       { IPv4(209,21,0,0),18 },
+       { IPv4(209,21,104,0),21 },
+       { IPv4(209,21,128,0),17 },
+       { IPv4(209,21,136,0),21 },
+       { IPv4(209,21,144,0),21 },
+       { IPv4(209,22,2,0),24 },
+       { IPv4(209,22,6,0),24 },
+       { IPv4(209,22,7,0),24 },
+       { IPv4(209,22,8,0),24 },
+       { IPv4(209,22,25,0),24 },
+       { IPv4(209,22,37,0),24 },
+       { IPv4(209,22,47,0),24 },
+       { IPv4(209,22,51,0),24 },
+       { IPv4(209,22,60,0),24 },
+       { IPv4(209,22,153,0),24 },
+       { IPv4(209,22,161,0),24 },
+       { IPv4(209,22,162,0),24 },
+       { IPv4(209,22,181,0),24 },
+       { IPv4(209,22,182,0),24 },
+       { IPv4(209,22,186,0),23 },
+       { IPv4(209,22,212,0),24 },
+       { IPv4(209,22,213,0),24 },
+       { IPv4(209,22,214,0),24 },
+       { IPv4(209,22,215,0),24 },
+       { IPv4(209,22,216,0),24 },
+       { IPv4(209,22,217,0),24 },
+       { IPv4(209,22,218,0),24 },
+       { IPv4(209,22,219,0),24 },
+       { IPv4(209,23,80,0),20 },
+       { IPv4(209,23,82,0),24 },
+       { IPv4(209,24,0,0),16 },
+       { IPv4(209,25,0,0),23 },
+       { IPv4(209,25,24,0),24 },
+       { IPv4(209,25,30,0),24 },
+       { IPv4(209,25,32,0),23 },
+       { IPv4(209,25,34,0),24 },
+       { IPv4(209,25,40,0),24 },
+       { IPv4(209,25,41,0),24 },
+       { IPv4(209,25,42,0),24 },
+       { IPv4(209,25,43,0),24 },
+       { IPv4(209,25,85,0),24 },
+       { IPv4(209,25,86,0),23 },
+       { IPv4(209,25,91,0),24 },
+       { IPv4(209,25,92,0),24 },
+       { IPv4(209,25,92,0),23 },
+       { IPv4(209,25,93,0),24 },
+       { IPv4(209,25,98,0),23 },
+       { IPv4(209,25,100,0),24 },
+       { IPv4(209,25,124,0),24 },
+       { IPv4(209,25,128,0),18 },
+       { IPv4(209,25,192,0),19 },
+       { IPv4(209,25,224,0),20 },
+       { IPv4(209,25,240,0),20 },
+       { IPv4(209,26,32,0),22 },
+       { IPv4(209,26,178,0),23 },
+       { IPv4(209,26,182,0),24 },
+       { IPv4(209,27,3,0),24 },
+       { IPv4(209,27,102,0),24 },
+       { IPv4(209,27,137,0),24 },
+       { IPv4(209,27,197,0),24 },
+       { IPv4(209,27,198,0),23 },
+       { IPv4(209,27,236,0),22 },
+       { IPv4(209,27,240,0),24 },
+       { IPv4(209,27,244,0),23 },
+       { IPv4(209,27,244,0),22 },
+       { IPv4(209,27,246,0),23 },
+       { IPv4(209,27,248,0),21 },
+       { IPv4(209,28,0,0),16 },
+       { IPv4(209,28,6,0),24 },
+       { IPv4(209,28,9,0),24 },
+       { IPv4(209,28,16,0),24 },
+       { IPv4(209,28,34,0),24 },
+       { IPv4(209,28,56,0),24 },
+       { IPv4(209,28,69,0),24 },
+       { IPv4(209,28,71,0),24 },
+       { IPv4(209,28,75,0),24 },
+       { IPv4(209,28,82,0),24 },
+       { IPv4(209,28,174,0),24 },
+       { IPv4(209,30,0,0),24 },
+       { IPv4(209,31,0,0),16 },
+       { IPv4(209,31,80,0),24 },
+       { IPv4(209,31,128,0),21 },
+       { IPv4(209,32,0,0),16 },
+       { IPv4(209,32,92,0),22 },
+       { IPv4(209,32,128,0),19 },
+       { IPv4(209,32,224,0),22 },
+       { IPv4(209,34,0,0),19 },
+       { IPv4(209,34,32,0),19 },
+       { IPv4(209,35,0,0),16 },
+       { IPv4(209,36,0,0),15 },
+       { IPv4(209,36,53,0),24 },
+       { IPv4(209,36,95,0),24 },
+       { IPv4(209,36,112,0),24 },
+       { IPv4(209,36,113,0),24 },
+       { IPv4(209,36,114,0),24 },
+       { IPv4(209,36,115,0),24 },
+       { IPv4(209,36,128,0),24 },
+       { IPv4(209,37,4,0),24 },
+       { IPv4(209,37,80,0),24 },
+       { IPv4(209,37,81,0),24 },
+       { IPv4(209,37,82,0),24 },
+       { IPv4(209,37,83,0),24 },
+       { IPv4(209,37,85,0),24 },
+       { IPv4(209,37,93,0),24 },
+       { IPv4(209,37,138,0),24 },
+       { IPv4(209,37,145,0),24 },
+       { IPv4(209,39,0,0),16 },
+       { IPv4(209,39,118,0),24 },
+       { IPv4(209,39,119,0),24 },
+       { IPv4(209,40,192,0),21 },
+       { IPv4(209,41,0,0),18 },
+       { IPv4(209,41,64,0),18 },
+       { IPv4(209,41,128,0),20 },
+       { IPv4(209,41,128,0),19 },
+       { IPv4(209,41,160,0),20 },
+       { IPv4(209,41,176,0),21 },
+       { IPv4(209,41,192,0),18 },
+       { IPv4(209,41,207,0),24 },
+       { IPv4(209,41,224,0),24 },
+       { IPv4(209,41,244,0),24 },
+       { IPv4(209,41,247,0),24 },
+       { IPv4(209,42,32,0),20 },
+       { IPv4(209,43,128,0),17 },
+       { IPv4(209,43,130,0),24 },
+       { IPv4(209,43,250,0),24 },
+       { IPv4(209,44,14,0),24 },
+       { IPv4(209,44,64,0),18 },
+       { IPv4(209,44,73,0),24 },
+       { IPv4(209,44,99,0),24 },
+       { IPv4(209,44,100,0),24 },
+       { IPv4(209,44,106,0),24 },
+       { IPv4(209,44,107,0),24 },
+       { IPv4(209,44,108,0),24 },
+       { IPv4(209,44,109,0),24 },
+       { IPv4(209,44,119,0),24 },
+       { IPv4(209,44,124,0),24 },
+       { IPv4(209,45,128,0),24 },
+       { IPv4(209,45,129,0),24 },
+       { IPv4(209,45,130,0),24 },
+       { IPv4(209,45,200,0),23 },
+       { IPv4(209,45,202,0),23 },
+       { IPv4(209,46,0,0),17 },
+       { IPv4(209,46,128,0),17 },
+       { IPv4(209,46,129,0),24 },
+       { IPv4(209,46,140,0),24 },
+       { IPv4(209,46,141,0),24 },
+       { IPv4(209,46,146,0),24 },
+       { IPv4(209,46,147,0),24 },
+       { IPv4(209,47,88,0),24 },
+       { IPv4(209,47,172,0),24 },
+       { IPv4(209,47,192,0),24 },
+       { IPv4(209,48,11,0),24 },
+       { IPv4(209,49,80,0),21 },
+       { IPv4(209,49,88,0),22 },
+       { IPv4(209,49,100,0),23 },
+       { IPv4(209,49,168,0),24 },
+       { IPv4(209,49,172,0),22 },
+       { IPv4(209,50,35,0),24 },
+       { IPv4(209,50,37,0),24 },
+       { IPv4(209,50,44,0),24 },
+       { IPv4(209,50,45,0),24 },
+       { IPv4(209,50,46,0),24 },
+       { IPv4(209,50,128,0),19 },
+       { IPv4(209,50,192,0),19 },
+       { IPv4(209,50,224,0),19 },
+       { IPv4(209,51,0,0),19 },
+       { IPv4(209,51,48,0),20 },
+       { IPv4(209,51,128,0),19 },
+       { IPv4(209,51,160,0),19 },
+       { IPv4(209,51,224,0),19 },
+       { IPv4(209,51,227,0),24 },
+       { IPv4(209,51,228,0),24 },
+       { IPv4(209,51,239,0),24 },
+       { IPv4(209,51,240,0),24 },
+       { IPv4(209,51,255,0),24 },
+       { IPv4(209,54,28,0),22 },
+       { IPv4(209,54,32,0),22 },
+       { IPv4(209,54,36,0),22 },
+       { IPv4(209,54,42,0),24 },
+       { IPv4(209,54,53,0),24 },
+       { IPv4(209,54,72,0),21 },
+       { IPv4(209,54,93,0),24 },
+       { IPv4(209,54,111,0),24 },
+       { IPv4(209,54,123,0),24 },
+       { IPv4(209,54,192,0),20 },
+       { IPv4(209,54,196,0),24 },
+       { IPv4(209,55,64,0),18 },
+       { IPv4(209,55,128,0),24 },
+       { IPv4(209,55,255,0),24 },
+       { IPv4(209,56,0,0),16 },
+       { IPv4(209,56,96,0),21 },
+       { IPv4(209,57,0,0),16 },
+       { IPv4(209,57,144,0),21 },
+       { IPv4(209,58,60,0),24 },
+       { IPv4(209,58,61,0),24 },
+       { IPv4(209,58,68,0),23 },
+       { IPv4(209,58,76,0),24 },
+       { IPv4(209,58,84,0),24 },
+       { IPv4(209,58,140,0),24 },
+       { IPv4(209,58,224,0),20 },
+       { IPv4(209,60,0,0),16 },
+       { IPv4(209,60,1,0),24 },
+       { IPv4(209,60,14,0),24 },
+       { IPv4(209,60,15,0),24 },
+       { IPv4(209,60,16,0),23 },
+       { IPv4(209,60,34,0),24 },
+       { IPv4(209,60,70,0),24 },
+       { IPv4(209,60,71,0),24 },
+       { IPv4(209,60,72,0),23 },
+       { IPv4(209,60,77,0),24 },
+       { IPv4(209,60,88,0),24 },
+       { IPv4(209,60,90,0),24 },
+       { IPv4(209,60,140,0),23 },
+       { IPv4(209,60,142,0),24 },
+       { IPv4(209,60,160,0),24 },
+       { IPv4(209,60,164,0),24 },
+       { IPv4(209,60,167,0),24 },
+       { IPv4(209,60,170,0),24 },
+       { IPv4(209,60,223,0),24 },
+       { IPv4(209,60,242,0),24 },
+       { IPv4(209,60,252,0),23 },
+       { IPv4(209,60,254,0),24 },
+       { IPv4(209,61,85,0),24 },
+       { IPv4(209,61,86,0),24 },
+       { IPv4(209,61,128,0),18 },
+       { IPv4(209,61,192,0),19 },
+       { IPv4(209,61,224,0),20 },
+       { IPv4(209,62,30,0),23 },
+       { IPv4(209,62,32,0),23 },
+       { IPv4(209,62,35,0),24 },
+       { IPv4(209,62,36,0),22 },
+       { IPv4(209,62,40,0),22 },
+       { IPv4(209,62,44,0),24 },
+       { IPv4(209,62,45,0),24 },
+       { IPv4(209,64,0,0),15 },
+       { IPv4(209,64,11,0),24 },
+       { IPv4(209,64,25,0),24 },
+       { IPv4(209,64,139,0),24 },
+       { IPv4(209,64,142,0),24 },
+       { IPv4(209,64,152,0),22 },
+       { IPv4(209,64,156,0),23 },
+       { IPv4(209,64,181,0),24 },
+       { IPv4(209,64,182,0),24 },
+       { IPv4(209,64,202,0),24 },
+       { IPv4(209,65,16,0),24 },
+       { IPv4(209,65,17,0),24 },
+       { IPv4(209,65,18,0),24 },
+       { IPv4(209,65,19,0),24 },
+       { IPv4(209,65,36,0),22 },
+       { IPv4(209,66,0,0),19 },
+       { IPv4(209,66,64,0),18 },
+       { IPv4(209,66,100,0),23 },
+       { IPv4(209,67,18,0),24 },
+       { IPv4(209,67,42,0),24 },
+       { IPv4(209,67,48,0),22 },
+       { IPv4(209,67,152,0),24 },
+       { IPv4(209,68,0,0),18 },
+       { IPv4(209,68,128,0),19 },
+       { IPv4(209,68,192,0),18 },
+       { IPv4(209,69,0,0),16 },
+       { IPv4(209,70,0,0),16 },
+       { IPv4(209,70,175,0),24 },
+       { IPv4(209,72,0,0),24 },
+       { IPv4(209,72,0,0),16 },
+       { IPv4(209,72,132,0),24 },
+       { IPv4(209,72,133,0),24 },
+       { IPv4(209,72,134,0),24 },
+       { IPv4(209,72,135,0),24 },
+       { IPv4(209,72,136,0),24 },
+       { IPv4(209,72,137,0),24 },
+       { IPv4(209,72,138,0),24 },
+       { IPv4(209,72,139,0),24 },
+       { IPv4(209,72,140,0),24 },
+       { IPv4(209,72,141,0),24 },
+       { IPv4(209,72,142,0),24 },
+       { IPv4(209,72,143,0),24 },
+       { IPv4(209,72,144,0),24 },
+       { IPv4(209,72,145,0),24 },
+       { IPv4(209,72,149,0),24 },
+       { IPv4(209,72,150,0),24 },
+       { IPv4(209,72,151,0),24 },
+       { IPv4(209,72,152,0),24 },
+       { IPv4(209,72,154,0),24 },
+       { IPv4(209,72,155,0),24 },
+       { IPv4(209,72,156,0),24 },
+       { IPv4(209,72,157,0),24 },
+       { IPv4(209,72,158,0),24 },
+       { IPv4(209,72,159,0),24 },
+       { IPv4(209,72,160,0),24 },
+       { IPv4(209,72,161,0),24 },
+       { IPv4(209,72,162,0),24 },
+       { IPv4(209,72,163,0),24 },
+       { IPv4(209,72,164,0),24 },
+       { IPv4(209,72,165,0),24 },
+       { IPv4(209,73,0,0),18 },
+       { IPv4(209,73,40,0),24 },
+       { IPv4(209,73,64,0),18 },
+       { IPv4(209,73,192,0),18 },
+       { IPv4(209,74,0,0),18 },
+       { IPv4(209,74,96,0),19 },
+       { IPv4(209,74,128,0),18 },
+       { IPv4(209,74,148,0),24 },
+       { IPv4(209,74,155,0),24 },
+       { IPv4(209,74,224,0),20 },
+       { IPv4(209,75,0,0),16 },
+       { IPv4(209,75,4,0),22 },
+       { IPv4(209,75,112,0),21 },
+       { IPv4(209,78,0,0),19 },
+       { IPv4(209,79,64,0),19 },
+       { IPv4(209,80,64,0),24 },
+       { IPv4(209,80,65,0),24 },
+       { IPv4(209,80,66,0),23 },
+       { IPv4(209,80,68,0),24 },
+       { IPv4(209,80,72,0),24 },
+       { IPv4(209,80,76,0),24 },
+       { IPv4(209,80,80,0),24 },
+       { IPv4(209,80,88,0),24 },
+       { IPv4(209,80,116,0),24 },
+       { IPv4(209,80,118,0),24 },
+       { IPv4(209,80,120,0),24 },
+       { IPv4(209,80,122,0),24 },
+       { IPv4(209,81,0,0),18 },
+       { IPv4(209,81,55,0),24 },
+       { IPv4(209,81,56,0),24 },
+       { IPv4(209,81,57,0),24 },
+       { IPv4(209,81,58,0),24 },
+       { IPv4(209,81,59,0),24 },
+       { IPv4(209,81,60,0),24 },
+       { IPv4(209,81,61,0),24 },
+       { IPv4(209,81,64,0),19 },
+       { IPv4(209,81,69,0),24 },
+       { IPv4(209,81,139,0),24 },
+       { IPv4(209,81,164,0),22 },
+       { IPv4(209,81,204,0),22 },
+       { IPv4(209,81,216,0),22 },
+       { IPv4(209,82,0,0),17 },
+       { IPv4(209,83,1,0),24 },
+       { IPv4(209,83,16,0),23 },
+       { IPv4(209,83,165,0),24 },
+       { IPv4(209,83,168,0),23 },
+       { IPv4(209,84,64,0),21 },
+       { IPv4(209,84,182,0),24 },
+       { IPv4(209,84,183,0),24 },
+       { IPv4(209,85,0,0),16 },
+       { IPv4(209,86,0,0),21 },
+       { IPv4(209,86,0,0),16 },
+       { IPv4(209,86,8,0),22 },
+       { IPv4(209,86,32,0),19 },
+       { IPv4(209,87,128,0),20 },
+       { IPv4(209,87,144,0),20 },
+       { IPv4(209,87,192,0),20 },
+       { IPv4(209,88,112,0),24 },
+       { IPv4(209,88,113,0),24 },
+       { IPv4(209,88,114,0),24 },
+       { IPv4(209,88,164,0),22 },
+       { IPv4(209,88,165,0),24 },
+       { IPv4(209,90,32,0),22 },
+       { IPv4(209,90,36,0),24 },
+       { IPv4(209,90,38,0),24 },
+       { IPv4(209,90,39,0),24 },
+       { IPv4(209,90,128,0),18 },
+       { IPv4(209,90,192,0),18 },
+       { IPv4(209,91,128,0),18 },
+       { IPv4(209,91,141,0),24 },
+       { IPv4(209,91,147,0),24 },
+       { IPv4(209,91,148,0),24 },
+       { IPv4(209,91,149,0),24 },
+       { IPv4(209,91,155,0),24 },
+       { IPv4(209,91,156,0),24 },
+       { IPv4(209,91,158,0),24 },
+       { IPv4(209,91,167,0),24 },
+       { IPv4(209,93,0,0),17 },
+       { IPv4(209,93,4,0),24 },
+       { IPv4(209,93,12,0),24 },
+       { IPv4(209,93,15,0),24 },
+       { IPv4(209,93,21,0),24 },
+       { IPv4(209,93,22,0),24 },
+       { IPv4(209,93,24,0),24 },
+       { IPv4(209,93,31,0),24 },
+       { IPv4(209,93,45,0),24 },
+       { IPv4(209,93,46,0),24 },
+       { IPv4(209,93,63,0),24 },
+       { IPv4(209,93,64,0),24 },
+       { IPv4(209,93,72,0),24 },
+       { IPv4(209,93,102,0),24 },
+       { IPv4(209,93,128,0),18 },
+       { IPv4(209,93,128,0),24 },
+       { IPv4(209,93,133,0),24 },
+       { IPv4(209,93,179,0),24 },
+       { IPv4(209,93,185,0),24 },
+       { IPv4(209,93,192,0),19 },
+       { IPv4(209,93,192,0),24 },
+       { IPv4(209,93,195,0),24 },
+       { IPv4(209,93,196,0),24 },
+       { IPv4(209,93,201,0),24 },
+       { IPv4(209,93,206,0),24 },
+       { IPv4(209,93,216,0),24 },
+       { IPv4(209,93,224,0),19 },
+       { IPv4(209,93,226,0),24 },
+       { IPv4(209,93,235,0),24 },
+       { IPv4(209,93,237,0),24 },
+       { IPv4(209,93,238,0),24 },
+       { IPv4(209,93,254,0),24 },
+       { IPv4(209,94,0,0),19 },
+       { IPv4(209,94,96,0),19 },
+       { IPv4(209,94,128,0),19 },
+       { IPv4(209,94,211,0),24 },
+       { IPv4(209,95,0,0),19 },
+       { IPv4(209,95,32,0),19 },
+       { IPv4(209,95,95,0),24 },
+       { IPv4(209,98,0,0),16 },
+       { IPv4(209,98,16,0),20 },
+       { IPv4(209,98,70,0),24 },
+       { IPv4(209,98,89,0),24 },
+       { IPv4(209,98,97,0),24 },
+       { IPv4(209,98,132,0),22 },
+       { IPv4(209,98,164,0),24 },
+       { IPv4(209,99,0,0),17 },
+       { IPv4(209,99,232,0),23 },
+       { IPv4(209,100,32,0),21 },
+       { IPv4(209,100,32,0),24 },
+       { IPv4(209,100,33,0),24 },
+       { IPv4(209,100,35,0),24 },
+       { IPv4(209,100,36,0),24 },
+       { IPv4(209,100,37,0),24 },
+       { IPv4(209,100,38,0),24 },
+       { IPv4(209,100,39,0),24 },
+       { IPv4(209,100,42,0),24 },
+       { IPv4(209,100,104,0),21 },
+       { IPv4(209,100,120,0),21 },
+       { IPv4(209,100,156,0),22 },
+       { IPv4(209,101,20,0),24 },
+       { IPv4(209,101,24,0),24 },
+       { IPv4(209,101,32,0),24 },
+       { IPv4(209,101,39,0),24 },
+       { IPv4(209,101,40,0),24 },
+       { IPv4(209,101,64,0),21 },
+       { IPv4(209,101,254,0),24 },
+       { IPv4(209,102,21,0),24 },
+       { IPv4(209,102,23,0),24 },
+       { IPv4(209,102,28,0),24 },
+       { IPv4(209,102,72,0),22 },
+       { IPv4(209,102,76,0),23 },
+       { IPv4(209,102,92,0),22 },
+       { IPv4(209,102,96,0),22 },
+       { IPv4(209,102,192,0),19 },
+       { IPv4(209,103,128,0),19 },
+       { IPv4(209,104,32,0),24 },
+       { IPv4(209,104,33,0),24 },
+       { IPv4(209,104,34,0),23 },
+       { IPv4(209,104,36,0),22 },
+       { IPv4(209,104,42,0),23 },
+       { IPv4(209,104,44,0),24 },
+       { IPv4(209,104,61,0),24 },
+       { IPv4(209,104,62,0),24 },
+       { IPv4(209,104,63,0),24 },
+       { IPv4(209,105,0,0),17 },
+       { IPv4(209,106,0,0),17 },
+       { IPv4(209,106,128,0),18 },
+       { IPv4(209,106,192,0),19 },
+       { IPv4(209,106,224,0),19 },
+       { IPv4(209,107,0,0),18 },
+       { IPv4(209,107,31,0),24 },
+       { IPv4(209,107,64,0),19 },
+       { IPv4(209,107,128,0),18 },
+       { IPv4(209,108,0,0),15 },
+       { IPv4(209,108,0,0),14 },
+       { IPv4(209,108,96,0),20 },
+       { IPv4(209,109,8,0),22 },
+       { IPv4(209,109,28,0),22 },
+       { IPv4(209,109,56,0),21 },
+       { IPv4(209,109,59,0),24 },
+       { IPv4(209,109,130,0),23 },
+       { IPv4(209,109,133,0),24 },
+       { IPv4(209,109,134,0),24 },
+       { IPv4(209,109,140,0),23 },
+       { IPv4(209,109,144,0),23 },
+       { IPv4(209,109,150,0),24 },
+       { IPv4(209,109,224,0),21 },
+       { IPv4(209,110,97,0),24 },
+       { IPv4(209,111,0,0),24 },
+       { IPv4(209,111,5,0),24 },
+       { IPv4(209,111,6,0),23 },
+       { IPv4(209,111,6,0),24 },
+       { IPv4(209,111,216,0),24 },
+       { IPv4(209,111,217,0),24 },
+       { IPv4(209,111,218,0),24 },
+       { IPv4(209,111,219,0),24 },
+       { IPv4(209,111,220,0),24 },
+       { IPv4(209,112,0,0),18 },
+       { IPv4(209,112,96,0),20 },
+       { IPv4(209,112,128,0),18 },
+       { IPv4(209,112,192,0),19 },
+       { IPv4(209,113,128,0),17 },
+       { IPv4(209,113,170,0),24 },
+       { IPv4(209,114,0,0),18 },
+       { IPv4(209,114,128,0),18 },
+       { IPv4(209,114,189,0),24 },
+       { IPv4(209,115,0,0),17 },
+       { IPv4(209,115,25,0),24 },
+       { IPv4(209,115,29,0),24 },
+       { IPv4(209,115,38,0),24 },
+       { IPv4(209,115,39,0),24 },
+       { IPv4(209,115,53,0),24 },
+       { IPv4(209,115,94,0),24 },
+       { IPv4(209,115,120,0),22 },
+       { IPv4(209,116,0,0),21 },
+       { IPv4(209,116,118,0),24 },
+       { IPv4(209,116,172,0),24 },
+       { IPv4(209,117,106,0),24 },
+       { IPv4(209,117,122,0),24 },
+       { IPv4(209,117,156,0),24 },
+       { IPv4(209,117,158,0),24 },
+       { IPv4(209,117,200,0),22 },
+       { IPv4(209,117,204,0),23 },
+       { IPv4(209,117,206,0),23 },
+       { IPv4(209,117,208,0),21 },
+       { IPv4(209,118,28,0),24 },
+       { IPv4(209,118,74,0),24 },
+       { IPv4(209,118,138,0),23 },
+       { IPv4(209,118,182,0),24 },
+       { IPv4(209,118,183,0),24 },
+       { IPv4(209,118,231,0),24 },
+       { IPv4(209,118,248,0),22 },
+       { IPv4(209,119,36,0),23 },
+       { IPv4(209,119,93,0),24 },
+       { IPv4(209,119,196,0),22 },
+       { IPv4(209,119,226,0),23 },
+       { IPv4(209,119,228,0),22 },
+       { IPv4(209,122,0,0),16 },
+       { IPv4(209,123,0,0),16 },
+       { IPv4(209,123,45,0),24 },
+       { IPv4(209,123,72,0),24 },
+       { IPv4(209,123,73,0),24 },
+       { IPv4(209,123,74,0),24 },
+       { IPv4(209,123,75,0),24 },
+       { IPv4(209,123,190,0),23 },
+       { IPv4(209,123,219,0),24 },
+       { IPv4(209,124,0,0),19 },
+       { IPv4(209,124,64,0),19 },
+       { IPv4(209,124,96,0),20 },
+       { IPv4(209,124,128,0),19 },
+       { IPv4(209,124,192,0),19 },
+       { IPv4(209,124,224,0),19 },
+       { IPv4(209,125,0,0),16 },
+       { IPv4(209,125,17,0),24 },
+       { IPv4(209,125,47,0),24 },
+       { IPv4(209,125,49,0),24 },
+       { IPv4(209,125,62,0),24 },
+       { IPv4(209,125,93,0),24 },
+       { IPv4(209,125,149,0),24 },
+       { IPv4(209,126,128,0),19 },
+       { IPv4(209,126,160,0),20 },
+       { IPv4(209,126,176,0),20 },
+       { IPv4(209,128,64,0),19 },
+       { IPv4(209,128,96,0),19 },
+       { IPv4(209,128,192,0),19 },
+       { IPv4(209,129,0,0),16 },
+       { IPv4(209,129,40,0),22 },
+       { IPv4(209,129,44,0),23 },
+       { IPv4(209,130,0,0),17 },
+       { IPv4(209,130,152,0),24 },
+       { IPv4(209,130,153,0),24 },
+       { IPv4(209,130,154,0),24 },
+       { IPv4(209,130,155,0),24 },
+       { IPv4(209,130,156,0),24 },
+       { IPv4(209,130,157,0),24 },
+       { IPv4(209,130,158,0),24 },
+       { IPv4(209,130,159,0),24 },
+       { IPv4(209,131,96,0),22 },
+       { IPv4(209,131,96,0),20 },
+       { IPv4(209,131,100,0),22 },
+       { IPv4(209,132,0,0),17 },
+       { IPv4(209,132,207,0),24 },
+       { IPv4(209,132,212,0),24 },
+       { IPv4(209,132,213,0),24 },
+       { IPv4(209,132,214,0),24 },
+       { IPv4(209,133,0,0),17 },
+       { IPv4(209,133,21,0),24 },
+       { IPv4(209,133,28,0),23 },
+       { IPv4(209,133,38,0),24 },
+       { IPv4(209,133,50,0),24 },
+       { IPv4(209,133,93,0),24 },
+       { IPv4(209,133,117,0),24 },
+       { IPv4(209,133,128,0),18 },
+       { IPv4(209,134,128,0),19 },
+       { IPv4(209,134,160,0),19 },
+       { IPv4(209,135,192,0),18 },
+       { IPv4(209,136,0,0),16 },
+       { IPv4(209,136,21,0),24 },
+       { IPv4(209,136,22,0),24 },
+       { IPv4(209,136,26,0),24 },
+       { IPv4(209,136,27,0),24 },
+       { IPv4(209,136,28,0),24 },
+       { IPv4(209,136,29,0),24 },
+       { IPv4(209,136,30,0),24 },
+       { IPv4(209,136,31,0),24 },
+       { IPv4(209,136,32,0),24 },
+       { IPv4(209,136,33,0),24 },
+       { IPv4(209,136,34,0),24 },
+       { IPv4(209,136,35,0),24 },
+       { IPv4(209,136,36,0),24 },
+       { IPv4(209,136,64,0),24 },
+       { IPv4(209,136,70,0),24 },
+       { IPv4(209,136,72,0),24 },
+       { IPv4(209,136,81,0),24 },
+       { IPv4(209,136,82,0),24 },
+       { IPv4(209,136,164,0),22 },
+       { IPv4(209,136,168,0),23 },
+       { IPv4(209,136,170,0),24 },
+       { IPv4(209,136,249,0),24 },
+       { IPv4(209,136,250,0),24 },
+       { IPv4(209,136,251,0),24 },
+       { IPv4(209,136,252,0),24 },
+       { IPv4(209,137,128,0),19 },
+       { IPv4(209,137,136,0),21 },
+       { IPv4(209,137,144,0),21 },
+       { IPv4(209,137,152,0),21 },
+       { IPv4(209,137,160,0),20 },
+       { IPv4(209,137,192,0),19 },
+       { IPv4(209,138,65,0),24 },
+       { IPv4(209,139,0,0),17 },
+       { IPv4(209,139,128,0),18 },
+       { IPv4(209,140,168,0),21 },
+       { IPv4(209,140,192,0),19 },
+       { IPv4(209,141,4,0),24 },
+       { IPv4(209,141,26,0),24 },
+       { IPv4(209,141,28,0),24 },
+       { IPv4(209,141,66,0),24 },
+       { IPv4(209,141,67,0),24 },
+       { IPv4(209,141,72,0),21 },
+       { IPv4(209,141,104,0),24 },
+       { IPv4(209,141,112,0),21 },
+       { IPv4(209,141,123,0),24 },
+       { IPv4(209,141,180,0),22 },
+       { IPv4(209,141,184,0),24 },
+       { IPv4(209,141,228,0),23 },
+       { IPv4(209,141,241,0),24 },
+       { IPv4(209,143,0,0),18 },
+       { IPv4(209,144,20,0),23 },
+       { IPv4(209,144,52,0),24 },
+       { IPv4(209,144,54,0),24 },
+       { IPv4(209,144,55,0),24 },
+       { IPv4(209,144,136,0),23 },
+       { IPv4(209,144,210,0),24 },
+       { IPv4(209,144,211,0),24 },
+       { IPv4(209,144,219,0),24 },
+       { IPv4(209,146,63,0),24 },
+       { IPv4(209,146,128,0),20 },
+       { IPv4(209,146,128,0),17 },
+       { IPv4(209,146,128,0),18 },
+       { IPv4(209,146,144,0),21 },
+       { IPv4(209,146,147,0),24 },
+       { IPv4(209,146,152,0),22 },
+       { IPv4(209,146,155,0),24 },
+       { IPv4(209,146,156,0),24 },
+       { IPv4(209,146,160,0),20 },
+       { IPv4(209,146,164,0),24 },
+       { IPv4(209,146,171,0),24 },
+       { IPv4(209,146,172,0),24 },
+       { IPv4(209,146,173,0),24 },
+       { IPv4(209,146,176,0),22 },
+       { IPv4(209,146,178,0),23 },
+       { IPv4(209,146,182,0),24 },
+       { IPv4(209,146,184,0),21 },
+       { IPv4(209,146,188,0),24 },
+       { IPv4(209,146,192,0),19 },
+       { IPv4(209,146,203,0),24 },
+       { IPv4(209,146,224,0),20 },
+       { IPv4(209,146,230,0),24 },
+       { IPv4(209,146,231,0),24 },
+       { IPv4(209,146,240,0),21 },
+       { IPv4(209,146,244,0),24 },
+       { IPv4(209,146,248,0),24 },
+       { IPv4(209,146,249,0),24 },
+       { IPv4(209,146,250,0),24 },
+       { IPv4(209,146,251,0),24 },
+       { IPv4(209,146,252,0),24 },
+       { IPv4(209,146,253,0),24 },
+       { IPv4(209,147,0,0),18 },
+       { IPv4(209,147,64,0),19 },
+       { IPv4(209,147,128,0),18 },
+       { IPv4(209,149,164,0),23 },
+       { IPv4(209,150,32,0),19 },
+       { IPv4(209,150,88,0),22 },
+       { IPv4(209,150,160,0),19 },
+       { IPv4(209,151,0,0),19 },
+       { IPv4(209,151,32,0),19 },
+       { IPv4(209,151,128,0),20 },
+       { IPv4(209,151,192,0),22 },
+       { IPv4(209,151,196,0),22 },
+       { IPv4(209,151,200,0),22 },
+       { IPv4(209,151,204,0),22 },
+       { IPv4(209,151,208,0),22 },
+       { IPv4(209,151,212,0),22 },
+       { IPv4(209,151,224,0),19 },
+       { IPv4(209,152,64,0),18 },
+       { IPv4(209,152,192,0),19 },
+       { IPv4(209,153,192,0),18 },
+       { IPv4(209,153,205,0),24 },
+       { IPv4(209,154,100,0),24 },
+       { IPv4(209,155,0,0),16 },
+       { IPv4(209,155,25,0),24 },
+       { IPv4(209,155,26,0),23 },
+       { IPv4(209,155,28,0),22 },
+       { IPv4(209,155,42,0),24 },
+       { IPv4(209,155,43,0),24 },
+       { IPv4(209,155,59,0),24 },
+       { IPv4(209,155,75,0),24 },
+       { IPv4(209,155,76,0),24 },
+       { IPv4(209,155,88,0),24 },
+       { IPv4(209,155,110,0),24 },
+       { IPv4(209,155,118,0),24 },
+       { IPv4(209,155,124,0),24 },
+       { IPv4(209,155,125,0),24 },
+       { IPv4(209,155,144,0),24 },
+       { IPv4(209,155,145,0),24 },
+       { IPv4(209,155,146,0),24 },
+       { IPv4(209,155,147,0),24 },
+       { IPv4(209,155,156,0),24 },
+       { IPv4(209,155,162,0),23 },
+       { IPv4(209,155,168,0),22 },
+       { IPv4(209,155,185,0),24 },
+       { IPv4(209,155,192,0),23 },
+       { IPv4(209,155,198,0),24 },
+       { IPv4(209,155,199,0),24 },
+       { IPv4(209,155,204,0),22 },
+       { IPv4(209,155,224,0),21 },
+       { IPv4(209,155,238,0),24 },
+       { IPv4(209,157,0,0),16 },
+       { IPv4(209,160,82,0),24 },
+       { IPv4(209,161,0,0),18 },
+       { IPv4(209,161,32,0),19 },
+       { IPv4(209,161,64,0),19 },
+       { IPv4(209,161,96,0),20 },
+       { IPv4(209,162,64,0),18 },
+       { IPv4(209,162,128,0),19 },
+       { IPv4(209,162,202,0),24 },
+       { IPv4(209,163,0,0),18 },
+       { IPv4(209,163,232,0),21 },
+       { IPv4(209,163,244,0),22 },
+       { IPv4(209,163,248,0),22 },
+       { IPv4(209,164,0,0),18 },
+       { IPv4(209,164,128,0),18 },
+       { IPv4(209,165,192,0),19 },
+       { IPv4(209,165,224,0),22 },
+       { IPv4(209,165,230,0),23 },
+       { IPv4(209,165,236,0),24 },
+       { IPv4(209,166,128,0),18 },
+       { IPv4(209,168,0,0),17 },
+       { IPv4(209,168,7,0),24 },
+       { IPv4(209,168,8,0),24 },
+       { IPv4(209,168,63,0),24 },
+       { IPv4(209,170,0,0),18 },
+       { IPv4(209,170,192,0),19 },
+       { IPv4(209,170,224,0),19 },
+       { IPv4(209,172,64,0),18 },
+       { IPv4(209,172,224,0),19 },
+       { IPv4(209,173,0,0),19 },
+       { IPv4(209,173,32,0),24 },
+       { IPv4(209,173,57,0),24 },
+       { IPv4(209,173,58,0),24 },
+       { IPv4(209,173,64,0),20 },
+       { IPv4(209,173,128,0),19 },
+       { IPv4(209,173,160,0),19 },
+       { IPv4(209,174,0,0),16 },
+       { IPv4(209,175,0,0),16 },
+       { IPv4(209,175,208,0),21 },
+       { IPv4(209,176,0,0),22 },
+       { IPv4(209,176,16,0),22 },
+       { IPv4(209,176,198,0),24 },
+       { IPv4(209,176,248,0),22 },
+       { IPv4(209,177,0,0),18 },
+       { IPv4(209,177,6,0),23 },
+       { IPv4(209,177,22,0),23 },
+       { IPv4(209,177,29,0),24 },
+       { IPv4(209,177,41,0),24 },
+       { IPv4(209,177,42,0),24 },
+       { IPv4(209,177,43,0),24 },
+       { IPv4(209,177,44,0),24 },
+       { IPv4(209,177,58,0),24 },
+       { IPv4(209,177,64,0),19 },
+       { IPv4(209,177,94,0),24 },
+       { IPv4(209,177,192,0),24 },
+       { IPv4(209,177,192,0),18 },
+       { IPv4(209,177,192,0),19 },
+       { IPv4(209,177,193,0),24 },
+       { IPv4(209,177,194,0),23 },
+       { IPv4(209,177,196,0),24 },
+       { IPv4(209,177,197,0),24 },
+       { IPv4(209,177,198,0),24 },
+       { IPv4(209,177,199,0),24 },
+       { IPv4(209,177,200,0),24 },
+       { IPv4(209,177,201,0),24 },
+       { IPv4(209,177,202,0),24 },
+       { IPv4(209,177,203,0),24 },
+       { IPv4(209,177,204,0),24 },
+       { IPv4(209,177,205,0),24 },
+       { IPv4(209,177,206,0),24 },
+       { IPv4(209,177,207,0),24 },
+       { IPv4(209,177,208,0),24 },
+       { IPv4(209,177,209,0),24 },
+       { IPv4(209,177,210,0),24 },
+       { IPv4(209,177,212,0),24 },
+       { IPv4(209,177,213,0),24 },
+       { IPv4(209,177,214,0),24 },
+       { IPv4(209,177,216,0),22 },
+       { IPv4(209,177,220,0),23 },
+       { IPv4(209,177,222,0),24 },
+       { IPv4(209,177,223,0),24 },
+       { IPv4(209,178,0,0),17 },
+       { IPv4(209,178,128,0),18 },
+       { IPv4(209,178,213,0),24 },
+       { IPv4(209,179,0,0),16 },
+       { IPv4(209,180,0,0),15 },
+       { IPv4(209,180,28,0),24 },
+       { IPv4(209,180,220,0),22 },
+       { IPv4(209,182,192,0),21 },
+       { IPv4(209,182,200,0),21 },
+       { IPv4(209,182,208,0),21 },
+       { IPv4(209,182,248,0),21 },
+       { IPv4(209,183,0,0),19 },
+       { IPv4(209,183,48,0),21 },
+       { IPv4(209,183,192,0),18 },
+       { IPv4(209,185,128,0),24 },
+       { IPv4(209,185,129,0),24 },
+       { IPv4(209,185,130,0),23 },
+       { IPv4(209,185,149,0),24 },
+       { IPv4(209,185,240,0),22 },
+       { IPv4(209,186,0,0),15 },
+       { IPv4(209,186,0,0),24 },
+       { IPv4(209,186,12,0),24 },
+       { IPv4(209,186,13,0),24 },
+       { IPv4(209,186,14,0),24 },
+       { IPv4(209,186,15,0),24 },
+       { IPv4(209,186,19,0),24 },
+       { IPv4(209,186,58,0),23 },
+       { IPv4(209,186,64,0),24 },
+       { IPv4(209,186,80,0),22 },
+       { IPv4(209,186,84,0),23 },
+       { IPv4(209,186,103,0),24 },
+       { IPv4(209,186,118,0),24 },
+       { IPv4(209,186,132,0),23 },
+       { IPv4(209,186,142,0),24 },
+       { IPv4(209,186,148,0),24 },
+       { IPv4(209,186,149,0),24 },
+       { IPv4(209,186,150,0),24 },
+       { IPv4(209,186,151,0),24 },
+       { IPv4(209,186,186,0),24 },
+       { IPv4(209,186,187,0),24 },
+       { IPv4(209,186,188,0),24 },
+       { IPv4(209,186,189,0),24 },
+       { IPv4(209,186,190,0),23 },
+       { IPv4(209,186,197,0),24 },
+       { IPv4(209,186,240,0),21 },
+       { IPv4(209,186,248,0),22 },
+       { IPv4(209,187,22,0),24 },
+       { IPv4(209,187,49,0),24 },
+       { IPv4(209,187,50,0),23 },
+       { IPv4(209,187,76,0),23 },
+       { IPv4(209,187,78,0),23 },
+       { IPv4(209,187,89,0),24 },
+       { IPv4(209,187,90,0),24 },
+       { IPv4(209,187,112,0),21 },
+       { IPv4(209,187,137,0),24 },
+       { IPv4(209,187,140,0),24 },
+       { IPv4(209,187,141,0),24 },
+       { IPv4(209,187,142,0),24 },
+       { IPv4(209,187,143,0),24 },
+       { IPv4(209,187,160,0),24 },
+       { IPv4(209,187,161,0),24 },
+       { IPv4(209,187,162,0),24 },
+       { IPv4(209,187,163,0),24 },
+       { IPv4(209,187,164,0),24 },
+       { IPv4(209,187,165,0),24 },
+       { IPv4(209,187,166,0),24 },
+       { IPv4(209,187,167,0),24 },
+       { IPv4(209,187,168,0),22 },
+       { IPv4(209,187,176,0),20 },
+       { IPv4(209,187,200,0),22 },
+       { IPv4(209,187,207,0),24 },
+       { IPv4(209,187,208,0),21 },
+       { IPv4(209,187,216,0),24 },
+       { IPv4(209,187,217,0),24 },
+       { IPv4(209,187,218,0),24 },
+       { IPv4(209,187,219,0),24 },
+       { IPv4(209,187,220,0),24 },
+       { IPv4(209,187,221,0),24 },
+       { IPv4(209,187,224,0),19 },
+       { IPv4(209,189,0,0),17 },
+       { IPv4(209,189,128,0),18 },
+       { IPv4(209,190,128,0),19 },
+       { IPv4(209,190,160,0),19 },
+       { IPv4(209,190,192,0),18 },
+       { IPv4(209,191,78,0),24 },
+       { IPv4(209,191,84,0),24 },
+       { IPv4(209,191,103,0),24 },
+       { IPv4(209,191,116,0),24 },
+       { IPv4(209,191,119,0),24 },
+       { IPv4(209,191,128,0),19 },
+       { IPv4(209,191,133,0),24 },
+       { IPv4(209,191,136,0),24 },
+       { IPv4(209,191,138,0),24 },
+       { IPv4(209,191,139,0),24 },
+       { IPv4(209,191,142,0),24 },
+       { IPv4(209,191,150,0),23 },
+       { IPv4(209,191,152,0),24 },
+       { IPv4(209,191,153,0),24 },
+       { IPv4(209,191,155,0),24 },
+       { IPv4(209,191,160,0),19 },
+       { IPv4(209,191,164,0),23 },
+       { IPv4(209,191,167,0),24 },
+       { IPv4(209,191,168,0),21 },
+       { IPv4(209,191,168,0),24 },
+       { IPv4(209,191,172,0),24 },
+       { IPv4(209,191,173,0),24 },
+       { IPv4(209,191,176,0),22 },
+       { IPv4(209,191,180,0),24 },
+       { IPv4(209,191,182,0),23 },
+       { IPv4(209,191,192,0),19 },
+       { IPv4(209,192,20,0),22 },
+       { IPv4(209,192,48,0),22 },
+       { IPv4(209,192,210,0),24 },
+       { IPv4(209,193,0,0),18 },
+       { IPv4(209,193,64,0),19 },
+       { IPv4(209,193,95,0),24 },
+       { IPv4(209,193,96,0),20 },
+       { IPv4(209,193,128,0),17 },
+       { IPv4(209,194,0,0),16 },
+       { IPv4(209,194,53,0),24 },
+       { IPv4(209,194,68,0),24 },
+       { IPv4(209,194,69,0),24 },
+       { IPv4(209,194,70,0),24 },
+       { IPv4(209,194,71,0),24 },
+       { IPv4(209,194,164,0),24 },
+       { IPv4(209,194,165,0),24 },
+       { IPv4(209,194,166,0),24 },
+       { IPv4(209,194,167,0),24 },
+       { IPv4(209,194,173,0),24 },
+       { IPv4(209,194,192,0),24 },
+       { IPv4(209,194,193,0),24 },
+       { IPv4(209,194,194,0),24 },
+       { IPv4(209,194,194,0),29 },
+       { IPv4(209,194,195,0),24 },
+       { IPv4(209,194,196,0),24 },
+       { IPv4(209,194,197,0),24 },
+       { IPv4(209,194,203,0),24 },
+       { IPv4(209,194,212,0),22 },
+       { IPv4(209,194,216,0),24 },
+       { IPv4(209,195,0,0),19 },
+       { IPv4(209,195,32,0),19 },
+       { IPv4(209,195,64,0),18 },
+       { IPv4(209,195,192,0),19 },
+       { IPv4(209,195,192,0),18 },
+       { IPv4(209,195,224,0),19 },
+       { IPv4(209,196,192,0),24 },
+       { IPv4(209,196,192,0),19 },
+       { IPv4(209,197,0,0),19 },
+       { IPv4(209,197,64,0),18 },
+       { IPv4(209,198,64,0),18 },
+       { IPv4(209,198,192,0),19 },
+       { IPv4(209,198,197,0),24 },
+       { IPv4(209,198,198,0),24 },
+       { IPv4(209,198,199,0),24 },
+       { IPv4(209,198,202,0),23 },
+       { IPv4(209,198,206,0),24 },
+       { IPv4(209,198,209,0),24 },
+       { IPv4(209,198,224,0),20 },
+       { IPv4(209,198,225,0),24 },
+       { IPv4(209,198,232,0),24 },
+       { IPv4(209,198,233,0),24 },
+       { IPv4(209,198,235,0),24 },
+       { IPv4(209,198,237,0),24 },
+       { IPv4(209,198,238,0),24 },
+       { IPv4(209,198,239,0),24 },
+       { IPv4(209,198,240,0),21 },
+       { IPv4(209,198,240,0),23 },
+       { IPv4(209,198,243,0),24 },
+       { IPv4(209,198,245,0),24 },
+       { IPv4(209,198,247,0),24 },
+       { IPv4(209,198,248,0),21 },
+       { IPv4(209,198,250,0),23 },
+       { IPv4(209,198,253,0),24 },
+       { IPv4(209,203,0,0),18 },
+       { IPv4(209,203,80,0),21 },
+       { IPv4(209,203,86,0),24 },
+       { IPv4(209,203,88,0),21 },
+       { IPv4(209,203,92,0),23 },
+       { IPv4(209,203,192,0),19 },
+       { IPv4(209,203,214,0),24 },
+       { IPv4(209,205,71,0),24 },
+       { IPv4(209,205,81,0),24 },
+       { IPv4(209,205,82,0),24 },
+       { IPv4(209,206,168,0),24 },
+       { IPv4(209,206,172,0),24 },
+       { IPv4(209,206,240,0),22 },
+       { IPv4(209,207,128,0),17 },
+       { IPv4(209,208,128,0),17 },
+       { IPv4(209,208,207,0),24 },
+       { IPv4(209,208,210,0),24 },
+       { IPv4(209,208,228,0),24 },
+       { IPv4(209,208,249,0),24 },
+       { IPv4(209,208,250,0),24 },
+       { IPv4(209,209,64,0),19 },
+       { IPv4(209,209,224,0),19 },
+       { IPv4(209,209,248,0),23 },
+       { IPv4(209,209,250,0),23 },
+       { IPv4(209,210,120,0),21 },
+       { IPv4(209,210,228,0),22 },
+       { IPv4(209,210,251,0),24 },
+       { IPv4(209,211,30,0),23 },
+       { IPv4(209,211,107,0),24 },
+       { IPv4(209,211,110,0),24 },
+       { IPv4(209,211,129,0),24 },
+       { IPv4(209,211,143,0),24 },
+       { IPv4(209,211,168,0),23 },
+       { IPv4(209,211,177,0),24 },
+       { IPv4(209,211,188,0),24 },
+       { IPv4(209,211,199,0),24 },
+       { IPv4(209,211,200,0),24 },
+       { IPv4(209,211,201,0),24 },
+       { IPv4(209,211,202,0),24 },
+       { IPv4(209,211,203,0),24 },
+       { IPv4(209,211,204,0),24 },
+       { IPv4(209,212,96,0),19 },
+       { IPv4(209,212,127,0),24 },
+       { IPv4(209,213,32,0),19 },
+       { IPv4(209,213,33,0),24 },
+       { IPv4(209,213,34,0),24 },
+       { IPv4(209,213,43,0),24 },
+       { IPv4(209,213,45,0),24 },
+       { IPv4(209,213,47,0),24 },
+       { IPv4(209,213,51,0),24 },
+       { IPv4(209,213,64,0),19 },
+       { IPv4(209,213,94,0),23 },
+       { IPv4(209,213,96,0),19 },
+       { IPv4(209,213,194,0),24 },
+       { IPv4(209,213,195,0),24 },
+       { IPv4(209,213,198,0),24 },
+       { IPv4(209,216,0,0),18 },
+       { IPv4(209,216,96,0),19 },
+       { IPv4(209,216,192,0),18 },
+       { IPv4(209,217,32,0),20 },
+       { IPv4(209,217,48,0),21 },
+       { IPv4(209,217,64,0),18 },
+       { IPv4(209,217,128,0),18 },
+       { IPv4(209,217,192,0),19 },
+       { IPv4(209,218,0,0),15 },
+       { IPv4(209,218,32,0),23 },
+       { IPv4(209,218,54,0),24 },
+       { IPv4(209,218,64,0),22 },
+       { IPv4(209,218,90,0),24 },
+       { IPv4(209,218,160,0),22 },
+       { IPv4(209,218,201,0),24 },
+       { IPv4(209,218,206,0),24 },
+       { IPv4(209,219,69,0),24 },
+       { IPv4(209,219,188,0),22 },
+       { IPv4(209,219,210,0),24 },
+       { IPv4(209,219,240,0),24 },
+       { IPv4(209,219,241,0),24 },
+       { IPv4(209,219,242,0),24 },
+       { IPv4(209,219,243,0),24 },
+       { IPv4(209,220,0,0),16 },
+       { IPv4(209,220,18,0),23 },
+       { IPv4(209,220,96,0),24 },
+       { IPv4(209,220,118,0),24 },
+       { IPv4(209,220,178,0),24 },
+       { IPv4(209,220,182,0),23 },
+       { IPv4(209,221,136,0),22 },
+       { IPv4(209,221,140,0),24 },
+       { IPv4(209,221,166,0),23 },
+       { IPv4(209,221,192,0),19 },
+       { IPv4(209,221,224,0),24 },
+       { IPv4(209,221,225,0),24 },
+       { IPv4(209,221,226,0),24 },
+       { IPv4(209,222,32,0),20 },
+       { IPv4(209,222,64,0),18 },
+       { IPv4(209,223,100,0),24 },
+       { IPv4(209,223,131,0),24 },
+       { IPv4(209,223,152,0),23 },
+       { IPv4(209,223,183,0),24 },
+       { IPv4(209,223,189,0),24 },
+       { IPv4(209,223,200,0),21 },
+       { IPv4(209,224,160,0),24 },
+       { IPv4(209,224,161,0),24 },
+       { IPv4(209,224,162,0),24 },
+       { IPv4(209,224,163,0),24 },
+       { IPv4(209,224,164,0),24 },
+       { IPv4(209,224,165,0),24 },
+       { IPv4(209,224,166,0),24 },
+       { IPv4(209,224,167,0),24 },
+       { IPv4(209,224,204,0),22 },
+       { IPv4(209,224,208,0),21 },
+       { IPv4(209,224,223,0),24 },
+       { IPv4(209,225,36,0),22 },
+       { IPv4(209,225,49,0),24 },
+       { IPv4(209,225,128,0),18 },
+       { IPv4(209,227,0,0),17 },
+       { IPv4(209,227,18,0),24 },
+       { IPv4(209,227,36,0),24 },
+       { IPv4(209,227,62,0),24 },
+       { IPv4(209,227,128,0),18 },
+       { IPv4(209,227,128,0),19 },
+       { IPv4(209,227,130,0),23 },
+       { IPv4(209,227,132,0),22 },
+       { IPv4(209,227,136,0),23 },
+       { IPv4(209,227,138,0),23 },
+       { IPv4(209,227,140,0),23 },
+       { IPv4(209,227,144,0),22 },
+       { IPv4(209,227,148,0),23 },
+       { IPv4(209,227,152,0),23 },
+       { IPv4(209,227,154,0),23 },
+       { IPv4(209,227,160,0),19 },
+       { IPv4(209,227,166,0),23 },
+       { IPv4(209,227,188,0),22 },
+       { IPv4(209,227,188,0),23 },
+       { IPv4(209,227,190,0),23 },
+       { IPv4(209,227,192,0),18 },
+       { IPv4(209,228,22,0),24 },
+       { IPv4(209,228,176,0),20 },
+       { IPv4(209,229,80,0),20 },
+       { IPv4(209,232,144,0),20 },
+       { IPv4(209,233,156,0),22 },
+       { IPv4(209,234,0,0),18 },
+       { IPv4(209,234,64,0),19 },
+       { IPv4(209,234,88,0),24 },
+       { IPv4(209,234,89,0),24 },
+       { IPv4(209,234,96,0),20 },
+       { IPv4(209,234,128,0),21 },
+       { IPv4(209,234,128,0),24 },
+       { IPv4(209,234,130,0),23 },
+       { IPv4(209,234,132,0),23 },
+       { IPv4(209,234,134,0),24 },
+       { IPv4(209,234,136,0),21 },
+       { IPv4(209,234,139,0),24 },
+       { IPv4(209,234,144,0),21 },
+       { IPv4(209,234,147,0),24 },
+       { IPv4(209,234,150,0),24 },
+       { IPv4(209,234,155,0),24 },
+       { IPv4(209,234,168,0),22 },
+       { IPv4(209,234,176,0),22 },
+       { IPv4(209,234,176,0),21 },
+       { IPv4(209,234,184,0),21 },
+       { IPv4(209,234,186,0),24 },
+       { IPv4(209,234,192,0),21 },
+       { IPv4(209,234,194,0),24 },
+       { IPv4(209,234,196,0),24 },
+       { IPv4(209,234,199,0),24 },
+       { IPv4(209,234,216,0),24 },
+       { IPv4(209,234,217,0),24 },
+       { IPv4(209,234,218,0),23 },
+       { IPv4(209,234,218,0),24 },
+       { IPv4(209,234,219,0),24 },
+       { IPv4(209,234,220,0),22 },
+       { IPv4(209,236,0,0),18 },
+       { IPv4(209,236,64,0),19 },
+       { IPv4(209,236,96,0),19 },
+       { IPv4(209,236,170,0),24 },
+       { IPv4(209,236,171,0),24 },
+       { IPv4(209,236,194,0),24 },
+       { IPv4(209,237,0,0),18 },
+       { IPv4(209,237,12,0),24 },
+       { IPv4(209,237,13,0),24 },
+       { IPv4(209,237,14,0),24 },
+       { IPv4(209,237,15,0),24 },
+       { IPv4(209,237,56,0),23 },
+       { IPv4(209,237,58,0),24 },
+       { IPv4(209,237,59,0),24 },
+       { IPv4(209,237,103,0),24 },
+       { IPv4(209,237,104,0),24 },
+       { IPv4(209,237,105,0),24 },
+       { IPv4(209,237,106,0),24 },
+       { IPv4(209,237,107,0),24 },
+       { IPv4(209,238,0,0),16 },
+       { IPv4(209,239,64,0),19 },
+       { IPv4(209,239,80,0),24 },
+       { IPv4(209,239,128,0),19 },
+       { IPv4(209,240,32,0),19 },
+       { IPv4(209,240,96,0),19 },
+       { IPv4(209,240,128,0),19 },
+       { IPv4(209,240,192,0),19 },
+       { IPv4(209,240,198,0),24 },
+       { IPv4(209,240,199,0),24 },
+       { IPv4(209,240,224,0),19 },
+       { IPv4(209,241,0,0),24 },
+       { IPv4(209,241,158,0),24 },
+       { IPv4(209,241,222,0),23 },
+       { IPv4(209,241,234,0),24 },
+       { IPv4(209,241,235,0),24 },
+       { IPv4(209,241,243,0),24 },
+       { IPv4(209,241,244,0),24 },
+       { IPv4(209,241,245,0),24 },
+       { IPv4(209,242,128,0),19 },
+       { IPv4(209,242,160,0),20 },
+       { IPv4(209,243,32,0),20 },
+       { IPv4(209,243,92,0),24 },
+       { IPv4(209,243,94,0),24 },
+       { IPv4(209,243,101,0),24 },
+       { IPv4(209,243,102,0),23 },
+       { IPv4(209,243,107,0),24 },
+       { IPv4(209,243,109,0),24 },
+       { IPv4(209,243,110,0),24 },
+       { IPv4(209,244,203,0),24 },
+       { IPv4(209,244,216,0),23 },
+       { IPv4(209,245,21,0),24 },
+       { IPv4(209,245,89,0),24 },
+       { IPv4(209,246,37,0),24 },
+       { IPv4(209,246,151,0),24 },
+       { IPv4(209,247,96,0),23 },
+       { IPv4(209,248,64,0),18 },
+       { IPv4(209,249,0,0),16 },
+       { IPv4(209,249,2,0),24 },
+       { IPv4(209,249,51,0),24 },
+       { IPv4(209,249,70,0),24 },
+       { IPv4(209,249,76,0),23 },
+       { IPv4(209,249,92,0),22 },
+       { IPv4(209,249,113,0),24 },
+       { IPv4(209,249,114,0),24 },
+       { IPv4(209,249,173,0),24 },
+       { IPv4(209,249,174,0),24 },
+       { IPv4(209,249,239,0),24 },
+       { IPv4(209,249,240,0),24 },
+       { IPv4(209,249,246,0),23 },
+       { IPv4(209,249,250,0),24 },
+       { IPv4(209,250,160,0),19 },
+       { IPv4(209,251,32,0),21 },
+       { IPv4(209,251,40,0),21 },
+       { IPv4(209,251,192,0),19 },
+       { IPv4(210,4,24,0),24 },
+       { IPv4(210,4,25,0),24 },
+       { IPv4(210,4,26,0),24 },
+       { IPv4(210,4,27,0),24 },
+       { IPv4(210,4,56,0),24 },
+       { IPv4(210,4,128,0),20 },
+       { IPv4(210,4,144,0),20 },
+       { IPv4(210,7,96,0),19 },
+       { IPv4(210,7,104,0),22 },
+       { IPv4(210,7,112,0),22 },
+       { IPv4(210,7,128,0),19 },
+       { IPv4(210,7,160,0),19 },
+       { IPv4(210,7,191,0),24 },
+       { IPv4(210,7,192,0),19 },
+       { IPv4(210,7,199,0),24 },
+       { IPv4(210,7,221,0),24 },
+       { IPv4(210,7,222,0),24 },
+       { IPv4(210,7,224,0),19 },
+       { IPv4(210,8,0,0),14 },
+       { IPv4(210,8,4,0),23 },
+       { IPv4(210,8,30,0),23 },
+       { IPv4(210,9,16,0),20 },
+       { IPv4(210,9,44,0),22 },
+       { IPv4(210,10,124,0),24 },
+       { IPv4(210,10,124,0),22 },
+       { IPv4(210,10,125,0),24 },
+       { IPv4(210,10,126,0),24 },
+       { IPv4(210,10,127,0),24 },
+       { IPv4(210,12,32,0),19 },
+       { IPv4(210,15,80,0),20 },
+       { IPv4(210,15,88,0),21 },
+       { IPv4(210,16,0,0),20 },
+       { IPv4(210,16,0,0),17 },
+       { IPv4(210,16,16,0),20 },
+       { IPv4(210,16,32,0),20 },
+       { IPv4(210,16,48,0),20 },
+       { IPv4(210,16,64,0),22 },
+       { IPv4(210,16,68,0),22 },
+       { IPv4(210,16,72,0),22 },
+       { IPv4(210,16,80,0),22 },
+       { IPv4(210,16,84,0),22 },
+       { IPv4(210,16,100,0),24 },
+       { IPv4(210,16,101,0),24 },
+       { IPv4(210,16,102,0),24 },
+       { IPv4(210,16,103,0),24 },
+       { IPv4(210,16,104,0),24 },
+       { IPv4(210,16,127,0),24 },
+       { IPv4(210,17,0,0),17 },
+       { IPv4(210,18,0,0),17 },
+       { IPv4(210,18,64,0),20 },
+       { IPv4(210,18,64,0),21 },
+       { IPv4(210,18,76,0),22 },
+       { IPv4(210,18,80,0),20 },
+       { IPv4(210,18,88,0),21 },
+       { IPv4(210,18,92,0),22 },
+       { IPv4(210,18,96,0),20 },
+       { IPv4(210,18,96,0),21 },
+       { IPv4(210,18,104,0),23 },
+       { IPv4(210,19,0,0),18 },
+       { IPv4(210,19,0,0),19 },
+       { IPv4(210,19,16,0),20 },
+       { IPv4(210,19,32,0),19 },
+       { IPv4(210,19,48,0),20 },
+       { IPv4(210,19,64,0),19 },
+       { IPv4(210,19,64,0),18 },
+       { IPv4(210,19,96,0),19 },
+       { IPv4(210,21,0,0),16 },
+       { IPv4(210,22,0,0),16 },
+       { IPv4(210,23,96,0),19 },
+       { IPv4(210,23,112,0),20 },
+       { IPv4(210,23,115,0),24 },
+       { IPv4(210,23,128,0),19 },
+       { IPv4(210,23,133,0),24 },
+       { IPv4(210,23,142,0),23 },
+       { IPv4(210,23,144,0),22 },
+       { IPv4(210,23,154,0),23 },
+       { IPv4(210,23,156,0),23 },
+       { IPv4(210,23,192,0),19 },
+       { IPv4(210,23,208,0),20 },
+       { IPv4(210,23,208,0),22 },
+       { IPv4(210,23,235,0),24 },
+       { IPv4(210,23,239,0),24 },
+       { IPv4(210,23,240,0),20 },
+       { IPv4(210,23,253,0),24 },
+       { IPv4(210,23,254,0),23 },
+       { IPv4(210,24,64,0),18 },
+       { IPv4(210,24,208,0),20 },
+       { IPv4(210,24,224,0),22 },
+       { IPv4(210,25,0,0),17 },
+       { IPv4(210,50,30,0),24 },
+       { IPv4(210,50,48,0),23 },
+       { IPv4(210,50,51,0),24 },
+       { IPv4(210,50,52,0),22 },
+       { IPv4(210,50,56,0),21 },
+       { IPv4(210,50,64,0),20 },
+       { IPv4(210,50,104,0),21 },
+       { IPv4(210,50,124,0),22 },
+       { IPv4(210,51,0,0),16 },
+       { IPv4(210,52,0,0),16 },
+       { IPv4(210,53,0,0),16 },
+       { IPv4(210,54,128,0),17 },
+       { IPv4(210,54,211,0),24 },
+       { IPv4(210,55,0,0),17 },
+       { IPv4(210,55,5,0),24 },
+       { IPv4(210,55,111,0),24 },
+       { IPv4(210,55,128,0),17 },
+       { IPv4(210,55,155,0),24 },
+       { IPv4(210,55,157,0),24 },
+       { IPv4(210,55,202,0),24 },
+       { IPv4(210,55,254,0),24 },
+       { IPv4(210,56,0,0),19 },
+       { IPv4(210,56,2,0),24 },
+       { IPv4(210,56,6,0),24 },
+       { IPv4(210,56,7,0),24 },
+       { IPv4(210,56,9,0),24 },
+       { IPv4(210,56,10,0),24 },
+       { IPv4(210,56,15,0),24 },
+       { IPv4(210,56,16,0),24 },
+       { IPv4(210,56,17,0),24 },
+       { IPv4(210,56,18,0),24 },
+       { IPv4(210,56,19,0),24 },
+       { IPv4(210,56,20,0),24 },
+       { IPv4(210,56,21,0),24 },
+       { IPv4(210,56,22,0),24 },
+       { IPv4(210,56,23,0),24 },
+       { IPv4(210,58,0,0),16 },
+       { IPv4(210,58,0,0),18 },
+       { IPv4(210,58,64,0),18 },
+       { IPv4(210,58,128,0),18 },
+       { IPv4(210,58,192,0),18 },
+       { IPv4(210,59,0,0),17 },
+       { IPv4(210,60,0,0),16 },
+       { IPv4(210,60,225,0),24 },
+       { IPv4(210,62,64,0),19 },
+       { IPv4(210,62,128,0),19 },
+       { IPv4(210,62,160,0),20 },
+       { IPv4(210,62,224,0),20 },
+       { IPv4(210,62,240,0),21 },
+       { IPv4(210,63,64,0),18 },
+       { IPv4(210,64,0,0),24 },
+       { IPv4(210,64,0,0),16 },
+       { IPv4(210,64,0,0),18 },
+       { IPv4(210,64,192,0),18 },
+       { IPv4(210,66,0,0),16 },
+       { IPv4(210,66,64,0),18 },
+       { IPv4(210,66,128,0),18 },
+       { IPv4(210,67,64,0),19 },
+       { IPv4(210,67,248,0),21 },
+       { IPv4(210,68,0,0),16 },
+       { IPv4(210,68,0,0),24 },
+       { IPv4(210,69,0,0),16 },
+       { IPv4(210,70,0,0),16 },
+       { IPv4(210,71,0,0),17 },
+       { IPv4(210,72,0,0),19 },
+       { IPv4(210,72,32,0),19 },
+       { IPv4(210,72,64,0),18 },
+       { IPv4(210,72,128,0),19 },
+       { IPv4(210,72,160,0),19 },
+       { IPv4(210,72,192,0),19 },
+       { IPv4(210,72,224,0),19 },
+       { IPv4(210,73,0,0),18 },
+       { IPv4(210,73,64,0),19 },
+       { IPv4(210,73,96,0),19 },
+       { IPv4(210,73,128,0),19 },
+       { IPv4(210,73,160,0),19 },
+       { IPv4(210,73,224,0),19 },
+       { IPv4(210,74,32,0),19 },
+       { IPv4(210,74,64,0),23 },
+       { IPv4(210,74,160,0),19 },
+       { IPv4(210,74,192,0),19 },
+       { IPv4(210,74,224,0),19 },
+       { IPv4(210,75,32,0),19 },
+       { IPv4(210,75,96,0),19 },
+       { IPv4(210,75,128,0),19 },
+       { IPv4(210,75,192,0),19 },
+       { IPv4(210,75,224,0),22 },
+       { IPv4(210,75,240,0),20 },
+       { IPv4(210,76,32,0),19 },
+       { IPv4(210,76,96,0),19 },
+       { IPv4(210,76,192,0),19 },
+       { IPv4(210,77,0,0),19 },
+       { IPv4(210,77,32,0),21 },
+       { IPv4(210,77,40,0),21 },
+       { IPv4(210,77,48,0),20 },
+       { IPv4(210,77,128,0),19 },
+       { IPv4(210,77,160,0),20 },
+       { IPv4(210,77,192,0),19 },
+       { IPv4(210,77,224,0),19 },
+       { IPv4(210,77,224,0),20 },
+       { IPv4(210,77,240,0),20 },
+       { IPv4(210,78,2,0),24 },
+       { IPv4(210,78,4,0),22 },
+       { IPv4(210,78,8,0),21 },
+       { IPv4(210,78,16,0),20 },
+       { IPv4(210,78,128,0),19 },
+       { IPv4(210,79,224,0),19 },
+       { IPv4(210,80,129,0),24 },
+       { IPv4(210,82,0,0),16 },
+       { IPv4(210,83,0,0),16 },
+       { IPv4(210,85,0,0),18 },
+       { IPv4(210,85,0,0),16 },
+       { IPv4(210,85,64,0),18 },
+       { IPv4(210,85,128,0),18 },
+       { IPv4(210,85,192,0),18 },
+       { IPv4(210,88,0,0),17 },
+       { IPv4(210,88,128,0),18 },
+       { IPv4(210,88,192,0),19 },
+       { IPv4(210,90,0,0),17 },
+       { IPv4(210,90,0,0),24 },
+       { IPv4(210,90,0,0),16 },
+       { IPv4(210,90,21,0),24 },
+       { IPv4(210,90,128,0),17 },
+       { IPv4(210,91,0,0),16 },
+       { IPv4(210,91,8,0),24 },
+       { IPv4(210,92,0,0),18 },
+       { IPv4(210,92,0,0),24 },
+       { IPv4(210,92,1,0),24 },
+       { IPv4(210,92,2,0),24 },
+       { IPv4(210,92,3,0),24 },
+       { IPv4(210,92,4,0),24 },
+       { IPv4(210,92,5,0),24 },
+       { IPv4(210,92,6,0),24 },
+       { IPv4(210,92,7,0),24 },
+       { IPv4(210,92,8,0),24 },
+       { IPv4(210,92,9,0),24 },
+       { IPv4(210,92,10,0),24 },
+       { IPv4(210,92,12,0),24 },
+       { IPv4(210,92,13,0),24 },
+       { IPv4(210,92,14,0),24 },
+       { IPv4(210,92,40,0),24 },
+       { IPv4(210,92,64,0),18 },
+       { IPv4(210,92,73,0),24 },
+       { IPv4(210,92,91,0),24 },
+       { IPv4(210,92,114,0),24 },
+       { IPv4(210,92,127,0),24 },
+       { IPv4(210,92,128,0),17 },
+       { IPv4(210,93,0,0),17 },
+       { IPv4(210,93,6,0),23 },
+       { IPv4(210,93,8,0),21 },
+       { IPv4(210,93,68,0),24 },
+       { IPv4(210,93,69,0),24 },
+       { IPv4(210,93,70,0),24 },
+       { IPv4(210,93,83,0),24 },
+       { IPv4(210,93,84,0),22 },
+       { IPv4(210,93,84,0),24 },
+       { IPv4(210,93,85,0),24 },
+       { IPv4(210,93,86,0),24 },
+       { IPv4(210,93,87,0),24 },
+       { IPv4(210,93,112,0),20 },
+       { IPv4(210,93,128,0),23 },
+       { IPv4(210,93,130,0),24 },
+       { IPv4(210,93,131,0),24 },
+       { IPv4(210,93,132,0),22 },
+       { IPv4(210,93,136,0),21 },
+       { IPv4(210,93,144,0),20 },
+       { IPv4(210,93,160,0),19 },
+       { IPv4(210,94,0,0),19 },
+       { IPv4(210,94,64,0),18 },
+       { IPv4(210,94,128,0),19 },
+       { IPv4(210,94,160,0),19 },
+       { IPv4(210,94,224,0),19 },
+       { IPv4(210,94,245,0),24 },
+       { IPv4(210,94,246,0),24 },
+       { IPv4(210,95,0,0),17 },
+       { IPv4(210,95,0,0),16 },
+       { IPv4(210,95,128,0),17 },
+       { IPv4(210,95,192,0),24 },
+       { IPv4(210,95,193,0),24 },
+       { IPv4(210,95,194,0),24 },
+       { IPv4(210,95,199,0),24 },
+       { IPv4(210,96,0,0),17 },
+       { IPv4(210,96,0,0),18 },
+       { IPv4(210,96,64,0),18 },
+       { IPv4(210,96,128,0),17 },
+       { IPv4(210,96,132,0),24 },
+       { IPv4(210,96,162,0),24 },
+       { IPv4(210,96,163,0),24 },
+       { IPv4(210,96,164,0),24 },
+       { IPv4(210,96,165,0),24 },
+       { IPv4(210,96,166,0),24 },
+       { IPv4(210,96,214,0),24 },
+       { IPv4(210,96,235,0),24 },
+       { IPv4(210,97,0,0),17 },
+       { IPv4(210,97,0,0),18 },
+       { IPv4(210,97,64,0),18 },
+       { IPv4(210,97,68,0),23 },
+       { IPv4(210,97,128,0),19 },
+       { IPv4(210,97,140,0),23 },
+       { IPv4(210,97,142,0),24 },
+       { IPv4(210,97,224,0),20 },
+       { IPv4(210,97,240,0),20 },
+       { IPv4(210,98,0,0),19 },
+       { IPv4(210,98,16,0),21 },
+       { IPv4(210,98,38,0),24 },
+       { IPv4(210,98,39,0),24 },
+       { IPv4(210,98,40,0),21 },
+       { IPv4(210,98,40,0),22 },
+       { IPv4(210,98,45,0),24 },
+       { IPv4(210,98,48,0),20 },
+       { IPv4(210,98,64,0),18 },
+       { IPv4(210,98,128,0),18 },
+       { IPv4(210,98,192,0),19 },
+       { IPv4(210,98,224,0),19 },
+       { IPv4(210,99,0,0),17 },
+       { IPv4(210,99,64,0),18 },
+       { IPv4(210,99,128,0),18 },
+       { IPv4(210,99,128,0),17 },
+       { IPv4(210,99,187,0),24 },
+       { IPv4(210,99,192,0),18 },
+       { IPv4(210,100,0,0),17 },
+       { IPv4(210,100,0,0),18 },
+       { IPv4(210,100,64,0),18 },
+       { IPv4(210,100,128,0),17 },
+       { IPv4(210,100,128,0),18 },
+       { IPv4(210,100,192,0),18 },
+       { IPv4(210,101,0,0),18 },
+       { IPv4(210,101,0,0),19 },
+       { IPv4(210,101,32,0),19 },
+       { IPv4(210,101,64,0),18 },
+       { IPv4(210,101,84,0),24 },
+       { IPv4(210,101,85,0),24 },
+       { IPv4(210,101,128,0),18 },
+       { IPv4(210,101,192,0),19 },
+       { IPv4(210,101,224,0),20 },
+       { IPv4(210,101,240,0),21 },
+       { IPv4(210,101,248,0),22 },
+       { IPv4(210,101,252,0),23 },
+       { IPv4(210,101,254,0),23 },
+       { IPv4(210,102,32,0),19 },
+       { IPv4(210,102,64,0),19 },
+       { IPv4(210,102,96,0),19 },
+       { IPv4(210,102,128,0),17 },
+       { IPv4(210,102,136,0),22 },
+       { IPv4(210,102,208,0),21 },
+       { IPv4(210,102,216,0),22 },
+       { IPv4(210,103,0,0),18 },
+       { IPv4(210,103,0,0),17 },
+       { IPv4(210,103,64,0),18 },
+       { IPv4(210,103,73,0),24 },
+       { IPv4(210,104,0,0),16 },
+       { IPv4(210,104,0,0),17 },
+       { IPv4(210,104,128,0),24 },
+       { IPv4(210,104,128,0),17 },
+       { IPv4(210,104,129,0),24 },
+       { IPv4(210,104,132,0),22 },
+       { IPv4(210,104,132,0),24 },
+       { IPv4(210,104,133,0),24 },
+       { IPv4(210,104,134,0),24 },
+       { IPv4(210,104,135,0),24 },
+       { IPv4(210,104,203,0),24 },
+       { IPv4(210,105,0,0),16 },
+       { IPv4(210,105,108,0),24 },
+       { IPv4(210,106,0,0),18 },
+       { IPv4(210,106,64,0),18 },
+       { IPv4(210,106,76,0),22 },
+       { IPv4(210,106,80,0),22 },
+       { IPv4(210,106,87,0),24 },
+       { IPv4(210,106,96,0),21 },
+       { IPv4(210,106,104,0),22 },
+       { IPv4(210,106,108,0),23 },
+       { IPv4(210,106,128,0),18 },
+       { IPv4(210,106,192,0),19 },
+       { IPv4(210,106,224,0),19 },
+       { IPv4(210,107,0,0),17 },
+       { IPv4(210,107,66,0),24 },
+       { IPv4(210,107,75,0),24 },
+       { IPv4(210,107,128,0),18 },
+       { IPv4(210,107,192,0),19 },
+       { IPv4(210,107,192,0),20 },
+       { IPv4(210,107,199,0),24 },
+       { IPv4(210,107,201,0),24 },
+       { IPv4(210,107,202,0),24 },
+       { IPv4(210,107,208,0),24 },
+       { IPv4(210,107,209,0),24 },
+       { IPv4(210,107,210,0),24 },
+       { IPv4(210,107,211,0),24 },
+       { IPv4(210,107,212,0),22 },
+       { IPv4(210,107,224,0),20 },
+       { IPv4(210,107,240,0),22 },
+       { IPv4(210,107,240,0),20 },
+       { IPv4(210,107,244,0),23 },
+       { IPv4(210,108,0,0),16 },
+       { IPv4(210,108,26,0),24 },
+       { IPv4(210,108,27,0),24 },
+       { IPv4(210,108,80,0),22 },
+       { IPv4(210,108,84,0),24 },
+       { IPv4(210,108,137,0),24 },
+       { IPv4(210,108,149,0),24 },
+       { IPv4(210,108,230,0),24 },
+       { IPv4(210,108,231,0),24 },
+       { IPv4(210,109,128,0),22 },
+       { IPv4(210,109,149,0),24 },
+       { IPv4(210,110,0,0),17 },
+       { IPv4(210,110,80,0),21 },
+       { IPv4(210,110,88,0),22 },
+       { IPv4(210,110,128,0),17 },
+       { IPv4(210,110,128,0),18 },
+       { IPv4(210,110,136,0),23 },
+       { IPv4(210,110,138,0),24 },
+       { IPv4(210,110,139,0),24 },
+       { IPv4(210,110,140,0),24 },
+       { IPv4(210,110,160,0),20 },
+       { IPv4(210,110,176,0),22 },
+       { IPv4(210,110,180,0),23 },
+       { IPv4(210,110,182,0),23 },
+       { IPv4(210,110,184,0),21 },
+       { IPv4(210,110,192,0),19 },
+       { IPv4(210,110,200,0),22 },
+       { IPv4(210,110,240,0),22 },
+       { IPv4(210,110,248,0),22 },
+       { IPv4(210,110,253,0),24 },
+       { IPv4(210,111,0,0),17 },
+       { IPv4(210,111,0,0),18 },
+       { IPv4(210,111,27,0),24 },
+       { IPv4(210,111,28,0),24 },
+       { IPv4(210,111,36,0),24 },
+       { IPv4(210,111,37,0),24 },
+       { IPv4(210,111,64,0),18 },
+       { IPv4(210,111,192,0),19 },
+       { IPv4(210,111,224,0),19 },
+       { IPv4(210,112,0,0),17 },
+       { IPv4(210,112,128,0),19 },
+       { IPv4(210,112,177,0),24 },
+       { IPv4(210,113,0,0),16 },
+       { IPv4(210,113,104,0),24 },
+       { IPv4(210,114,0,0),18 },
+       { IPv4(210,114,80,0),20 },
+       { IPv4(210,114,96,0),19 },
+       { IPv4(210,114,106,0),24 },
+       { IPv4(210,114,117,0),24 },
+       { IPv4(210,114,128,0),17 },
+       { IPv4(210,114,128,0),18 },
+       { IPv4(210,114,192,0),18 },
+       { IPv4(210,115,0,0),20 },
+       { IPv4(210,115,16,0),20 },
+       { IPv4(210,115,32,0),19 },
+       { IPv4(210,115,128,0),19 },
+       { IPv4(210,115,136,0),22 },
+       { IPv4(210,115,140,0),22 },
+       { IPv4(210,115,150,0),24 },
+       { IPv4(210,115,151,0),24 },
+       { IPv4(210,115,160,0),19 },
+       { IPv4(210,115,192,0),19 },
+       { IPv4(210,115,192,0),24 },
+       { IPv4(210,115,193,0),24 },
+       { IPv4(210,115,194,0),24 },
+       { IPv4(210,115,222,0),24 },
+       { IPv4(210,115,224,0),19 },
+       { IPv4(210,116,128,0),17 },
+       { IPv4(210,117,0,0),17 },
+       { IPv4(210,117,128,0),18 },
+       { IPv4(210,117,192,0),18 },
+       { IPv4(210,118,128,0),17 },
+       { IPv4(210,118,128,0),18 },
+       { IPv4(210,118,175,0),24 },
+       { IPv4(210,118,177,0),24 },
+       { IPv4(210,118,192,0),18 },
+       { IPv4(210,119,0,0),20 },
+       { IPv4(210,119,0,0),18 },
+       { IPv4(210,119,0,0),17 },
+       { IPv4(210,119,16,0),22 },
+       { IPv4(210,119,20,0),23 },
+       { IPv4(210,119,22,0),24 },
+       { IPv4(210,119,24,0),21 },
+       { IPv4(210,119,32,0),22 },
+       { IPv4(210,119,64,0),20 },
+       { IPv4(210,119,76,0),22 },
+       { IPv4(210,119,80,0),22 },
+       { IPv4(210,119,102,0),23 },
+       { IPv4(210,119,104,0),21 },
+       { IPv4(210,119,112,0),24 },
+       { IPv4(210,119,114,0),24 },
+       { IPv4(210,119,116,0),24 },
+       { IPv4(210,119,117,0),24 },
+       { IPv4(210,119,118,0),24 },
+       { IPv4(210,119,119,0),24 },
+       { IPv4(210,119,120,0),24 },
+       { IPv4(210,119,121,0),24 },
+       { IPv4(210,119,122,0),24 },
+       { IPv4(210,119,128,0),17 },
+       { IPv4(210,119,188,0),22 },
+       { IPv4(210,120,0,0),16 },
+       { IPv4(210,120,14,0),24 },
+       { IPv4(210,120,25,0),24 },
+       { IPv4(210,120,73,0),24 },
+       { IPv4(210,120,88,0),24 },
+       { IPv4(210,120,89,0),24 },
+       { IPv4(210,120,90,0),24 },
+       { IPv4(210,121,0,0),17 },
+       { IPv4(210,121,128,0),17 },
+       { IPv4(210,121,183,0),24 },
+       { IPv4(210,121,223,0),24 },
+       { IPv4(210,122,16,0),20 },
+       { IPv4(210,122,68,0),24 },
+       { IPv4(210,122,94,0),24 },
+       { IPv4(210,122,96,0),24 },
+       { IPv4(210,122,97,0),24 },
+       { IPv4(210,122,98,0),24 },
+       { IPv4(210,122,99,0),24 },
+       { IPv4(210,122,100,0),24 },
+       { IPv4(210,122,101,0),24 },
+       { IPv4(210,123,0,0),16 },
+       { IPv4(210,123,14,0),24 },
+       { IPv4(210,123,80,0),24 },
+       { IPv4(210,123,108,0),24 },
+       { IPv4(210,123,121,0),24 },
+       { IPv4(210,124,0,0),16 },
+       { IPv4(210,124,12,0),24 },
+       { IPv4(210,124,13,0),24 },
+       { IPv4(210,124,36,0),24 },
+       { IPv4(210,124,155,0),24 },
+       { IPv4(210,124,169,0),24 },
+       { IPv4(210,124,170,0),24 },
+       { IPv4(210,124,204,0),24 },
+       { IPv4(210,124,205,0),24 },
+       { IPv4(210,125,0,0),24 },
+       { IPv4(210,125,0,0),18 },
+       { IPv4(210,125,1,0),24 },
+       { IPv4(210,125,2,0),24 },
+       { IPv4(210,125,3,0),24 },
+       { IPv4(210,125,4,0),24 },
+       { IPv4(210,125,5,0),24 },
+       { IPv4(210,125,6,0),24 },
+       { IPv4(210,125,7,0),24 },
+       { IPv4(210,125,16,0),20 },
+       { IPv4(210,125,56,0),24 },
+       { IPv4(210,125,56,0),22 },
+       { IPv4(210,125,57,0),24 },
+       { IPv4(210,125,58,0),24 },
+       { IPv4(210,125,60,0),22 },
+       { IPv4(210,125,60,0),23 },
+       { IPv4(210,125,62,0),23 },
+       { IPv4(210,125,64,0),19 },
+       { IPv4(210,125,64,0),22 },
+       { IPv4(210,125,64,0),21 },
+       { IPv4(210,125,68,0),22 },
+       { IPv4(210,125,72,0),22 },
+       { IPv4(210,125,76,0),22 },
+       { IPv4(210,125,82,0),24 },
+       { IPv4(210,125,84,0),22 },
+       { IPv4(210,125,88,0),21 },
+       { IPv4(210,125,96,0),21 },
+       { IPv4(210,125,104,0),22 },
+       { IPv4(210,125,108,0),22 },
+       { IPv4(210,125,112,0),20 },
+       { IPv4(210,125,128,0),17 },
+       { IPv4(210,125,160,0),24 },
+       { IPv4(210,125,161,0),24 },
+       { IPv4(210,125,162,0),24 },
+       { IPv4(210,125,176,0),21 },
+       { IPv4(210,125,184,0),24 },
+       { IPv4(210,125,192,0),24 },
+       { IPv4(210,125,193,0),24 },
+       { IPv4(210,125,194,0),24 },
+       { IPv4(210,125,195,0),24 },
+       { IPv4(210,125,196,0),24 },
+       { IPv4(210,125,197,0),24 },
+       { IPv4(210,125,198,0),24 },
+       { IPv4(210,125,199,0),24 },
+       { IPv4(210,125,240,0),21 },
+       { IPv4(210,126,0,0),17 },
+       { IPv4(210,126,140,0),24 },
+       { IPv4(210,126,155,0),24 },
+       { IPv4(210,126,206,0),24 },
+       { IPv4(210,127,0,0),19 },
+       { IPv4(210,127,32,0),24 },
+       { IPv4(210,127,33,0),24 },
+       { IPv4(210,127,34,0),23 },
+       { IPv4(210,127,36,0),24 },
+       { IPv4(210,127,37,0),24 },
+       { IPv4(210,127,38,0),23 },
+       { IPv4(210,127,40,0),24 },
+       { IPv4(210,127,41,0),24 },
+       { IPv4(210,127,42,0),23 },
+       { IPv4(210,127,44,0),24 },
+       { IPv4(210,127,45,0),24 },
+       { IPv4(210,127,46,0),23 },
+       { IPv4(210,127,48,0),23 },
+       { IPv4(210,127,50,0),24 },
+       { IPv4(210,127,51,0),24 },
+       { IPv4(210,127,52,0),22 },
+       { IPv4(210,127,128,0),18 },
+       { IPv4(210,127,192,0),18 },
+       { IPv4(210,127,197,0),24 },
+       { IPv4(210,127,201,0),24 },
+       { IPv4(210,127,208,0),21 },
+       { IPv4(210,127,216,0),22 },
+       { IPv4(210,127,220,0),23 },
+       { IPv4(210,127,233,0),24 },
+       { IPv4(210,131,128,0),17 },
+       { IPv4(210,132,64,0),19 },
+       { IPv4(210,132,96,0),19 },
+       { IPv4(210,133,96,0),19 },
+       { IPv4(210,133,192,0),18 },
+       { IPv4(210,134,128,0),19 },
+       { IPv4(210,134,160,0),19 },
+       { IPv4(210,134,224,0),19 },
+       { IPv4(210,135,0,0),20 },
+       { IPv4(210,135,32,0),20 },
+       { IPv4(210,135,48,0),21 },
+       { IPv4(210,135,56,0),22 },
+       { IPv4(210,135,192,0),20 },
+       { IPv4(210,135,208,0),20 },
+       { IPv4(210,141,0,0),20 },
+       { IPv4(210,141,32,0),19 },
+       { IPv4(210,141,64,0),19 },
+       { IPv4(210,141,96,0),19 },
+       { IPv4(210,141,128,0),19 },
+       { IPv4(210,141,160,0),19 },
+       { IPv4(210,142,0,0),18 },
+       { IPv4(210,142,224,0),19 },
+       { IPv4(210,143,64,0),19 },
+       { IPv4(210,143,96,0),19 },
+       { IPv4(210,146,64,0),18 },
+       { IPv4(210,146,128,0),17 },
+       { IPv4(210,147,0,0),16 },
+       { IPv4(210,151,128,0),17 },
+       { IPv4(210,155,0,0),17 },
+       { IPv4(210,155,128,0),19 },
+       { IPv4(210,157,128,0),20 },
+       { IPv4(210,157,128,0),19 },
+       { IPv4(210,157,160,0),24 },
+       { IPv4(210,157,161,0),24 },
+       { IPv4(210,157,162,0),24 },
+       { IPv4(210,157,163,0),24 },
+       { IPv4(210,157,166,0),24 },
+       { IPv4(210,157,168,0),24 },
+       { IPv4(210,157,171,0),24 },
+       { IPv4(210,157,176,0),24 },
+       { IPv4(210,157,178,0),24 },
+       { IPv4(210,157,179,0),24 },
+       { IPv4(210,157,180,0),24 },
+       { IPv4(210,157,181,0),24 },
+       { IPv4(210,157,182,0),24 },
+       { IPv4(210,157,192,0),19 },
+       { IPv4(210,157,224,0),19 },
+       { IPv4(210,158,64,0),19 },
+       { IPv4(210,158,224,0),19 },
+       { IPv4(210,159,64,0),19 },
+       { IPv4(210,166,32,0),19 },
+       { IPv4(210,166,64,0),19 },
+       { IPv4(210,166,224,0),19 },
+       { IPv4(210,168,192,0),18 },
+       { IPv4(210,169,0,0),17 },
+       { IPv4(210,170,0,0),18 },
+       { IPv4(210,171,0,0),19 },
+       { IPv4(210,171,0,0),20 },
+       { IPv4(210,171,64,0),19 },
+       { IPv4(210,171,192,0),19 },
+       { IPv4(210,171,224,0),19 },
+       { IPv4(210,172,64,0),18 },
+       { IPv4(210,172,192,0),19 },
+       { IPv4(210,172,224,0),20 },
+       { IPv4(210,172,238,0),23 },
+       { IPv4(210,173,0,0),19 },
+       { IPv4(210,173,128,0),22 },
+       { IPv4(210,174,64,0),18 },
+       { IPv4(210,174,128,0),17 },
+       { IPv4(210,175,128,0),23 },
+       { IPv4(210,175,138,0),23 },
+       { IPv4(210,175,152,0),21 },
+       { IPv4(210,175,160,0),19 },
+       { IPv4(210,175,164,0),24 },
+       { IPv4(210,175,179,0),24 },
+       { IPv4(210,175,188,0),24 },
+       { IPv4(210,178,0,0),17 },
+       { IPv4(210,178,0,0),16 },
+       { IPv4(210,178,128,0),17 },
+       { IPv4(210,179,0,0),17 },
+       { IPv4(210,179,0,0),16 },
+       { IPv4(210,179,128,0),17 },
+       { IPv4(210,180,0,0),19 },
+       { IPv4(210,180,64,0),19 },
+       { IPv4(210,180,96,0),19 },
+       { IPv4(210,180,128,0),18 },
+       { IPv4(210,180,192,0),19 },
+       { IPv4(210,180,224,0),19 },
+       { IPv4(210,181,0,0),19 },
+       { IPv4(210,181,28,0),24 },
+       { IPv4(210,181,32,0),19 },
+       { IPv4(210,181,64,0),18 },
+       { IPv4(210,181,128,0),18 },
+       { IPv4(210,181,142,0),23 },
+       { IPv4(210,181,144,0),22 },
+       { IPv4(210,181,148,0),22 },
+       { IPv4(210,181,164,0),23 },
+       { IPv4(210,181,166,0),24 },
+       { IPv4(210,181,188,0),23 },
+       { IPv4(210,181,190,0),24 },
+       { IPv4(210,181,192,0),19 },
+       { IPv4(210,182,0,0),16 },
+       { IPv4(210,182,144,0),24 },
+       { IPv4(210,183,0,0),16 },
+       { IPv4(210,186,0,0),16 },
+       { IPv4(210,186,0,0),17 },
+       { IPv4(210,187,0,0),16 },
+       { IPv4(210,188,192,0),19 },
+       { IPv4(210,189,160,0),21 },
+       { IPv4(210,191,64,0),18 },
+       { IPv4(210,191,192,0),19 },
+       { IPv4(210,192,0,0),18 },
+       { IPv4(210,192,96,0),19 },
+       { IPv4(210,193,0,0),20 },
+       { IPv4(210,193,0,0),19 },
+       { IPv4(210,195,0,0),16 },
+       { IPv4(210,196,0,0),16 },
+       { IPv4(210,198,128,0),17 },
+       { IPv4(210,199,96,0),19 },
+       { IPv4(210,200,32,0),19 },
+       { IPv4(210,204,0,0),16 },
+       { IPv4(210,204,0,0),17 },
+       { IPv4(210,204,128,0),17 },
+       { IPv4(210,204,248,0),22 },
+       { IPv4(210,204,252,0),22 },
+       { IPv4(210,205,0,0),18 },
+       { IPv4(210,205,64,0),19 },
+       { IPv4(210,205,128,0),17 },
+       { IPv4(210,205,236,0),24 },
+       { IPv4(210,206,0,0),16 },
+       { IPv4(210,207,0,0),16 },
+       { IPv4(210,207,87,0),24 },
+       { IPv4(210,207,195,0),24 },
+       { IPv4(210,207,198,0),24 },
+       { IPv4(210,207,199,0),24 },
+       { IPv4(210,208,0,0),21 },
+       { IPv4(210,208,8,0),21 },
+       { IPv4(210,208,32,0),19 },
+       { IPv4(210,208,80,0),20 },
+       { IPv4(210,208,96,0),19 },
+       { IPv4(210,208,160,0),19 },
+       { IPv4(210,209,0,0),18 },
+       { IPv4(210,210,0,0),20 },
+       { IPv4(210,210,32,0),22 },
+       { IPv4(210,210,36,0),22 },
+       { IPv4(210,210,39,0),24 },
+       { IPv4(210,210,40,0),22 },
+       { IPv4(210,210,40,0),24 },
+       { IPv4(210,210,41,0),24 },
+       { IPv4(210,210,43,0),24 },
+       { IPv4(210,210,44,0),22 },
+       { IPv4(210,210,48,0),24 },
+       { IPv4(210,210,49,0),24 },
+       { IPv4(210,214,0,0),20 },
+       { IPv4(210,214,4,0),23 },
+       { IPv4(210,214,16,0),21 },
+       { IPv4(210,214,24,0),21 },
+       { IPv4(210,214,48,0),20 },
+       { IPv4(210,214,64,0),23 },
+       { IPv4(210,214,64,0),20 },
+       { IPv4(210,214,80,0),20 },
+       { IPv4(210,214,96,0),21 },
+       { IPv4(210,214,106,0),24 },
+       { IPv4(210,214,108,0),22 },
+       { IPv4(210,214,112,0),20 },
+       { IPv4(210,214,128,0),20 },
+       { IPv4(210,214,144,0),24 },
+       { IPv4(210,214,144,0),21 },
+       { IPv4(210,214,145,0),24 },
+       { IPv4(210,214,146,0),24 },
+       { IPv4(210,214,147,0),24 },
+       { IPv4(210,214,149,0),24 },
+       { IPv4(210,214,152,0),21 },
+       { IPv4(210,214,160,0),20 },
+       { IPv4(210,214,165,0),24 },
+       { IPv4(210,214,168,0),23 },
+       { IPv4(210,214,172,0),22 },
+       { IPv4(210,214,178,0),24 },
+       { IPv4(210,214,184,0),21 },
+       { IPv4(210,214,192,0),20 },
+       { IPv4(210,214,197,0),24 },
+       { IPv4(210,214,208,0),20 },
+       { IPv4(210,214,240,0),20 },
+       { IPv4(210,216,0,0),16 },
+       { IPv4(210,216,13,0),24 },
+       { IPv4(210,217,0,0),17 },
+       { IPv4(210,217,128,0),19 },
+       { IPv4(210,217,160,0),19 },
+       { IPv4(210,217,183,0),24 },
+       { IPv4(210,217,192,0),19 },
+       { IPv4(210,217,224,0),19 },
+       { IPv4(210,218,0,0),18 },
+       { IPv4(210,218,64,0),19 },
+       { IPv4(210,218,128,0),18 },
+       { IPv4(210,218,192,0),19 },
+       { IPv4(210,218,195,0),24 },
+       { IPv4(210,218,224,0),19 },
+       { IPv4(210,219,0,0),19 },
+       { IPv4(210,219,32,0),22 },
+       { IPv4(210,219,36,0),24 },
+       { IPv4(210,219,37,0),24 },
+       { IPv4(210,219,38,0),23 },
+       { IPv4(210,219,40,0),21 },
+       { IPv4(210,219,48,0),20 },
+       { IPv4(210,219,128,0),18 },
+       { IPv4(210,220,0,0),19 },
+       { IPv4(210,220,13,0),24 },
+       { IPv4(210,220,21,0),24 },
+       { IPv4(210,220,22,0),24 },
+       { IPv4(210,220,32,0),21 },
+       { IPv4(210,220,40,0),22 },
+       { IPv4(210,220,64,0),19 },
+       { IPv4(210,220,96,0),19 },
+       { IPv4(210,220,128,0),19 },
+       { IPv4(210,220,160,0),19 },
+       { IPv4(210,221,0,0),17 },
+       { IPv4(210,221,128,0),19 },
+       { IPv4(210,221,160,0),20 },
+       { IPv4(210,221,176,0),20 },
+       { IPv4(210,221,192,0),19 },
+       { IPv4(210,221,224,0),19 },
+       { IPv4(210,222,0,0),15 },
+       { IPv4(210,224,160,0),19 },
+       { IPv4(210,224,192,0),18 },
+       { IPv4(210,229,160,0),19 },
+       { IPv4(210,230,0,0),17 },
+       { IPv4(210,230,70,0),23 },
+       { IPv4(210,230,72,0),23 },
+       { IPv4(210,230,74,0),24 },
+       { IPv4(210,230,128,0),17 },
+       { IPv4(210,233,0,0),18 },
+       { IPv4(210,234,0,0),16 },
+       { IPv4(210,235,160,0),19 },
+       { IPv4(210,236,0,0),19 },
+       { IPv4(210,236,32,0),20 },
+       { IPv4(210,236,64,0),19 },
+       { IPv4(210,236,128,0),19 },
+       { IPv4(210,236,192,0),19 },
+       { IPv4(210,237,32,0),19 },
+       { IPv4(210,238,32,0),19 },
+       { IPv4(210,238,128,0),17 },
+       { IPv4(210,239,96,0),19 },
+       { IPv4(210,239,128,0),17 },
+       { IPv4(210,240,0,0),16 },
+       { IPv4(210,241,0,0),17 },
+       { IPv4(210,241,128,0),19 },
+       { IPv4(210,241,160,0),24 },
+       { IPv4(210,241,192,0),19 },
+       { IPv4(210,243,0,0),18 },
+       { IPv4(210,243,128,0),17 },
+       { IPv4(210,243,128,0),22 },
+       { IPv4(210,243,182,0),23 },
+       { IPv4(210,243,192,0),18 },
+       { IPv4(210,244,0,0),18 },
+       { IPv4(210,244,0,0),17 },
+       { IPv4(210,244,28,0),22 },
+       { IPv4(210,244,128,0),18 },
+       { IPv4(210,244,192,0),20 },
+       { IPv4(210,244,208,0),20 },
+       { IPv4(210,244,224,0),19 },
+       { IPv4(210,249,0,0),17 },
+       { IPv4(210,249,224,0),19 },
+       { IPv4(210,251,0,0),17 },
+       { IPv4(210,251,192,0),20 },
+       { IPv4(210,251,224,0),22 },
+       { IPv4(210,252,0,0),18 },
+       { IPv4(210,253,32,0),19 },
+       { IPv4(210,253,192,0),19 },
+       { IPv4(210,255,0,0),17 },
+       { IPv4(210,255,160,0),19 },
+       { IPv4(210,255,224,0),19 },
+       { IPv4(211,1,0,0),22 },
+       { IPv4(211,1,128,0),19 },
+       { IPv4(211,1,160,0),23 },
+       { IPv4(211,1,192,0),19 },
+       { IPv4(211,2,160,0),19 },
+       { IPv4(211,4,0,0),16 },
+       { IPv4(211,4,243,0),24 },
+       { IPv4(211,5,0,0),16 },
+       { IPv4(211,7,96,0),19 },
+       { IPv4(211,7,224,0),19 },
+       { IPv4(211,9,64,0),18 },
+       { IPv4(211,12,192,0),22 },
+       { IPv4(211,12,192,0),19 },
+       { IPv4(211,12,196,0),22 },
+       { IPv4(211,12,200,0),22 },
+       { IPv4(211,12,208,0),22 },
+       { IPv4(211,12,212,0),22 },
+       { IPv4(211,12,216,0),22 },
+       { IPv4(211,12,224,0),19 },
+       { IPv4(211,13,0,0),17 },
+       { IPv4(211,13,160,0),19 },
+       { IPv4(211,13,224,0),19 },
+       { IPv4(211,14,192,0),19 },
+       { IPv4(211,14,224,0),19 },
+       { IPv4(211,15,64,0),19 },
+       { IPv4(211,18,0,0),16 },
+       { IPv4(211,23,0,0),16 },
+       { IPv4(211,32,0,0),16 },
+       { IPv4(211,32,7,0),24 },
+       { IPv4(211,32,32,0),24 },
+       { IPv4(211,32,160,0),24 },
+       { IPv4(211,33,0,0),17 },
+       { IPv4(211,33,128,0),17 },
+       { IPv4(211,33,130,0),24 },
+       { IPv4(211,34,0,0),16 },
+       { IPv4(211,34,0,0),17 },
+       { IPv4(211,34,110,0),24 },
+       { IPv4(211,34,111,0),24 },
+       { IPv4(211,34,112,0),24 },
+       { IPv4(211,34,128,0),17 },
+       { IPv4(211,35,0,0),18 },
+       { IPv4(211,35,42,0),24 },
+       { IPv4(211,35,43,0),24 },
+       { IPv4(211,35,64,0),19 },
+       { IPv4(211,35,128,0),17 },
+       { IPv4(211,36,0,0),22 },
+       { IPv4(211,36,96,0),19 },
+       { IPv4(211,36,128,0),19 },
+       { IPv4(211,36,160,0),19 },
+       { IPv4(211,36,192,0),22 },
+       { IPv4(211,36,196,0),22 },
+       { IPv4(211,36,200,0),22 },
+       { IPv4(211,36,204,0),22 },
+       { IPv4(211,36,208,0),20 },
+       { IPv4(211,36,224,0),19 },
+       { IPv4(211,37,0,0),17 },
+       { IPv4(211,37,93,0),24 },
+       { IPv4(211,37,128,0),18 },
+       { IPv4(211,37,192,0),19 },
+       { IPv4(211,37,224,0),19 },
+       { IPv4(211,38,0,0),16 },
+       { IPv4(211,38,9,0),24 },
+       { IPv4(211,39,64,0),18 },
+       { IPv4(211,39,90,0),24 },
+       { IPv4(211,39,128,0),22 },
+       { IPv4(211,39,132,0),22 },
+       { IPv4(211,39,136,0),22 },
+       { IPv4(211,39,140,0),22 },
+       { IPv4(211,39,144,0),20 },
+       { IPv4(211,39,192,0),19 },
+       { IPv4(211,39,224,0),19 },
+       { IPv4(211,40,0,0),16 },
+       { IPv4(211,41,0,0),19 },
+       { IPv4(211,41,32,0),19 },
+       { IPv4(211,41,64,0),19 },
+       { IPv4(211,41,89,0),24 },
+       { IPv4(211,41,90,0),24 },
+       { IPv4(211,41,96,0),19 },
+       { IPv4(211,41,128,0),19 },
+       { IPv4(211,41,160,0),19 },
+       { IPv4(211,41,192,0),19 },
+       { IPv4(211,41,224,0),22 },
+       { IPv4(211,41,228,0),22 },
+       { IPv4(211,41,232,0),22 },
+       { IPv4(211,41,236,0),22 },
+       { IPv4(211,41,240,0),22 },
+       { IPv4(211,41,244,0),22 },
+       { IPv4(211,41,248,0),22 },
+       { IPv4(211,41,252,0),22 },
+       { IPv4(211,42,0,0),17 },
+       { IPv4(211,42,10,0),24 },
+       { IPv4(211,42,16,0),24 },
+       { IPv4(211,42,23,0),24 },
+       { IPv4(211,42,72,0),22 },
+       { IPv4(211,42,84,0),23 },
+       { IPv4(211,42,86,0),24 },
+       { IPv4(211,42,116,0),22 },
+       { IPv4(211,42,120,0),24 },
+       { IPv4(211,42,124,0),22 },
+       { IPv4(211,42,128,0),19 },
+       { IPv4(211,42,160,0),19 },
+       { IPv4(211,42,192,0),19 },
+       { IPv4(211,42,224,0),19 },
+       { IPv4(211,43,3,0),24 },
+       { IPv4(211,43,4,0),24 },
+       { IPv4(211,43,7,0),24 },
+       { IPv4(211,43,8,0),21 },
+       { IPv4(211,43,15,0),24 },
+       { IPv4(211,43,16,0),21 },
+       { IPv4(211,43,20,0),24 },
+       { IPv4(211,43,21,0),24 },
+       { IPv4(211,43,22,0),24 },
+       { IPv4(211,43,23,0),24 },
+       { IPv4(211,43,29,0),24 },
+       { IPv4(211,43,30,0),23 },
+       { IPv4(211,43,32,0),19 },
+       { IPv4(211,43,64,0),21 },
+       { IPv4(211,43,72,0),22 },
+       { IPv4(211,43,76,0),22 },
+       { IPv4(211,43,80,0),20 },
+       { IPv4(211,43,96,0),20 },
+       { IPv4(211,43,112,0),21 },
+       { IPv4(211,43,120,0),24 },
+       { IPv4(211,43,128,0),20 },
+       { IPv4(211,43,144,0),20 },
+       { IPv4(211,44,0,0),16 },
+       { IPv4(211,44,62,0),24 },
+       { IPv4(211,45,32,0),19 },
+       { IPv4(211,45,64,0),19 },
+       { IPv4(211,45,96,0),19 },
+       { IPv4(211,45,128,0),18 },
+       { IPv4(211,45,192,0),18 },
+       { IPv4(211,46,0,0),17 },
+       { IPv4(211,46,0,0),16 },
+       { IPv4(211,46,128,0),17 },
+       { IPv4(211,47,0,0),18 },
+       { IPv4(211,47,80,0),20 },
+       { IPv4(211,47,96,0),19 },
+       { IPv4(211,47,128,0),19 },
+       { IPv4(211,47,160,0),24 },
+       { IPv4(211,47,161,0),24 },
+       { IPv4(211,47,162,0),24 },
+       { IPv4(211,47,176,0),20 },
+       { IPv4(211,47,192,0),19 },
+       { IPv4(211,47,224,0),19 },
+       { IPv4(211,48,0,0),16 },
+       { IPv4(211,49,0,0),17 },
+       { IPv4(211,49,128,0),17 },
+       { IPv4(211,50,0,0),16 },
+       { IPv4(211,51,0,0),16 },
+       { IPv4(211,51,28,0),24 },
+       { IPv4(211,51,39,0),24 },
+       { IPv4(211,52,0,0),18 },
+       { IPv4(211,52,128,0),17 },
+       { IPv4(211,53,213,0),24 },
+       { IPv4(211,54,0,0),15 },
+       { IPv4(211,54,123,0),24 },
+       { IPv4(211,55,45,0),24 },
+       { IPv4(211,55,46,0),24 },
+       { IPv4(211,56,0,0),17 },
+       { IPv4(211,56,102,0),23 },
+       { IPv4(211,56,128,0),18 },
+       { IPv4(211,56,192,0),19 },
+       { IPv4(211,56,224,0),19 },
+       { IPv4(211,57,0,0),16 },
+       { IPv4(211,57,0,0),17 },
+       { IPv4(211,57,128,0),17 },
+       { IPv4(211,58,0,0),16 },
+       { IPv4(211,58,248,0),24 },
+       { IPv4(211,58,249,0),24 },
+       { IPv4(211,59,0,0),16 },
+       { IPv4(211,60,0,0),16 },
+       { IPv4(211,60,213,0),24 },
+       { IPv4(211,61,0,0),18 },
+       { IPv4(211,61,48,0),24 },
+       { IPv4(211,61,49,0),24 },
+       { IPv4(211,61,50,0),24 },
+       { IPv4(211,61,51,0),24 },
+       { IPv4(211,61,64,0),19 },
+       { IPv4(211,61,64,0),18 },
+       { IPv4(211,61,96,0),19 },
+       { IPv4(211,61,128,0),17 },
+       { IPv4(211,61,247,0),24 },
+       { IPv4(211,62,0,0),18 },
+       { IPv4(211,62,64,0),18 },
+       { IPv4(211,62,103,0),24 },
+       { IPv4(211,62,128,0),17 },
+       { IPv4(211,63,0,0),19 },
+       { IPv4(211,63,32,0),19 },
+       { IPv4(211,63,64,0),19 },
+       { IPv4(211,63,96,0),19 },
+       { IPv4(211,63,128,0),17 },
+       { IPv4(211,63,175,0),24 },
+       { IPv4(211,72,0,0),16 },
+       { IPv4(211,73,0,0),19 },
+       { IPv4(211,73,96,0),19 },
+       { IPv4(211,73,128,0),19 },
+       { IPv4(211,73,160,0),20 },
+       { IPv4(211,73,192,0),18 },
+       { IPv4(211,74,0,0),16 },
+       { IPv4(211,74,0,0),18 },
+       { IPv4(211,74,64,0),18 },
+       { IPv4(211,74,128,0),18 },
+       { IPv4(211,74,146,0),24 },
+       { IPv4(211,74,150,0),24 },
+       { IPv4(211,74,192,0),18 },
+       { IPv4(211,74,192,0),19 },
+       { IPv4(211,74,224,0),19 },
+       { IPv4(211,76,0,0),19 },
+       { IPv4(211,76,64,0),20 },
+       { IPv4(211,76,80,0),20 },
+       { IPv4(211,76,112,0),20 },
+       { IPv4(211,76,128,0),20 },
+       { IPv4(211,77,0,0),18 },
+       { IPv4(211,77,64,0),18 },
+       { IPv4(211,77,128,0),17 },
+       { IPv4(211,78,0,0),20 },
+       { IPv4(211,78,16,0),20 },
+       { IPv4(211,78,32,0),20 },
+       { IPv4(211,78,48,0),21 },
+       { IPv4(211,78,48,0),20 },
+       { IPv4(211,78,56,0),21 },
+       { IPv4(211,78,80,0),22 },
+       { IPv4(211,78,96,0),19 },
+       { IPv4(211,78,96,0),24 },
+       { IPv4(211,78,97,0),24 },
+       { IPv4(211,78,160,0),19 },
+       { IPv4(211,79,32,0),20 },
+       { IPv4(211,79,128,0),19 },
+       { IPv4(211,79,160,0),19 },
+       { IPv4(211,79,192,0),20 },
+       { IPv4(211,79,208,0),20 },
+       { IPv4(211,79,240,0),20 },
+       { IPv4(211,88,0,0),16 },
+       { IPv4(211,98,0,0),17 },
+       { IPv4(211,99,0,0),19 },
+       { IPv4(211,99,32,0),19 },
+       { IPv4(211,99,64,0),19 },
+       { IPv4(211,99,160,0),19 },
+       { IPv4(211,99,192,0),19 },
+       { IPv4(211,99,224,0),19 },
+       { IPv4(211,100,0,0),19 },
+       { IPv4(211,100,32,0),19 },
+       { IPv4(211,100,64,0),19 },
+       { IPv4(211,100,96,0),19 },
+       { IPv4(211,101,0,0),18 },
+       { IPv4(211,101,128,0),17 },
+       { IPv4(211,101,128,0),19 },
+       { IPv4(211,101,160,0),19 },
+       { IPv4(211,101,192,0),19 },
+       { IPv4(211,101,224,0),19 },
+       { IPv4(211,102,0,0),19 },
+       { IPv4(211,102,32,0),19 },
+       { IPv4(211,102,64,0),19 },
+       { IPv4(211,102,96,0),19 },
+       { IPv4(211,104,0,0),14 },
+       { IPv4(211,104,34,0),24 },
+       { IPv4(211,106,22,0),24 },
+       { IPv4(211,108,0,0),16 },
+       { IPv4(211,109,0,0),16 },
+       { IPv4(211,110,0,0),16 },
+       { IPv4(211,111,0,0),17 },
+       { IPv4(211,111,128,0),20 },
+       { IPv4(211,111,144,0),20 },
+       { IPv4(211,111,160,0),20 },
+       { IPv4(211,111,176,0),20 },
+       { IPv4(211,111,192,0),20 },
+       { IPv4(211,111,208,0),20 },
+       { IPv4(211,111,224,0),19 },
+       { IPv4(211,112,64,0),19 },
+       { IPv4(211,112,96,0),19 },
+       { IPv4(211,112,128,0),17 },
+       { IPv4(211,112,166,0),23 },
+       { IPv4(211,113,0,0),17 },
+       { IPv4(211,113,1,0),24 },
+       { IPv4(211,113,128,0),18 },
+       { IPv4(211,113,128,0),17 },
+       { IPv4(211,113,192,0),18 },
+       { IPv4(211,114,0,0),16 },
+       { IPv4(211,114,0,0),17 },
+       { IPv4(211,114,35,0),24 },
+       { IPv4(211,114,45,0),24 },
+       { IPv4(211,114,98,0),24 },
+       { IPv4(211,114,99,0),24 },
+       { IPv4(211,114,128,0),17 },
+       { IPv4(211,115,0,0),19 },
+       { IPv4(211,115,32,0),19 },
+       { IPv4(211,115,64,0),19 },
+       { IPv4(211,115,128,0),18 },
+       { IPv4(211,115,192,0),19 },
+       { IPv4(211,115,224,0),19 },
+       { IPv4(211,116,0,0),18 },
+       { IPv4(211,116,128,0),19 },
+       { IPv4(211,116,160,0),22 },
+       { IPv4(211,116,176,0),20 },
+       { IPv4(211,116,224,0),19 },
+       { IPv4(211,117,0,0),16 },
+       { IPv4(211,118,0,0),16 },
+       { IPv4(211,118,128,0),24 },
+       { IPv4(211,119,0,0),16 },
+       { IPv4(211,120,224,0),20 },
+       { IPv4(211,125,192,0),22 },
+       { IPv4(211,125,200,0),21 },
+       { IPv4(211,125,208,0),21 },
+       { IPv4(211,126,0,0),16 },
+       { IPv4(211,128,254,0),23 },
+       { IPv4(211,130,96,0),19 },
+       { IPv4(211,132,96,0),19 },
+       { IPv4(211,133,144,0),20 },
+       { IPv4(211,133,160,0),19 },
+       { IPv4(211,133,224,0),20 },
+       { IPv4(211,134,0,0),16 },
+       { IPv4(211,135,128,0),17 },
+       { IPv4(211,144,0,0),20 },
+       { IPv4(211,144,32,0),20 },
+       { IPv4(211,144,224,0),19 },
+       { IPv4(211,146,0,0),16 },
+       { IPv4(211,147,0,0),19 },
+       { IPv4(211,147,32,0),19 },
+       { IPv4(211,147,96,0),19 },
+       { IPv4(211,148,128,0),19 },
+       { IPv4(211,149,0,0),16 },
+       { IPv4(211,151,0,0),17 },
+       { IPv4(211,151,128,0),18 },
+       { IPv4(211,152,64,0),19 },
+       { IPv4(211,152,96,0),19 },
+       { IPv4(211,152,128,0),20 },
+       { IPv4(211,152,144,0),21 },
+       { IPv4(211,152,184,0),21 },
+       { IPv4(211,152,192,0),19 },
+       { IPv4(211,154,32,0),19 },
+       { IPv4(211,154,192,0),18 },
+       { IPv4(211,155,16,0),20 },
+       { IPv4(211,155,32,0),19 },
+       { IPv4(211,155,64,0),19 },
+       { IPv4(211,155,160,0),20 },
+       { IPv4(211,155,224,0),20 },
+       { IPv4(211,156,0,0),19 },
+       { IPv4(211,156,128,0),19 },
+       { IPv4(211,156,160,0),19 },
+       { IPv4(211,157,32,0),19 },
+       { IPv4(211,157,64,0),19 },
+       { IPv4(211,159,64,0),20 },
+       { IPv4(211,159,80,0),20 },
+       { IPv4(211,159,96,0),19 },
+       { IPv4(211,160,0,0),16 },
+       { IPv4(211,164,0,0),16 },
+       { IPv4(211,165,0,0),16 },
+       { IPv4(211,166,0,0),16 },
+       { IPv4(211,167,0,0),19 },
+       { IPv4(211,167,64,0),19 },
+       { IPv4(211,167,128,0),19 },
+       { IPv4(211,167,160,0),20 },
+       { IPv4(211,167,192,0),19 },
+       { IPv4(211,167,224,0),19 },
+       { IPv4(211,168,0,0),16 },
+       { IPv4(211,168,255,0),24 },
+       { IPv4(211,169,0,0),16 },
+       { IPv4(211,169,225,0),24 },
+       { IPv4(211,170,0,0),16 },
+       { IPv4(211,171,0,0),16 },
+       { IPv4(211,171,206,0),24 },
+       { IPv4(211,172,0,0),18 },
+       { IPv4(211,172,80,0),22 },
+       { IPv4(211,172,84,0),22 },
+       { IPv4(211,172,88,0),22 },
+       { IPv4(211,172,92,0),22 },
+       { IPv4(211,172,112,0),20 },
+       { IPv4(211,172,128,0),20 },
+       { IPv4(211,172,144,0),20 },
+       { IPv4(211,172,176,0),21 },
+       { IPv4(211,172,184,0),21 },
+       { IPv4(211,172,192,0),20 },
+       { IPv4(211,172,224,0),20 },
+       { IPv4(211,172,240,0),20 },
+       { IPv4(211,173,0,0),17 },
+       { IPv4(211,173,17,0),24 },
+       { IPv4(211,173,78,0),23 },
+       { IPv4(211,174,0,0),20 },
+       { IPv4(211,174,32,0),21 },
+       { IPv4(211,174,40,0),22 },
+       { IPv4(211,174,44,0),23 },
+       { IPv4(211,174,46,0),23 },
+       { IPv4(211,174,48,0),20 },
+       { IPv4(211,174,64,0),19 },
+       { IPv4(211,174,96,0),19 },
+       { IPv4(211,175,0,0),16 },
+       { IPv4(211,175,239,0),24 },
+       { IPv4(211,176,0,0),14 },
+       { IPv4(211,176,30,0),24 },
+       { IPv4(211,180,0,0),16 },
+       { IPv4(211,181,0,0),16 },
+       { IPv4(211,181,249,0),24 },
+       { IPv4(211,182,0,0),16 },
+       { IPv4(211,183,0,0),16 },
+       { IPv4(211,183,106,0),24 },
+       { IPv4(211,183,107,0),24 },
+       { IPv4(211,183,108,0),24 },
+       { IPv4(211,184,0,0),15 },
+       { IPv4(211,184,0,0),16 },
+       { IPv4(211,185,0,0),16 },
+       { IPv4(211,186,0,0),16 },
+       { IPv4(211,187,0,0),16 },
+       { IPv4(211,188,0,0),17 },
+       { IPv4(211,188,32,0),24 },
+       { IPv4(211,188,128,0),17 },
+       { IPv4(211,189,128,0),19 },
+       { IPv4(211,189,160,0),19 },
+       { IPv4(211,189,192,0),19 },
+       { IPv4(211,189,224,0),19 },
+       { IPv4(211,190,0,0),17 },
+       { IPv4(211,190,0,0),16 },
+       { IPv4(211,190,30,0),24 },
+       { IPv4(211,190,31,0),24 },
+       { IPv4(211,190,128,0),17 },
+       { IPv4(211,191,0,0),16 },
+       { IPv4(211,191,0,0),17 },
+       { IPv4(211,191,128,0),17 },
+       { IPv4(211,192,0,0),24 },
+       { IPv4(211,192,0,0),13 },
+       { IPv4(211,192,45,0),24 },
+       { IPv4(211,192,169,0),24 },
+       { IPv4(211,195,85,0),24 },
+       { IPv4(211,200,0,0),13 },
+       { IPv4(211,205,67,0),24 },
+       { IPv4(211,205,77,0),24 },
+       { IPv4(211,208,0,0),14 },
+       { IPv4(211,212,0,0),14 },
+       { IPv4(211,214,60,0),24 },
+       { IPv4(211,214,68,0),24 },
+       { IPv4(211,214,70,0),24 },
+       { IPv4(211,216,0,0),13 },
+       { IPv4(211,216,216,0),24 },
+       { IPv4(211,217,8,0),24 },
+       { IPv4(211,217,9,0),24 },
+       { IPv4(211,217,10,0),24 },
+       { IPv4(211,217,11,0),24 },
+       { IPv4(211,217,12,0),24 },
+       { IPv4(211,217,13,0),24 },
+       { IPv4(211,217,14,0),24 },
+       { IPv4(211,217,15,0),24 },
+       { IPv4(211,217,16,0),24 },
+       { IPv4(211,217,17,0),24 },
+       { IPv4(211,217,18,0),24 },
+       { IPv4(211,217,20,0),24 },
+       { IPv4(211,217,21,0),24 },
+       { IPv4(211,217,22,0),24 },
+       { IPv4(211,218,235,0),24 },
+       { IPv4(211,218,236,0),24 },
+       { IPv4(211,218,237,0),24 },
+       { IPv4(211,219,24,0),24 },
+       { IPv4(211,219,66,0),24 },
+       { IPv4(211,224,0,0),13 },
+       { IPv4(211,226,77,0),24 },
+       { IPv4(211,232,0,0),17 },
+       { IPv4(211,232,128,0),18 },
+       { IPv4(211,233,0,0),18 },
+       { IPv4(211,233,64,0),20 },
+       { IPv4(211,233,80,0),20 },
+       { IPv4(211,233,128,0),18 },
+       { IPv4(211,234,0,0),17 },
+       { IPv4(211,234,128,0),17 },
+       { IPv4(211,235,192,0),21 },
+       { IPv4(211,235,200,0),21 },
+       { IPv4(211,235,208,0),22 },
+       { IPv4(211,235,212,0),23 },
+       { IPv4(211,235,214,0),23 },
+       { IPv4(211,235,216,0),22 },
+       { IPv4(211,235,220,0),23 },
+       { IPv4(211,235,222,0),24 },
+       { IPv4(211,235,223,0),24 },
+       { IPv4(211,235,224,0),19 },
+       { IPv4(211,236,0,0),19 },
+       { IPv4(211,236,32,0),19 },
+       { IPv4(211,236,64,0),18 },
+       { IPv4(211,236,128,0),19 },
+       { IPv4(211,236,160,0),19 },
+       { IPv4(211,236,192,0),19 },
+       { IPv4(211,237,0,0),21 },
+       { IPv4(211,237,8,0),22 },
+       { IPv4(211,237,12,0),23 },
+       { IPv4(211,237,14,0),23 },
+       { IPv4(211,237,16,0),20 },
+       { IPv4(211,237,32,0),20 },
+       { IPv4(211,237,48,0),20 },
+       { IPv4(211,237,64,0),20 },
+       { IPv4(211,237,80,0),20 },
+       { IPv4(211,237,96,0),20 },
+       { IPv4(211,237,112,0),20 },
+       { IPv4(211,237,128,0),20 },
+       { IPv4(211,237,176,0),20 },
+       { IPv4(211,237,192,0),20 },
+       { IPv4(211,237,224,0),20 },
+       { IPv4(211,237,240,0),20 },
+       { IPv4(211,238,0,0),20 },
+       { IPv4(211,238,16,0),21 },
+       { IPv4(211,238,24,0),21 },
+       { IPv4(211,238,32,0),19 },
+       { IPv4(211,238,55,0),24 },
+       { IPv4(211,238,64,0),19 },
+       { IPv4(211,238,96,0),19 },
+       { IPv4(211,238,128,0),19 },
+       { IPv4(211,238,160,0),20 },
+       { IPv4(211,238,176,0),20 },
+       { IPv4(211,238,176,0),21 },
+       { IPv4(211,238,192,0),20 },
+       { IPv4(211,238,224,0),22 },
+       { IPv4(211,238,228,0),22 },
+       { IPv4(211,238,232,0),22 },
+       { IPv4(211,238,236,0),22 },
+       { IPv4(211,239,0,0),17 },
+       { IPv4(211,239,128,0),18 },
+       { IPv4(211,240,128,0),17 },
+       { IPv4(211,241,0,0),17 },
+       { IPv4(211,242,0,0),17 },
+       { IPv4(211,242,20,0),24 },
+       { IPv4(211,242,21,0),24 },
+       { IPv4(211,242,128,0),18 },
+       { IPv4(211,242,141,0),24 },
+       { IPv4(211,243,0,0),16 },
+       { IPv4(211,245,0,0),17 },
+       { IPv4(211,245,128,0),17 },
+       { IPv4(211,246,0,0),15 },
+       { IPv4(211,248,0,0),17 },
+       { IPv4(211,248,0,0),16 },
+       { IPv4(211,248,128,0),17 },
+       { IPv4(211,250,0,0),15 },
+       { IPv4(211,250,0,0),16 },
+       { IPv4(211,251,0,0),16 },
+       { IPv4(211,251,216,0),21 },
+       { IPv4(211,252,0,0),16 },
+       { IPv4(211,252,0,0),15 },
+       { IPv4(211,252,208,0),21 },
+       { IPv4(211,252,220,0),24 },
+       { IPv4(211,253,0,0),16 },
+       { IPv4(211,254,0,0),17 },
+       { IPv4(211,254,128,0),18 },
+       { IPv4(211,255,0,0),19 },
+       { IPv4(211,255,32,0),19 },
+       { IPv4(211,255,64,0),21 },
+       { IPv4(211,255,72,0),23 },
+       { IPv4(211,255,74,0),23 },
+       { IPv4(211,255,76,0),22 },
+       { IPv4(211,255,80,0),22 },
+       { IPv4(211,255,84,0),24 },
+       { IPv4(211,255,85,0),24 },
+       { IPv4(211,255,86,0),23 },
+       { IPv4(211,255,88,0),22 },
+       { IPv4(211,255,92,0),23 },
+       { IPv4(211,255,94,0),23 },
+       { IPv4(211,255,114,0),23 },
+       { IPv4(211,255,116,0),22 },
+       { IPv4(211,255,120,0),23 },
+       { IPv4(211,255,121,0),24 },
+       { IPv4(211,255,126,0),24 },
+       { IPv4(211,255,127,0),24 },
+       { IPv4(211,255,128,0),19 },
+       { IPv4(211,255,160,0),19 },
+       { IPv4(211,255,192,0),21 },
+       { IPv4(211,255,200,0),21 },
+       { IPv4(211,255,224,0),20 },
+       { IPv4(211,255,240,0),20 },
+       { IPv4(212,0,128,0),19 },
+       { IPv4(212,1,0,0),19 },
+       { IPv4(212,3,32,0),19 },
+       { IPv4(212,3,44,0),23 },
+       { IPv4(212,3,160,0),19 },
+       { IPv4(212,4,64,0),19 },
+       { IPv4(212,4,208,0),24 },
+       { IPv4(212,7,32,0),19 },
+       { IPv4(212,7,64,0),19 },
+       { IPv4(212,8,0,0),19 },
+       { IPv4(212,8,160,0),19 },
+       { IPv4(212,9,96,0),22 },
+       { IPv4(212,9,96,0),19 },
+       { IPv4(212,9,160,0),19 },
+       { IPv4(212,11,224,0),19 },
+       { IPv4(212,12,128,0),19 },
+       { IPv4(212,14,64,0),19 },
+       { IPv4(212,14,89,0),24 },
+       { IPv4(212,14,96,0),19 },
+       { IPv4(212,15,32,0),21 },
+       { IPv4(212,15,64,0),19 },
+       { IPv4(212,15,128,0),19 },
+       { IPv4(212,16,32,0),19 },
+       { IPv4(212,16,96,0),19 },
+       { IPv4(212,16,192,0),19 },
+       { IPv4(212,16,202,0),24 },
+       { IPv4(212,16,208,0),24 },
+       { IPv4(212,16,209,0),24 },
+       { IPv4(212,16,212,0),24 },
+       { IPv4(212,17,32,0),19 },
+       { IPv4(212,17,128,0),19 },
+       { IPv4(212,17,192,0),19 },
+       { IPv4(212,18,96,0),19 },
+       { IPv4(212,18,192,0),19 },
+       { IPv4(212,19,32,0),19 },
+       { IPv4(212,20,160,0),19 },
+       { IPv4(212,20,224,0),19 },
+       { IPv4(212,20,228,0),24 },
+       { IPv4(212,21,0,0),19 },
+       { IPv4(212,21,64,0),19 },
+       { IPv4(212,21,96,0),19 },
+       { IPv4(212,21,192,0),19 },
+       { IPv4(212,21,196,0),22 },
+       { IPv4(212,21,200,0),23 },
+       { IPv4(212,21,202,0),23 },
+       { IPv4(212,22,32,0),19 },
+       { IPv4(212,22,96,0),24 },
+       { IPv4(212,22,160,0),19 },
+       { IPv4(212,22,192,0),19 },
+       { IPv4(212,23,0,0),19 },
+       { IPv4(212,23,128,0),19 },
+       { IPv4(212,24,64,0),19 },
+       { IPv4(212,24,192,0),19 },
+       { IPv4(212,25,0,0),19 },
+       { IPv4(212,25,224,0),19 },
+       { IPv4(212,28,0,0),20 },
+       { IPv4(212,28,16,0),20 },
+       { IPv4(212,28,96,0),19 },
+       { IPv4(212,30,0,0),19 },
+       { IPv4(212,30,32,0),19 },
+       { IPv4(212,31,0,0),19 },
+       { IPv4(212,31,64,0),19 },
+       { IPv4(212,31,106,0),24 },
+       { IPv4(212,31,107,0),24 },
+       { IPv4(212,31,128,0),19 },
+       { IPv4(212,31,192,0),21 },
+       { IPv4(212,31,200,0),21 },
+       { IPv4(212,31,206,0),23 },
+       { IPv4(212,32,224,0),19 },
+       { IPv4(212,33,32,0),19 },
+       { IPv4(212,33,166,0),24 },
+       { IPv4(212,34,160,0),19 },
+       { IPv4(212,35,32,0),19 },
+       { IPv4(212,35,128,0),19 },
+       { IPv4(212,36,32,0),19 },
+       { IPv4(212,36,96,0),19 },
+       { IPv4(212,37,32,0),19 },
+       { IPv4(212,37,64,0),19 },
+       { IPv4(212,38,160,0),19 },
+       { IPv4(212,39,224,0),19 },
+       { IPv4(212,40,224,0),19 },
+       { IPv4(212,41,64,0),18 },
+       { IPv4(212,41,128,0),19 },
+       { IPv4(212,41,224,0),19 },
+       { IPv4(212,42,64,0),19 },
+       { IPv4(212,42,224,0),19 },
+       { IPv4(212,43,128,0),19 },
+       { IPv4(212,43,160,0),19 },
+       { IPv4(212,43,192,0),18 },
+       { IPv4(212,44,0,0),18 },
+       { IPv4(212,44,160,0),19 },
+       { IPv4(212,46,64,0),19 },
+       { IPv4(212,46,128,0),19 },
+       { IPv4(212,47,160,0),19 },
+       { IPv4(212,47,170,0),23 },
+       { IPv4(212,47,172,0),23 },
+       { IPv4(212,48,32,0),19 },
+       { IPv4(212,48,224,0),19 },
+       { IPv4(212,49,128,0),19 },
+       { IPv4(212,49,128,0),18 },
+       { IPv4(212,49,160,0),19 },
+       { IPv4(212,49,192,0),19 },
+       { IPv4(212,49,224,0),19 },
+       { IPv4(212,50,160,0),19 },
+       { IPv4(212,51,32,0),19 },
+       { IPv4(212,53,0,0),19 },
+       { IPv4(212,54,160,0),19 },
+       { IPv4(212,54,160,0),24 },
+       { IPv4(212,54,167,0),24 },
+       { IPv4(212,54,170,0),24 },
+       { IPv4(212,54,179,0),24 },
+       { IPv4(212,54,185,0),24 },
+       { IPv4(212,55,153,0),24 },
+       { IPv4(212,56,0,0),19 },
+       { IPv4(212,56,0,0),24 },
+       { IPv4(212,56,1,0),24 },
+       { IPv4(212,56,2,0),24 },
+       { IPv4(212,56,3,0),24 },
+       { IPv4(212,56,5,0),24 },
+       { IPv4(212,56,6,0),24 },
+       { IPv4(212,56,7,0),24 },
+       { IPv4(212,56,9,0),24 },
+       { IPv4(212,56,13,0),24 },
+       { IPv4(212,56,14,0),24 },
+       { IPv4(212,56,16,0),22 },
+       { IPv4(212,56,22,0),23 },
+       { IPv4(212,56,24,0),21 },
+       { IPv4(212,56,160,0),19 },
+       { IPv4(212,56,224,0),19 },
+       { IPv4(212,57,0,0),19 },
+       { IPv4(212,57,34,0),24 },
+       { IPv4(212,58,32,0),19 },
+       { IPv4(212,58,128,0),19 },
+       { IPv4(212,58,146,0),24 },
+       { IPv4(212,59,64,0),19 },
+       { IPv4(212,59,192,0),19 },
+       { IPv4(212,60,32,0),19 },
+       { IPv4(212,60,96,0),19 },
+       { IPv4(212,61,0,0),16 },
+       { IPv4(212,66,64,0),19 },
+       { IPv4(212,66,128,0),19 },
+       { IPv4(212,67,96,0),20 },
+       { IPv4(212,67,112,0),22 },
+       { IPv4(212,67,116,0),24 },
+       { IPv4(212,67,118,0),24 },
+       { IPv4(212,67,118,0),23 },
+       { IPv4(212,67,192,0),19 },
+       { IPv4(212,68,160,0),19 },
+       { IPv4(212,68,192,0),19 },
+       { IPv4(212,69,160,0),19 },
+       { IPv4(212,70,64,0),19 },
+       { IPv4(212,70,96,0),19 },
+       { IPv4(212,70,192,0),19 },
+       { IPv4(212,71,192,0),19 },
+       { IPv4(212,71,224,0),22 },
+       { IPv4(212,71,240,0),22 },
+       { IPv4(212,71,252,0),22 },
+       { IPv4(212,73,32,0),19 },
+       { IPv4(212,75,224,0),19 },
+       { IPv4(212,76,128,0),19 },
+       { IPv4(212,76,224,0),19 },
+       { IPv4(212,77,160,0),19 },
+       { IPv4(212,77,224,0),19 },
+       { IPv4(212,79,128,0),19 },
+       { IPv4(212,79,224,0),19 },
+       { IPv4(212,80,96,0),19 },
+       { IPv4(212,82,0,0),19 },
+       { IPv4(212,82,224,0),19 },
+       { IPv4(212,83,32,0),19 },
+       { IPv4(212,84,96,0),19 },
+       { IPv4(212,85,224,0),19 },
+       { IPv4(212,86,32,0),19 },
+       { IPv4(212,86,64,0),19 },
+       { IPv4(212,88,0,0),19 },
+       { IPv4(212,88,96,0),24 },
+       { IPv4(212,88,97,0),24 },
+       { IPv4(212,88,98,0),24 },
+       { IPv4(212,88,99,0),24 },
+       { IPv4(212,88,128,0),19 },
+       { IPv4(212,88,160,0),19 },
+       { IPv4(212,88,192,0),19 },
+       { IPv4(212,88,224,0),19 },
+       { IPv4(212,89,64,0),22 },
+       { IPv4(212,89,69,0),24 },
+       { IPv4(212,89,82,0),24 },
+       { IPv4(212,89,83,0),24 },
+       { IPv4(212,89,128,0),19 },
+       { IPv4(212,89,160,0),19 },
+       { IPv4(212,89,164,0),24 },
+       { IPv4(212,90,32,0),20 },
+       { IPv4(212,90,64,0),19 },
+       { IPv4(212,91,0,0),19 },
+       { IPv4(212,92,64,0),19 },
+       { IPv4(212,92,192,0),22 },
+       { IPv4(212,93,0,0),19 },
+       { IPv4(212,95,144,0),20 },
+       { IPv4(212,95,192,0),19 },
+       { IPv4(212,95,224,0),19 },
+       { IPv4(212,96,0,0),23 },
+       { IPv4(212,96,2,0),24 },
+       { IPv4(212,96,3,0),24 },
+       { IPv4(212,96,4,0),24 },
+       { IPv4(212,96,5,0),24 },
+       { IPv4(212,96,6,0),23 },
+       { IPv4(212,96,8,0),21 },
+       { IPv4(212,96,16,0),21 },
+       { IPv4(212,96,24,0),22 },
+       { IPv4(212,96,30,0),24 },
+       { IPv4(212,96,128,0),19 },
+       { IPv4(212,97,0,0),19 },
+       { IPv4(212,97,160,0),19 },
+       { IPv4(212,98,0,0),20 },
+       { IPv4(212,98,16,0),20 },
+       { IPv4(212,98,64,0),18 },
+       { IPv4(212,98,128,0),24 },
+       { IPv4(212,98,128,0),19 },
+       { IPv4(212,98,129,0),24 },
+       { IPv4(212,98,130,0),24 },
+       { IPv4(212,98,134,0),24 },
+       { IPv4(212,98,135,0),24 },
+       { IPv4(212,98,136,0),24 },
+       { IPv4(212,98,139,0),24 },
+       { IPv4(212,98,140,0),24 },
+       { IPv4(212,98,141,0),24 },
+       { IPv4(212,98,142,0),24 },
+       { IPv4(212,98,143,0),24 },
+       { IPv4(212,98,144,0),24 },
+       { IPv4(212,98,145,0),24 },
+       { IPv4(212,98,146,0),24 },
+       { IPv4(212,98,147,0),24 },
+       { IPv4(212,98,148,0),24 },
+       { IPv4(212,98,149,0),24 },
+       { IPv4(212,98,150,0),24 },
+       { IPv4(212,98,151,0),24 },
+       { IPv4(212,98,152,0),24 },
+       { IPv4(212,98,153,0),24 },
+       { IPv4(212,98,154,0),24 },
+       { IPv4(212,98,157,0),24 },
+       { IPv4(212,98,158,0),24 },
+       { IPv4(212,98,200,0),21 },
+       { IPv4(212,98,201,0),24 },
+       { IPv4(212,98,202,0),24 },
+       { IPv4(212,98,219,0),24 },
+       { IPv4(212,98,223,0),24 },
+       { IPv4(212,98,234,0),24 },
+       { IPv4(212,98,245,0),24 },
+       { IPv4(212,98,252,0),24 },
+       { IPv4(212,100,0,0),19 },
+       { IPv4(212,100,64,0),24 },
+       { IPv4(212,100,66,0),24 },
+       { IPv4(212,100,67,0),24 },
+       { IPv4(212,100,68,0),22 },
+       { IPv4(212,100,96,0),24 },
+       { IPv4(212,100,97,0),24 },
+       { IPv4(212,100,98,0),24 },
+       { IPv4(212,101,192,0),19 },
+       { IPv4(212,102,67,0),24 },
+       { IPv4(212,102,68,0),24 },
+       { IPv4(212,102,192,0),20 },
+       { IPv4(212,102,224,0),19 },
+       { IPv4(212,103,32,0),19 },
+       { IPv4(212,103,64,0),19 },
+       { IPv4(212,104,64,0),21 },
+       { IPv4(212,104,72,0),22 },
+       { IPv4(212,104,76,0),22 },
+       { IPv4(212,104,80,0),21 },
+       { IPv4(212,105,128,0),19 },
+       { IPv4(212,106,192,0),18 },
+       { IPv4(212,107,0,0),19 },
+       { IPv4(212,108,0,0),19 },
+       { IPv4(212,108,8,0),21 },
+       { IPv4(212,108,10,0),23 },
+       { IPv4(212,108,14,0),23 },
+       { IPv4(212,108,16,0),22 },
+       { IPv4(212,108,20,0),22 },
+       { IPv4(212,108,64,0),19 },
+       { IPv4(212,108,128,0),20 },
+       { IPv4(212,108,160,0),19 },
+       { IPv4(212,109,224,0),19 },
+       { IPv4(212,110,32,0),19 },
+       { IPv4(212,110,67,0),24 },
+       { IPv4(212,110,68,0),24 },
+       { IPv4(212,110,69,0),24 },
+       { IPv4(212,110,70,0),24 },
+       { IPv4(212,110,71,0),24 },
+       { IPv4(212,110,72,0),24 },
+       { IPv4(212,110,73,0),24 },
+       { IPv4(212,110,75,0),24 },
+       { IPv4(212,110,76,0),24 },
+       { IPv4(212,110,77,0),24 },
+       { IPv4(212,110,78,0),24 },
+       { IPv4(212,110,79,0),24 },
+       { IPv4(212,110,80,0),24 },
+       { IPv4(212,110,81,0),24 },
+       { IPv4(212,110,82,0),24 },
+       { IPv4(212,110,83,0),24 },
+       { IPv4(212,110,84,0),24 },
+       { IPv4(212,110,85,0),24 },
+       { IPv4(212,110,91,0),24 },
+       { IPv4(212,110,92,0),24 },
+       { IPv4(212,110,93,0),24 },
+       { IPv4(212,110,94,0),24 },
+       { IPv4(212,110,95,0),24 },
+       { IPv4(212,111,32,0),19 },
+       { IPv4(212,113,32,0),19 },
+       { IPv4(212,113,36,0),22 },
+       { IPv4(212,113,40,0),22 },
+       { IPv4(212,113,64,0),19 },
+       { IPv4(212,113,192,0),19 },
+       { IPv4(212,113,224,0),19 },
+       { IPv4(212,114,96,0),19 },
+       { IPv4(212,115,0,0),19 },
+       { IPv4(212,115,32,0),19 },
+       { IPv4(212,115,96,0),19 },
+       { IPv4(212,115,192,0),19 },
+       { IPv4(212,117,64,0),19 },
+       { IPv4(212,118,64,0),19 },
+       { IPv4(212,118,160,0),19 },
+       { IPv4(212,120,32,0),19 },
+       { IPv4(212,120,64,0),18 },
+       { IPv4(212,120,224,0),20 },
+       { IPv4(212,121,0,0),19 },
+       { IPv4(212,122,96,0),19 },
+       { IPv4(212,125,96,0),19 },
+       { IPv4(212,126,128,0),19 },
+       { IPv4(212,126,192,0),19 },
+       { IPv4(212,127,0,0),19 },
+       { IPv4(212,127,16,0),20 },
+       { IPv4(212,127,21,0),24 },
+       { IPv4(212,127,22,0),24 },
+       { IPv4(212,127,23,0),24 },
+       { IPv4(212,127,24,0),21 },
+       { IPv4(212,127,32,0),19 },
+       { IPv4(212,129,64,0),24 },
+       { IPv4(212,132,0,0),16 },
+       { IPv4(212,133,128,0),18 },
+       { IPv4(212,140,0,0),16 },
+       { IPv4(212,147,0,0),17 },
+       { IPv4(212,148,236,0),24 },
+       { IPv4(212,161,128,0),17 },
+       { IPv4(212,162,64,0),18 },
+       { IPv4(212,162,192,0),20 },
+       { IPv4(212,162,195,0),24 },
+       { IPv4(212,162,196,0),24 },
+       { IPv4(212,162,200,0),22 },
+       { IPv4(212,162,208,0),20 },
+       { IPv4(212,162,216,0),22 },
+       { IPv4(212,162,224,0),22 },
+       { IPv4(212,162,228,0),22 },
+       { IPv4(212,162,232,0),24 },
+       { IPv4(212,162,235,0),24 },
+       { IPv4(212,162,236,0),22 },
+       { IPv4(212,162,240,0),20 },
+       { IPv4(212,162,240,0),24 },
+       { IPv4(212,163,0,0),16 },
+       { IPv4(212,163,0,0),17 },
+       { IPv4(212,163,35,0),24 },
+       { IPv4(212,163,36,0),24 },
+       { IPv4(212,163,200,0),22 },
+       { IPv4(212,166,64,0),19 },
+       { IPv4(212,166,96,0),19 },
+       { IPv4(212,166,128,0),17 },
+       { IPv4(212,168,0,0),16 },
+       { IPv4(212,169,0,0),18 },
+       { IPv4(212,173,0,0),16 },
+       { IPv4(212,183,192,0),18 },
+       { IPv4(212,188,128,0),19 },
+       { IPv4(212,188,160,0),19 },
+       { IPv4(212,188,176,0),20 },
+       { IPv4(212,189,0,0),17 },
+       { IPv4(212,189,128,0),17 },
+       { IPv4(212,197,128,0),19 },
+       { IPv4(212,203,0,0),19 },
+       { IPv4(212,204,0,0),19 },
+       { IPv4(212,204,128,0),18 },
+       { IPv4(212,207,0,0),16 },
+       { IPv4(212,211,128,0),17 },
+       { IPv4(212,225,0,0),17 },
+       { IPv4(212,228,0,0),15 },
+       { IPv4(212,230,0,0),15 },
+       { IPv4(212,236,0,0),16 },
+       { IPv4(212,238,0,0),16 },
+       { IPv4(212,239,0,0),17 },
+       { IPv4(212,239,128,0),17 },
+       { IPv4(212,240,0,0),16 },
+       { IPv4(212,241,64,0),18 },
+       { IPv4(212,250,0,0),16 },
+       { IPv4(212,252,0,0),16 },
+       { IPv4(212,252,0,0),15 },
+       { IPv4(212,252,0,0),22 },
+       { IPv4(212,252,4,0),22 },
+       { IPv4(212,252,168,0),23 },
+       { IPv4(212,252,172,0),22 },
+       { IPv4(212,253,0,0),22 },
+       { IPv4(212,253,0,0),17 },
+       { IPv4(212,253,4,0),22 },
+       { IPv4(212,253,128,0),18 },
+       { IPv4(212,253,192,0),18 },
+       { IPv4(213,1,0,0),16 },
+       { IPv4(213,2,0,0),16 },
+       { IPv4(213,2,216,0),21 },
+       { IPv4(213,2,224,0),20 },
+       { IPv4(213,2,224,0),19 },
+       { IPv4(213,5,128,0),17 },
+       { IPv4(213,9,128,0),17 },
+       { IPv4(213,9,193,0),24 },
+       { IPv4(213,10,0,0),16 },
+       { IPv4(213,15,0,0),17 },
+       { IPv4(213,15,128,0),18 },
+       { IPv4(213,15,192,0),19 },
+       { IPv4(213,15,224,0),19 },
+       { IPv4(213,18,0,0),16 },
+       { IPv4(213,31,0,0),16 },
+       { IPv4(213,31,192,0),22 },
+       { IPv4(213,33,76,0),24 },
+       { IPv4(213,35,0,0),16 },
+       { IPv4(213,35,96,0),19 },
+       { IPv4(213,37,0,0),16 },
+       { IPv4(213,43,0,0),16 },
+       { IPv4(213,48,0,0),16 },
+       { IPv4(213,51,0,0),16 },
+       { IPv4(213,52,128,0),19 },
+       { IPv4(213,52,128,0),17 },
+       { IPv4(213,52,160,0),21 },
+       { IPv4(213,52,192,0),21 },
+       { IPv4(213,60,0,0),16 },
+       { IPv4(213,63,0,0),17 },
+       { IPv4(213,68,222,0),23 },
+       { IPv4(213,69,21,0),24 },
+       { IPv4(213,72,0,0),16 },
+       { IPv4(213,74,0,0),16 },
+       { IPv4(213,75,0,0),16 },
+       { IPv4(213,78,10,0),23 },
+       { IPv4(213,78,12,0),22 },
+       { IPv4(213,78,16,0),20 },
+       { IPv4(213,78,32,0),21 },
+       { IPv4(213,78,40,0),22 },
+       { IPv4(213,81,0,0),17 },
+       { IPv4(213,83,0,0),18 },
+       { IPv4(213,92,0,0),17 },
+       { IPv4(213,94,0,0),18 },
+       { IPv4(213,104,0,0),14 },
+       { IPv4(213,109,0,0),16 },
+       { IPv4(213,112,0,0),14 },
+       { IPv4(213,120,0,0),14 },
+       { IPv4(213,120,83,0),24 },
+       { IPv4(213,120,89,0),24 },
+       { IPv4(213,120,90,0),24 },
+       { IPv4(213,121,50,0),24 },
+       { IPv4(213,128,32,0),19 },
+       { IPv4(213,128,128,0),19 },
+       { IPv4(213,129,64,0),19 },
+       { IPv4(213,130,32,0),19 },
+       { IPv4(213,130,160,0),19 },
+       { IPv4(213,133,32,0),19 },
+       { IPv4(213,133,64,0),19 },
+       { IPv4(213,136,0,0),19 },
+       { IPv4(213,136,128,0),19 },
+       { IPv4(213,137,0,0),20 },
+       { IPv4(213,137,16,0),20 },
+       { IPv4(213,137,64,0),19 },
+       { IPv4(213,137,65,0),24 },
+       { IPv4(213,137,71,0),24 },
+       { IPv4(213,137,73,0),24 },
+       { IPv4(213,137,79,0),24 },
+       { IPv4(213,137,95,0),24 },
+       { IPv4(213,137,128,0),22 },
+       { IPv4(213,137,160,0),22 },
+       { IPv4(213,137,164,0),22 },
+       { IPv4(213,137,168,0),22 },
+       { IPv4(213,137,176,0),22 },
+       { IPv4(213,137,180,0),22 },
+       { IPv4(213,137,184,0),22 },
+       { IPv4(213,137,188,0),22 },
+       { IPv4(213,137,192,0),19 },
+       { IPv4(213,138,0,0),19 },
+       { IPv4(213,138,160,0),19 },
+       { IPv4(213,141,0,0),19 },
+       { IPv4(213,141,64,0),19 },
+       { IPv4(213,142,0,0),19 },
+       { IPv4(213,142,128,0),19 },
+       { IPv4(213,142,160,0),19 },
+       { IPv4(213,143,0,0),19 },
+       { IPv4(213,143,128,0),19 },
+       { IPv4(213,144,32,0),19 },
+       { IPv4(213,145,128,0),19 },
+       { IPv4(213,147,0,0),19 },
+       { IPv4(213,147,64,0),24 },
+       { IPv4(213,147,65,0),24 },
+       { IPv4(213,147,66,0),24 },
+       { IPv4(213,147,160,0),19 },
+       { IPv4(213,148,64,0),21 },
+       { IPv4(213,153,128,0),17 },
+       { IPv4(213,154,0,0),19 },
+       { IPv4(213,154,147,0),24 },
+       { IPv4(213,156,0,0),19 },
+       { IPv4(213,156,68,0),24 },
+       { IPv4(213,156,69,0),24 },
+       { IPv4(213,156,70,0),24 },
+       { IPv4(213,156,73,0),24 },
+       { IPv4(213,157,64,0),19 },
+       { IPv4(213,157,192,0),19 },
+       { IPv4(213,160,32,0),19 },
+       { IPv4(213,160,64,0),19 },
+       { IPv4(213,160,192,0),19 },
+       { IPv4(213,160,199,0),24 },
+       { IPv4(213,161,64,0),19 },
+       { IPv4(213,161,70,0),24 },
+       { IPv4(213,161,128,0),20 },
+       { IPv4(213,161,144,0),20 },
+       { IPv4(213,161,192,0),19 },
+       { IPv4(213,164,128,0),19 },
+       { IPv4(213,166,14,0),24 },
+       { IPv4(213,166,17,0),24 },
+       { IPv4(213,166,20,0),24 },
+       { IPv4(213,166,21,0),24 },
+       { IPv4(213,166,22,0),24 },
+       { IPv4(213,166,23,0),24 },
+       { IPv4(213,166,24,0),24 },
+       { IPv4(213,166,29,0),24 },
+       { IPv4(213,166,70,0),24 },
+       { IPv4(213,166,74,0),24 },
+       { IPv4(213,166,75,0),24 },
+       { IPv4(213,166,76,0),24 },
+       { IPv4(213,166,77,0),24 },
+       { IPv4(213,166,78,0),24 },
+       { IPv4(213,166,79,0),24 },
+       { IPv4(213,166,80,0),24 },
+       { IPv4(213,167,160,0),19 },
+       { IPv4(213,170,50,0),23 },
+       { IPv4(213,170,192,0),19 },
+       { IPv4(213,171,192,0),19 },
+       { IPv4(213,172,32,0),19 },
+       { IPv4(213,174,192,0),19 },
+       { IPv4(213,177,160,0),21 },
+       { IPv4(213,178,128,0),19 },
+       { IPv4(213,178,160,0),19 },
+       { IPv4(213,181,64,0),19 },
+       { IPv4(213,182,0,0),19 },
+       { IPv4(213,185,0,0),19 },
+       { IPv4(213,185,64,0),19 },
+       { IPv4(213,185,128,0),19 },
+       { IPv4(213,185,192,0),19 },
+       { IPv4(213,186,128,0),19 },
+       { IPv4(213,190,0,0),19 },
+       { IPv4(213,191,0,0),19 },
+       { IPv4(213,193,64,0),18 },
+       { IPv4(213,194,0,0),18 },
+       { IPv4(213,199,144,0),20 },
+       { IPv4(213,200,0,0),19 },
+       { IPv4(213,200,1,0),24 },
+       { IPv4(213,201,0,0),17 },
+       { IPv4(213,204,64,0),18 },
+       { IPv4(213,207,0,0),18 },
+       { IPv4(213,207,64,0),18 },
+       { IPv4(213,208,64,0),18 },
+       { IPv4(213,209,128,0),19 },
+       { IPv4(213,212,128,0),18 },
+       { IPv4(213,213,0,0),18 },
+       { IPv4(213,213,64,0),18 },
+       { IPv4(213,217,0,0),19 },
+       { IPv4(213,217,128,0),18 },
+       { IPv4(213,218,0,0),19 },
+       { IPv4(213,219,0,0),18 },
+       { IPv4(213,219,62,0),24 },
+       { IPv4(213,219,63,0),24 },
+       { IPv4(213,221,64,0),18 },
+       { IPv4(213,221,128,0),19 },
+       { IPv4(213,222,0,0),19 },
+       { IPv4(213,222,64,0),18 },
+       { IPv4(213,226,64,0),18 },
+       { IPv4(213,227,0,0),18 },
+       { IPv4(213,231,128,0),19 },
+       { IPv4(213,232,64,0),23 },
+       { IPv4(213,232,112,0),24 },
+       { IPv4(213,233,64,0),24 },
+       { IPv4(213,233,65,0),24 },
+       { IPv4(213,233,66,0),24 },
+       { IPv4(213,233,68,0),24 },
+       { IPv4(213,233,69,0),24 },
+       { IPv4(213,233,71,0),24 },
+       { IPv4(213,233,72,0),24 },
+       { IPv4(213,233,73,0),24 },
+       { IPv4(213,233,74,0),24 },
+       { IPv4(213,233,75,0),24 },
+       { IPv4(213,233,76,0),24 },
+       { IPv4(213,233,77,0),24 },
+       { IPv4(213,233,78,0),24 },
+       { IPv4(213,233,79,0),24 },
+       { IPv4(213,233,80,0),24 },
+       { IPv4(213,233,81,0),24 },
+       { IPv4(213,233,82,0),24 },
+       { IPv4(213,233,83,0),24 },
+       { IPv4(213,233,84,0),24 },
+       { IPv4(213,233,85,0),24 },
+       { IPv4(213,233,86,0),24 },
+       { IPv4(213,233,96,0),24 },
+       { IPv4(213,233,98,0),24 },
+       { IPv4(213,233,99,0),24 },
+       { IPv4(213,233,100,0),24 },
+       { IPv4(213,233,101,0),24 },
+       { IPv4(213,233,102,0),24 },
+       { IPv4(213,233,103,0),24 },
+       { IPv4(213,233,104,0),24 },
+       { IPv4(213,233,105,0),24 },
+       { IPv4(213,233,106,0),24 },
+       { IPv4(213,233,107,0),24 },
+       { IPv4(213,233,108,0),24 },
+       { IPv4(213,233,109,0),24 },
+       { IPv4(213,233,111,0),24 },
+       { IPv4(213,233,122,0),24 },
+       { IPv4(213,233,123,0),24 },
+       { IPv4(213,233,124,0),24 },
+       { IPv4(213,233,125,0),24 },
+       { IPv4(213,233,126,0),24 },
+       { IPv4(213,233,127,0),24 },
+       { IPv4(213,236,0,0),19 },
+       { IPv4(213,236,64,0),18 },
+       { IPv4(213,239,42,0),24 },
+       { IPv4(213,239,56,0),22 },
+       { IPv4(213,239,60,0),24 },
+       { IPv4(213,239,128,0),18 },
+       { IPv4(213,243,0,0),19 },
+       { IPv4(213,243,160,0),19 },
+       { IPv4(213,244,124,0),22 },
+       { IPv4(213,246,128,0),18 },
+       { IPv4(213,249,0,0),18 },
+       { IPv4(213,249,128,0),18 },
+       { IPv4(213,251,64,0),18 },
+       { IPv4(213,253,0,0),18 },
+       { IPv4(213,253,128,0),18 },
+       { IPv4(213,254,64,0),18 },
+       { IPv4(213,254,160,0),19 },
+       { IPv4(213,255,0,0),19 },
+       { IPv4(213,255,0,0),18 },
+       { IPv4(213,255,64,0),18 },
+       { IPv4(214,0,0,0),8 },
+       { IPv4(214,1,70,0),24 },
+       { IPv4(214,3,0,0),24 },
+       { IPv4(214,3,50,0),24 },
+       { IPv4(214,3,153,0),24 },
+       { IPv4(214,3,154,0),24 },
+       { IPv4(215,0,0,0),9 },
+       { IPv4(215,1,1,0),24 },
+       { IPv4(215,1,2,0),24 },
+       { IPv4(215,1,3,0),24 },
+       { IPv4(215,1,4,0),24 },
+       { IPv4(215,1,8,0),24 },
+       { IPv4(215,1,9,0),24 },
+       { IPv4(215,1,11,0),24 },
+       { IPv4(215,1,12,0),24 },
+       { IPv4(215,1,13,0),24 },
+       { IPv4(215,1,14,0),24 },
+       { IPv4(215,1,15,0),24 },
+       { IPv4(215,1,33,0),24 },
+       { IPv4(216,0,1,0),24 },
+       { IPv4(216,1,56,0),21 },
+       { IPv4(216,1,196,0),22 },
+       { IPv4(216,2,32,0),21 },
+       { IPv4(216,2,40,0),23 },
+       { IPv4(216,3,78,0),24 },
+       { IPv4(216,5,16,0),20 },
+       { IPv4(216,5,92,0),23 },
+       { IPv4(216,6,8,0),22 },
+       { IPv4(216,7,0,0),19 },
+       { IPv4(216,7,31,0),24 },
+       { IPv4(216,7,32,0),21 },
+       { IPv4(216,7,40,0),22 },
+       { IPv4(216,7,44,0),22 },
+       { IPv4(216,8,0,0),18 },
+       { IPv4(216,8,20,0),22 },
+       { IPv4(216,8,24,0),22 },
+       { IPv4(216,8,32,0),21 },
+       { IPv4(216,8,64,0),19 },
+       { IPv4(216,8,70,0),23 },
+       { IPv4(216,8,72,0),23 },
+       { IPv4(216,9,160,0),24 },
+       { IPv4(216,9,161,0),24 },
+       { IPv4(216,9,162,0),24 },
+       { IPv4(216,9,163,0),24 },
+       { IPv4(216,9,164,0),24 },
+       { IPv4(216,9,165,0),24 },
+       { IPv4(216,9,166,0),24 },
+       { IPv4(216,9,167,0),24 },
+       { IPv4(216,9,168,0),24 },
+       { IPv4(216,9,169,0),24 },
+       { IPv4(216,9,170,0),24 },
+       { IPv4(216,9,172,0),24 },
+       { IPv4(216,9,173,0),24 },
+       { IPv4(216,9,174,0),24 },
+       { IPv4(216,9,175,0),24 },
+       { IPv4(216,10,192,0),20 },
+       { IPv4(216,10,208,0),20 },
+       { IPv4(216,12,128,0),20 },
+       { IPv4(216,13,0,0),16 },
+       { IPv4(216,13,18,0),24 },
+       { IPv4(216,14,10,0),24 },
+       { IPv4(216,14,62,0),23 },
+       { IPv4(216,15,0,0),17 },
+       { IPv4(216,15,128,0),19 },
+       { IPv4(216,15,160,0),19 },
+       { IPv4(216,15,192,0),19 },
+       { IPv4(216,15,224,0),19 },
+       { IPv4(216,17,0,0),19 },
+       { IPv4(216,17,32,0),19 },
+       { IPv4(216,17,64,0),19 },
+       { IPv4(216,17,76,0),24 },
+       { IPv4(216,19,111,0),24 },
+       { IPv4(216,20,160,0),19 },
+       { IPv4(216,21,0,0),20 },
+       { IPv4(216,21,160,0),24 },
+       { IPv4(216,21,163,0),24 },
+       { IPv4(216,21,165,0),24 },
+       { IPv4(216,21,166,0),24 },
+       { IPv4(216,21,168,0),24 },
+       { IPv4(216,21,170,0),24 },
+       { IPv4(216,21,171,0),24 },
+       { IPv4(216,21,172,0),24 },
+       { IPv4(216,21,173,0),24 },
+       { IPv4(216,21,174,0),24 },
+       { IPv4(216,21,175,0),24 },
+       { IPv4(216,21,196,0),24 },
+       { IPv4(216,21,201,0),24 },
+       { IPv4(216,21,202,0),24 },
+       { IPv4(216,21,206,0),23 },
+       { IPv4(216,21,224,0),20 },
+       { IPv4(216,21,224,0),22 },
+       { IPv4(216,21,228,0),22 },
+       { IPv4(216,21,232,0),24 },
+       { IPv4(216,21,233,0),24 },
+       { IPv4(216,21,235,0),24 },
+       { IPv4(216,21,238,0),24 },
+       { IPv4(216,21,239,0),24 },
+       { IPv4(216,22,0,0),19 },
+       { IPv4(216,22,32,0),20 },
+       { IPv4(216,22,128,0),17 },
+       { IPv4(216,23,128,0),19 },
+       { IPv4(216,23,205,0),24 },
+       { IPv4(216,23,224,0),21 },
+       { IPv4(216,24,128,0),19 },
+       { IPv4(216,25,0,0),17 },
+       { IPv4(216,25,128,0),19 },
+       { IPv4(216,25,192,0),21 },
+       { IPv4(216,25,200,0),22 },
+       { IPv4(216,25,204,0),24 },
+       { IPv4(216,25,205,0),24 },
+       { IPv4(216,25,206,0),24 },
+       { IPv4(216,25,207,0),24 },
+       { IPv4(216,26,80,0),20 },
+       { IPv4(216,26,128,0),18 },
+       { IPv4(216,26,152,0),21 },
+       { IPv4(216,27,128,0),19 },
+       { IPv4(216,27,176,0),20 },
+       { IPv4(216,28,0,0),15 },
+       { IPv4(216,28,0,0),21 },
+       { IPv4(216,28,34,0),24 },
+       { IPv4(216,28,38,0),24 },
+       { IPv4(216,28,39,0),24 },
+       { IPv4(216,28,46,0),24 },
+       { IPv4(216,28,48,0),20 },
+       { IPv4(216,28,64,0),22 },
+       { IPv4(216,28,69,0),24 },
+       { IPv4(216,28,70,0),23 },
+       { IPv4(216,28,80,0),20 },
+       { IPv4(216,28,98,0),24 },
+       { IPv4(216,28,104,0),24 },
+       { IPv4(216,28,106,0),24 },
+       { IPv4(216,28,107,0),24 },
+       { IPv4(216,28,109,0),24 },
+       { IPv4(216,28,120,0),24 },
+       { IPv4(216,28,131,0),24 },
+       { IPv4(216,28,139,0),24 },
+       { IPv4(216,28,151,0),24 },
+       { IPv4(216,28,152,0),24 },
+       { IPv4(216,28,184,0),24 },
+       { IPv4(216,28,192,0),22 },
+       { IPv4(216,28,223,0),24 },
+       { IPv4(216,28,244,0),24 },
+       { IPv4(216,28,249,0),24 },
+       { IPv4(216,28,251,0),24 },
+       { IPv4(216,29,2,0),24 },
+       { IPv4(216,29,16,0),23 },
+       { IPv4(216,29,52,0),23 },
+       { IPv4(216,29,71,0),24 },
+       { IPv4(216,29,78,0),23 },
+       { IPv4(216,29,80,0),24 },
+       { IPv4(216,29,88,0),24 },
+       { IPv4(216,29,110,0),23 },
+       { IPv4(216,29,153,0),24 },
+       { IPv4(216,29,162,0),24 },
+       { IPv4(216,29,164,0),24 },
+       { IPv4(216,29,165,0),24 },
+       { IPv4(216,29,166,0),24 },
+       { IPv4(216,29,171,0),24 },
+       { IPv4(216,29,184,0),24 },
+       { IPv4(216,29,185,0),24 },
+       { IPv4(216,29,216,0),24 },
+       { IPv4(216,29,217,0),24 },
+       { IPv4(216,29,240,0),24 },
+       { IPv4(216,30,0,0),17 },
+       { IPv4(216,30,128,0),20 },
+       { IPv4(216,31,128,0),18 },
+       { IPv4(216,32,10,0),23 },
+       { IPv4(216,32,114,0),24 },
+       { IPv4(216,32,120,0),24 },
+       { IPv4(216,32,180,0),22 },
+       { IPv4(216,32,240,0),22 },
+       { IPv4(216,32,252,0),23 },
+       { IPv4(216,33,9,0),24 },
+       { IPv4(216,33,16,0),22 },
+       { IPv4(216,33,60,0),23 },
+       { IPv4(216,33,86,0),24 },
+       { IPv4(216,33,148,0),22 },
+       { IPv4(216,33,151,0),24 },
+       { IPv4(216,33,156,0),23 },
+       { IPv4(216,33,171,0),24 },
+       { IPv4(216,33,236,0),22 },
+       { IPv4(216,33,240,0),22 },
+       { IPv4(216,33,244,0),22 },
+       { IPv4(216,34,60,0),22 },
+       { IPv4(216,34,60,0),23 },
+       { IPv4(216,34,72,0),22 },
+       { IPv4(216,35,59,0),24 },
+       { IPv4(216,36,64,0),20 },
+       { IPv4(216,36,128,0),20 },
+       { IPv4(216,36,144,0),20 },
+       { IPv4(216,36,160,0),20 },
+       { IPv4(216,36,176,0),20 },
+       { IPv4(216,37,96,0),22 },
+       { IPv4(216,37,100,0),22 },
+       { IPv4(216,37,110,0),23 },
+       { IPv4(216,37,128,0),19 },
+       { IPv4(216,37,160,0),19 },
+       { IPv4(216,37,192,0),19 },
+       { IPv4(216,37,224,0),19 },
+       { IPv4(216,38,0,0),19 },
+       { IPv4(216,38,32,0),20 },
+       { IPv4(216,38,64,0),20 },
+       { IPv4(216,38,96,0),20 },
+       { IPv4(216,38,192,0),19 },
+       { IPv4(216,38,224,0),23 },
+       { IPv4(216,39,0,0),19 },
+       { IPv4(216,39,224,0),20 },
+       { IPv4(216,39,240,0),20 },
+       { IPv4(216,39,240,0),24 },
+       { IPv4(216,40,48,0),22 },
+       { IPv4(216,40,52,0),22 },
+       { IPv4(216,41,0,0),17 },
+       { IPv4(216,41,0,0),20 },
+       { IPv4(216,41,85,0),24 },
+       { IPv4(216,41,106,0),24 },
+       { IPv4(216,41,107,0),24 },
+       { IPv4(216,42,0,0),16 },
+       { IPv4(216,44,0,0),16 },
+       { IPv4(216,46,96,0),19 },
+       { IPv4(216,46,160,0),19 },
+       { IPv4(216,47,0,0),20 },
+       { IPv4(216,47,64,0),19 },
+       { IPv4(216,47,106,0),24 },
+       { IPv4(216,47,128,0),19 },
+       { IPv4(216,47,160,0),19 },
+       { IPv4(216,49,32,0),19 },
+       { IPv4(216,49,80,0),20 },
+       { IPv4(216,49,80,0),21 },
+       { IPv4(216,49,88,0),21 },
+       { IPv4(216,49,202,0),23 },
+       { IPv4(216,49,204,0),23 },
+       { IPv4(216,49,224,0),20 },
+       { IPv4(216,50,0,0),16 },
+       { IPv4(216,51,0,0),17 },
+       { IPv4(216,51,128,0),18 },
+       { IPv4(216,51,192,0),19 },
+       { IPv4(216,52,0,0),19 },
+       { IPv4(216,52,10,0),24 },
+       { IPv4(216,52,11,0),24 },
+       { IPv4(216,52,14,0),24 },
+       { IPv4(216,52,15,0),24 },
+       { IPv4(216,52,18,0),24 },
+       { IPv4(216,52,23,0),24 },
+       { IPv4(216,52,25,0),24 },
+       { IPv4(216,52,28,0),24 },
+       { IPv4(216,52,32,0),20 },
+       { IPv4(216,52,45,0),24 },
+       { IPv4(216,52,48,0),20 },
+       { IPv4(216,52,50,0),24 },
+       { IPv4(216,52,51,0),24 },
+       { IPv4(216,52,54,0),24 },
+       { IPv4(216,52,57,0),24 },
+       { IPv4(216,52,69,0),24 },
+       { IPv4(216,52,74,0),23 },
+       { IPv4(216,52,80,0),21 },
+       { IPv4(216,52,83,0),24 },
+       { IPv4(216,52,84,0),24 },
+       { IPv4(216,52,87,0),24 },
+       { IPv4(216,52,96,0),20 },
+       { IPv4(216,52,101,0),24 },
+       { IPv4(216,52,105,0),24 },
+       { IPv4(216,52,106,0),24 },
+       { IPv4(216,52,107,0),24 },
+       { IPv4(216,52,112,0),20 },
+       { IPv4(216,52,116,0),24 },
+       { IPv4(216,52,117,0),24 },
+       { IPv4(216,52,123,0),24 },
+       { IPv4(216,52,128,0),19 },
+       { IPv4(216,52,133,0),24 },
+       { IPv4(216,52,134,0),24 },
+       { IPv4(216,52,136,0),24 },
+       { IPv4(216,52,139,0),24 },
+       { IPv4(216,52,146,0),23 },
+       { IPv4(216,52,149,0),24 },
+       { IPv4(216,52,152,0),24 },
+       { IPv4(216,52,160,0),21 },
+       { IPv4(216,52,168,0),21 },
+       { IPv4(216,52,174,0),24 },
+       { IPv4(216,52,176,0),20 },
+       { IPv4(216,52,183,0),24 },
+       { IPv4(216,52,185,0),24 },
+       { IPv4(216,52,188,0),24 },
+       { IPv4(216,52,192,0),20 },
+       { IPv4(216,52,204,0),24 },
+       { IPv4(216,52,208,0),20 },
+       { IPv4(216,52,216,0),24 },
+       { IPv4(216,52,224,0),24 },
+       { IPv4(216,52,224,0),19 },
+       { IPv4(216,52,225,0),24 },
+       { IPv4(216,52,229,0),24 },
+       { IPv4(216,52,236,0),24 },
+       { IPv4(216,52,238,0),24 },
+       { IPv4(216,52,241,0),24 },
+       { IPv4(216,52,247,0),24 },
+       { IPv4(216,52,249,0),24 },
+       { IPv4(216,52,251,0),24 },
+       { IPv4(216,53,0,0),18 },
+       { IPv4(216,53,0,0),17 },
+       { IPv4(216,53,50,0),24 },
+       { IPv4(216,53,64,0),19 },
+       { IPv4(216,53,71,0),24 },
+       { IPv4(216,53,96,0),24 },
+       { IPv4(216,53,99,0),24 },
+       { IPv4(216,53,100,0),24 },
+       { IPv4(216,54,0,0),18 },
+       { IPv4(216,54,64,0),19 },
+       { IPv4(216,54,96,0),19 },
+       { IPv4(216,54,154,0),24 },
+       { IPv4(216,54,192,0),22 },
+       { IPv4(216,54,234,0),24 },
+       { IPv4(216,54,235,0),24 },
+       { IPv4(216,54,236,0),23 },
+       { IPv4(216,54,238,0),24 },
+       { IPv4(216,55,0,0),18 },
+       { IPv4(216,57,0,0),18 },
+       { IPv4(216,57,6,0),23 },
+       { IPv4(216,57,8,0),21 },
+       { IPv4(216,57,12,0),23 },
+       { IPv4(216,57,14,0),23 },
+       { IPv4(216,57,20,0),22 },
+       { IPv4(216,57,26,0),23 },
+       { IPv4(216,57,128,0),19 },
+       { IPv4(216,57,144,0),24 },
+       { IPv4(216,57,146,0),23 },
+       { IPv4(216,57,192,0),20 },
+       { IPv4(216,57,208,0),20 },
+       { IPv4(216,58,0,0),17 },
+       { IPv4(216,58,64,0),18 },
+       { IPv4(216,58,128,0),19 },
+       { IPv4(216,58,160,0),20 },
+       { IPv4(216,59,93,0),24 },
+       { IPv4(216,59,184,0),21 },
+       { IPv4(216,59,192,0),19 },
+       { IPv4(216,61,0,0),19 },
+       { IPv4(216,63,144,0),21 },
+       { IPv4(216,63,152,0),21 },
+       { IPv4(216,63,158,0),24 },
+       { IPv4(216,64,128,0),21 },
+       { IPv4(216,64,132,0),24 },
+       { IPv4(216,64,134,0),23 },
+       { IPv4(216,64,136,0),21 },
+       { IPv4(216,64,136,0),23 },
+       { IPv4(216,64,138,0),23 },
+       { IPv4(216,64,140,0),23 },
+       { IPv4(216,64,142,0),23 },
+       { IPv4(216,64,144,0),21 },
+       { IPv4(216,64,145,0),24 },
+       { IPv4(216,64,146,0),24 },
+       { IPv4(216,64,147,0),24 },
+       { IPv4(216,64,149,0),24 },
+       { IPv4(216,64,152,0),22 },
+       { IPv4(216,64,153,0),24 },
+       { IPv4(216,64,154,0),24 },
+       { IPv4(216,64,155,0),24 },
+       { IPv4(216,64,156,0),22 },
+       { IPv4(216,64,158,0),24 },
+       { IPv4(216,64,159,0),24 },
+       { IPv4(216,64,160,0),21 },
+       { IPv4(216,64,161,0),24 },
+       { IPv4(216,64,176,0),21 },
+       { IPv4(216,64,178,0),24 },
+       { IPv4(216,64,179,0),24 },
+       { IPv4(216,64,180,0),24 },
+       { IPv4(216,64,181,0),24 },
+       { IPv4(216,64,184,0),21 },
+       { IPv4(216,65,0,0),17 },
+       { IPv4(216,66,0,0),19 },
+       { IPv4(216,66,32,0),22 },
+       { IPv4(216,66,32,0),19 },
+       { IPv4(216,66,64,0),19 },
+       { IPv4(216,66,74,0),23 },
+       { IPv4(216,66,96,0),20 },
+       { IPv4(216,67,192,0),20 },
+       { IPv4(216,67,208,0),20 },
+       { IPv4(216,68,0,0),16 },
+       { IPv4(216,68,76,0),24 },
+       { IPv4(216,68,77,0),24 },
+       { IPv4(216,68,78,0),24 },
+       { IPv4(216,68,84,0),24 },
+       { IPv4(216,68,136,0),22 },
+       { IPv4(216,68,140,0),22 },
+       { IPv4(216,68,160,0),22 },
+       { IPv4(216,68,164,0),22 },
+       { IPv4(216,68,168,0),21 },
+       { IPv4(216,68,176,0),21 },
+       { IPv4(216,68,184,0),22 },
+       { IPv4(216,69,96,0),24 },
+       { IPv4(216,70,128,0),18 },
+       { IPv4(216,70,163,0),24 },
+       { IPv4(216,70,171,0),24 },
+       { IPv4(216,70,172,0),24 },
+       { IPv4(216,70,190,0),24 },
+       { IPv4(216,70,224,0),19 },
+       { IPv4(216,71,43,0),24 },
+       { IPv4(216,71,53,0),24 },
+       { IPv4(216,71,54,0),23 },
+       { IPv4(216,72,10,0),24 },
+       { IPv4(216,72,16,0),24 },
+       { IPv4(216,72,17,0),24 },
+       { IPv4(216,72,18,0),24 },
+       { IPv4(216,72,19,0),24 },
+       { IPv4(216,72,20,0),24 },
+       { IPv4(216,72,95,0),24 },
+       { IPv4(216,73,0,0),18 },
+       { IPv4(216,73,5,0),24 },
+       { IPv4(216,73,10,0),24 },
+       { IPv4(216,73,11,0),24 },
+       { IPv4(216,73,12,0),24 },
+       { IPv4(216,73,13,0),24 },
+       { IPv4(216,73,14,0),24 },
+       { IPv4(216,73,15,0),24 },
+       { IPv4(216,73,16,0),24 },
+       { IPv4(216,73,17,0),24 },
+       { IPv4(216,73,26,0),24 },
+       { IPv4(216,73,96,0),20 },
+       { IPv4(216,73,128,0),18 },
+       { IPv4(216,73,176,0),20 },
+       { IPv4(216,74,0,0),18 },
+       { IPv4(216,75,64,0),19 },
+       { IPv4(216,75,132,0),22 },
+       { IPv4(216,75,136,0),24 },
+       { IPv4(216,75,137,0),24 },
+       { IPv4(216,79,170,0),24 },
+       { IPv4(216,80,0,0),17 },
+       { IPv4(216,80,38,0),24 },
+       { IPv4(216,81,128,0),17 },
+       { IPv4(216,82,80,0),24 },
+       { IPv4(216,82,80,0),21 },
+       { IPv4(216,82,104,0),22 },
+       { IPv4(216,82,108,0),24 },
+       { IPv4(216,82,113,0),24 },
+       { IPv4(216,82,114,0),24 },
+       { IPv4(216,82,114,0),23 },
+       { IPv4(216,82,115,0),24 },
+       { IPv4(216,82,116,0),23 },
+       { IPv4(216,82,118,0),24 },
+       { IPv4(216,82,123,0),24 },
+       { IPv4(216,82,124,0),23 },
+       { IPv4(216,82,126,0),24 },
+       { IPv4(216,82,192,0),20 },
+       { IPv4(216,82,224,0),24 },
+       { IPv4(216,82,228,0),24 },
+       { IPv4(216,82,229,0),24 },
+       { IPv4(216,82,233,0),24 },
+       { IPv4(216,82,236,0),24 },
+       { IPv4(216,82,237,0),24 },
+       { IPv4(216,82,238,0),24 },
+       { IPv4(216,82,239,0),24 },
+       { IPv4(216,83,0,0),19 },
+       { IPv4(216,83,64,0),20 },
+       { IPv4(216,83,160,0),19 },
+       { IPv4(216,84,0,0),16 },
+       { IPv4(216,84,78,0),24 },
+       { IPv4(216,84,92,0),24 },
+       { IPv4(216,84,93,0),24 },
+       { IPv4(216,84,94,0),24 },
+       { IPv4(216,84,218,0),24 },
+       { IPv4(216,85,0,0),16 },
+       { IPv4(216,85,126,0),24 },
+       { IPv4(216,85,127,0),24 },
+       { IPv4(216,85,228,0),24 },
+       { IPv4(216,85,233,0),24 },
+       { IPv4(216,85,234,0),23 },
+       { IPv4(216,85,236,0),22 },
+       { IPv4(216,85,240,0),20 },
+       { IPv4(216,86,32,0),19 },
+       { IPv4(216,86,64,0),19 },
+       { IPv4(216,86,96,0),19 },
+       { IPv4(216,86,128,0),20 },
+       { IPv4(216,86,160,0),20 },
+       { IPv4(216,86,224,0),20 },
+       { IPv4(216,86,240,0),20 },
+       { IPv4(216,87,64,0),19 },
+       { IPv4(216,87,128,0),19 },
+       { IPv4(216,87,192,0),20 },
+       { IPv4(216,87,208,0),20 },
+       { IPv4(216,88,200,0),21 },
+       { IPv4(216,88,211,0),24 },
+       { IPv4(216,89,68,0),22 },
+       { IPv4(216,89,72,0),21 },
+       { IPv4(216,89,80,0),21 },
+       { IPv4(216,89,238,0),23 },
+       { IPv4(216,89,244,0),23 },
+       { IPv4(216,90,0,0),23 },
+       { IPv4(216,90,40,0),22 },
+       { IPv4(216,90,72,0),22 },
+       { IPv4(216,90,228,0),22 },
+       { IPv4(216,91,114,0),23 },
+       { IPv4(216,91,116,0),22 },
+       { IPv4(216,91,130,0),24 },
+       { IPv4(216,92,0,0),16 },
+       { IPv4(216,94,86,0),24 },
+       { IPv4(216,94,112,0),24 },
+       { IPv4(216,94,168,0),24 },
+       { IPv4(216,94,179,0),24 },
+       { IPv4(216,94,180,0),24 },
+       { IPv4(216,96,128,0),18 },
+       { IPv4(216,97,128,0),19 },
+       { IPv4(216,98,32,0),20 },
+       { IPv4(216,98,128,0),19 },
+       { IPv4(216,98,160,0),20 },
+       { IPv4(216,98,176,0),24 },
+       { IPv4(216,98,177,0),24 },
+       { IPv4(216,98,178,0),24 },
+       { IPv4(216,98,179,0),24 },
+       { IPv4(216,98,180,0),24 },
+       { IPv4(216,98,181,0),24 },
+       { IPv4(216,98,182,0),24 },
+       { IPv4(216,98,183,0),24 },
+       { IPv4(216,98,184,0),24 },
+       { IPv4(216,98,185,0),24 },
+       { IPv4(216,98,186,0),24 },
+       { IPv4(216,98,187,0),24 },
+       { IPv4(216,98,188,0),24 },
+       { IPv4(216,98,189,0),24 },
+       { IPv4(216,98,190,0),24 },
+       { IPv4(216,98,191,0),24 },
+       { IPv4(216,98,192,0),20 },
+       { IPv4(216,99,32,0),19 },
+       { IPv4(216,99,103,0),24 },
+       { IPv4(216,99,128,0),21 },
+       { IPv4(216,99,136,0),22 },
+       { IPv4(216,99,224,0),19 },
+       { IPv4(216,100,88,0),21 },
+       { IPv4(216,100,96,0),19 },
+       { IPv4(216,101,0,0),20 },
+       { IPv4(216,101,95,0),24 },
+       { IPv4(216,101,143,0),24 },
+       { IPv4(216,101,254,0),24 },
+       { IPv4(216,102,102,0),24 },
+       { IPv4(216,102,182,0),24 },
+       { IPv4(216,102,255,0),24 },
+       { IPv4(216,103,160,0),20 },
+       { IPv4(216,104,48,0),21 },
+       { IPv4(216,104,96,0),19 },
+       { IPv4(216,104,160,0),19 },
+       { IPv4(216,105,0,0),19 },
+       { IPv4(216,105,128,0),19 },
+       { IPv4(216,105,160,0),20 },
+       { IPv4(216,105,192,0),20 },
+       { IPv4(216,106,192,0),22 },
+       { IPv4(216,107,0,0),18 },
+       { IPv4(216,108,192,0),20 },
+       { IPv4(216,109,104,0),24 },
+       { IPv4(216,109,128,0),19 },
+       { IPv4(216,109,224,0),20 },
+       { IPv4(216,109,240,0),20 },
+       { IPv4(216,110,32,0),20 },
+       { IPv4(216,110,128,0),18 },
+       { IPv4(216,111,74,0),24 },
+       { IPv4(216,111,138,0),24 },
+       { IPv4(216,111,144,0),23 },
+       { IPv4(216,111,166,0),23 },
+       { IPv4(216,111,243,0),24 },
+       { IPv4(216,112,0,0),16 },
+       { IPv4(216,112,17,0),24 },
+       { IPv4(216,112,28,0),24 },
+       { IPv4(216,112,40,0),21 },
+       { IPv4(216,112,52,0),24 },
+       { IPv4(216,112,54,0),24 },
+       { IPv4(216,112,56,0),24 },
+       { IPv4(216,112,116,0),24 },
+       { IPv4(216,112,126,0),23 },
+       { IPv4(216,112,132,0),22 },
+       { IPv4(216,112,152,0),24 },
+       { IPv4(216,112,176,0),22 },
+       { IPv4(216,112,188,0),22 },
+       { IPv4(216,112,194,0),23 },
+       { IPv4(216,112,196,0),23 },
+       { IPv4(216,112,199,0),24 },
+       { IPv4(216,112,240,0),22 },
+       { IPv4(216,113,64,0),22 },
+       { IPv4(216,113,128,0),19 },
+       { IPv4(216,113,192,0),20 },
+       { IPv4(216,114,128,0),18 },
+       { IPv4(216,115,48,0),20 },
+       { IPv4(216,115,128,0),19 },
+       { IPv4(216,115,224,0),19 },
+       { IPv4(216,116,64,0),20 },
+       { IPv4(216,116,96,0),19 },
+       { IPv4(216,116,160,0),19 },
+       { IPv4(216,116,191,0),24 },
+       { IPv4(216,117,76,0),24 },
+       { IPv4(216,117,98,0),23 },
+       { IPv4(216,117,128,0),18 },
+       { IPv4(216,118,64,0),18 },
+       { IPv4(216,118,118,0),24 },
+       { IPv4(216,118,196,0),22 },
+       { IPv4(216,119,96,0),19 },
+       { IPv4(216,119,192,0),20 },
+       { IPv4(216,120,0,0),17 },
+       { IPv4(216,120,4,0),23 },
+       { IPv4(216,120,16,0),23 },
+       { IPv4(216,120,128,0),20 },
+       { IPv4(216,120,144,0),20 },
+       { IPv4(216,120,160,0),21 },
+       { IPv4(216,120,168,0),24 },
+       { IPv4(216,120,169,0),24 },
+       { IPv4(216,120,170,0),24 },
+       { IPv4(216,120,171,0),24 },
+       { IPv4(216,120,172,0),22 },
+       { IPv4(216,120,176,0),20 },
+       { IPv4(216,120,192,0),21 },
+       { IPv4(216,120,204,0),22 },
+       { IPv4(216,121,224,0),19 },
+       { IPv4(216,123,0,0),18 },
+       { IPv4(216,123,0,0),19 },
+       { IPv4(216,123,0,0),17 },
+       { IPv4(216,123,8,0),22 },
+       { IPv4(216,123,20,0),24 },
+       { IPv4(216,123,31,0),24 },
+       { IPv4(216,123,32,0),20 },
+       { IPv4(216,123,40,0),24 },
+       { IPv4(216,123,48,0),21 },
+       { IPv4(216,123,56,0),24 },
+       { IPv4(216,123,56,0),21 },
+       { IPv4(216,123,57,0),24 },
+       { IPv4(216,123,64,0),19 },
+       { IPv4(216,123,80,0),20 },
+       { IPv4(216,123,85,0),24 },
+       { IPv4(216,123,101,0),24 },
+       { IPv4(216,123,102,0),24 },
+       { IPv4(216,123,104,0),24 },
+       { IPv4(216,123,105,0),24 },
+       { IPv4(216,123,107,0),24 },
+       { IPv4(216,123,108,0),24 },
+       { IPv4(216,123,118,0),24 },
+       { IPv4(216,123,119,0),24 },
+       { IPv4(216,123,120,0),24 },
+       { IPv4(216,123,121,0),24 },
+       { IPv4(216,123,122,0),24 },
+       { IPv4(216,123,128,0),18 },
+       { IPv4(216,124,0,0),16 },
+       { IPv4(216,124,160,0),20 },
+       { IPv4(216,124,208,0),21 },
+       { IPv4(216,125,0,0),16 },
+       { IPv4(216,125,56,0),21 },
+       { IPv4(216,126,0,0),19 },
+       { IPv4(216,127,0,0),19 },
+       { IPv4(216,127,128,0),22 },
+       { IPv4(216,127,128,0),19 },
+       { IPv4(216,127,224,0),22 },
+       { IPv4(216,129,0,0),18 },
+       { IPv4(216,129,1,0),24 },
+       { IPv4(216,129,33,0),24 },
+       { IPv4(216,129,56,0),23 },
+       { IPv4(216,129,64,0),19 },
+       { IPv4(216,129,192,0),19 },
+       { IPv4(216,130,16,0),20 },
+       { IPv4(216,130,128,0),19 },
+       { IPv4(216,131,64,0),19 },
+       { IPv4(216,131,80,0),20 },
+       { IPv4(216,132,36,0),24 },
+       { IPv4(216,132,36,0),23 },
+       { IPv4(216,132,96,0),22 },
+       { IPv4(216,133,17,0),24 },
+       { IPv4(216,135,128,0),17 },
+       { IPv4(216,135,144,0),22 },
+       { IPv4(216,135,152,0),21 },
+       { IPv4(216,135,160,0),20 },
+       { IPv4(216,135,196,0),22 },
+       { IPv4(216,136,8,0),24 },
+       { IPv4(216,136,57,0),24 },
+       { IPv4(216,136,85,0),24 },
+       { IPv4(216,136,96,0),19 },
+       { IPv4(216,136,116,0),23 },
+       { IPv4(216,136,154,0),23 },
+       { IPv4(216,136,192,0),21 },
+       { IPv4(216,136,194,0),23 },
+       { IPv4(216,136,199,0),24 },
+       { IPv4(216,137,0,0),19 },
+       { IPv4(216,137,8,0),21 },
+       { IPv4(216,137,16,0),21 },
+       { IPv4(216,137,36,0),22 },
+       { IPv4(216,137,144,0),20 },
+       { IPv4(216,137,192,0),18 },
+       { IPv4(216,138,192,0),19 },
+       { IPv4(216,138,224,0),19 },
+       { IPv4(216,139,96,0),20 },
+       { IPv4(216,139,128,0),19 },
+       { IPv4(216,140,0,0),14 },
+       { IPv4(216,140,57,0),24 },
+       { IPv4(216,140,58,0),23 },
+       { IPv4(216,140,128,0),18 },
+       { IPv4(216,140,178,0),23 },
+       { IPv4(216,140,180,0),24 },
+       { IPv4(216,140,203,0),24 },
+       { IPv4(216,141,0,0),18 },
+       { IPv4(216,141,0,0),19 },
+       { IPv4(216,141,24,0),23 },
+       { IPv4(216,141,60,0),23 },
+       { IPv4(216,141,64,0),20 },
+       { IPv4(216,141,82,0),23 },
+       { IPv4(216,141,86,0),23 },
+       { IPv4(216,141,88,0),23 },
+       { IPv4(216,141,234,0),23 },
+       { IPv4(216,142,16,0),20 },
+       { IPv4(216,142,32,0),22 },
+       { IPv4(216,142,36,0),22 },
+       { IPv4(216,142,48,0),21 },
+       { IPv4(216,142,56,0),22 },
+       { IPv4(216,142,92,0),24 },
+       { IPv4(216,142,133,0),25 },
+       { IPv4(216,142,137,0),24 },
+       { IPv4(216,142,156,0),22 },
+       { IPv4(216,142,172,0),22 },
+       { IPv4(216,142,176,0),22 },
+       { IPv4(216,142,188,0),23 },
+       { IPv4(216,142,200,0),21 },
+       { IPv4(216,142,208,0),20 },
+       { IPv4(216,142,224,0),22 },
+       { IPv4(216,142,228,0),22 },
+       { IPv4(216,142,236,0),22 },
+       { IPv4(216,142,240,0),21 },
+       { IPv4(216,142,244,0),22 },
+       { IPv4(216,142,248,0),22 },
+       { IPv4(216,142,248,0),21 },
+       { IPv4(216,142,252,0),22 },
+       { IPv4(216,143,0,0),22 },
+       { IPv4(216,143,4,0),22 },
+       { IPv4(216,143,8,0),21 },
+       { IPv4(216,143,16,0),22 },
+       { IPv4(216,143,24,0),22 },
+       { IPv4(216,143,76,0),22 },
+       { IPv4(216,143,90,0),24 },
+       { IPv4(216,143,120,0),22 },
+       { IPv4(216,143,134,0),24 },
+       { IPv4(216,143,138,0),23 },
+       { IPv4(216,143,140,0),22 },
+       { IPv4(216,143,160,0),21 },
+       { IPv4(216,143,172,0),22 },
+       { IPv4(216,143,224,0),22 },
+       { IPv4(216,143,238,0),23 },
+       { IPv4(216,143,240,0),23 },
+       { IPv4(216,143,244,0),22 },
+       { IPv4(216,144,128,0),19 },
+       { IPv4(216,144,160,0),19 },
+       { IPv4(216,145,28,0),24 },
+       { IPv4(216,145,32,0),20 },
+       { IPv4(216,145,130,0),24 },
+       { IPv4(216,145,131,0),24 },
+       { IPv4(216,145,132,0),24 },
+       { IPv4(216,145,133,0),24 },
+       { IPv4(216,145,134,0),24 },
+       { IPv4(216,145,135,0),24 },
+       { IPv4(216,146,36,0),24 },
+       { IPv4(216,146,37,0),24 },
+       { IPv4(216,146,38,0),24 },
+       { IPv4(216,146,39,0),24 },
+       { IPv4(216,146,40,0),24 },
+       { IPv4(216,146,41,0),24 },
+       { IPv4(216,146,42,0),24 },
+       { IPv4(216,146,43,0),24 },
+       { IPv4(216,146,44,0),24 },
+       { IPv4(216,146,45,0),24 },
+       { IPv4(216,146,46,0),23 },
+       { IPv4(216,146,131,0),24 },
+       { IPv4(216,146,132,0),24 },
+       { IPv4(216,146,134,0),24 },
+       { IPv4(216,146,140,0),24 },
+       { IPv4(216,146,141,0),24 },
+       { IPv4(216,146,142,0),24 },
+       { IPv4(216,146,143,0),24 },
+       { IPv4(216,146,150,0),24 },
+       { IPv4(216,146,176,0),24 },
+       { IPv4(216,146,179,0),24 },
+       { IPv4(216,146,192,0),19 },
+       { IPv4(216,147,136,0),24 },
+       { IPv4(216,147,137,0),24 },
+       { IPv4(216,147,141,0),24 },
+       { IPv4(216,148,0,0),16 },
+       { IPv4(216,148,4,0),24 },
+       { IPv4(216,148,5,0),24 },
+       { IPv4(216,148,6,0),24 },
+       { IPv4(216,148,7,0),24 },
+       { IPv4(216,148,40,0),24 },
+       { IPv4(216,148,47,0),24 },
+       { IPv4(216,148,50,0),24 },
+       { IPv4(216,148,78,0),24 },
+       { IPv4(216,148,79,0),24 },
+       { IPv4(216,148,88,0),23 },
+       { IPv4(216,148,90,0),24 },
+       { IPv4(216,148,91,0),24 },
+       { IPv4(216,148,92,0),24 },
+       { IPv4(216,148,93,0),24 },
+       { IPv4(216,148,94,0),24 },
+       { IPv4(216,148,95,0),24 },
+       { IPv4(216,148,101,0),24 },
+       { IPv4(216,148,104,0),23 },
+       { IPv4(216,148,106,0),24 },
+       { IPv4(216,148,128,0),24 },
+       { IPv4(216,148,130,0),24 },
+       { IPv4(216,148,164,0),23 },
+       { IPv4(216,148,208,0),20 },
+       { IPv4(216,148,224,0),22 },
+       { IPv4(216,148,224,0),19 },
+       { IPv4(216,149,0,0),16 },
+       { IPv4(216,149,0,0),19 },
+       { IPv4(216,149,32,0),19 },
+       { IPv4(216,149,64,0),19 },
+       { IPv4(216,149,96,0),19 },
+       { IPv4(216,149,128,0),19 },
+       { IPv4(216,149,160,0),19 },
+       { IPv4(216,149,192,0),19 },
+       { IPv4(216,150,0,0),19 },
+       { IPv4(216,150,96,0),20 },
+       { IPv4(216,150,128,0),19 },
+       { IPv4(216,150,192,0),19 },
+       { IPv4(216,151,0,0),19 },
+       { IPv4(216,151,18,0),23 },
+       { IPv4(216,151,64,0),18 },
+       { IPv4(216,151,82,0),23 },
+       { IPv4(216,151,84,0),24 },
+       { IPv4(216,151,85,0),24 },
+       { IPv4(216,151,128,0),18 },
+       { IPv4(216,151,192,0),19 },
+       { IPv4(216,152,0,0),18 },
+       { IPv4(216,152,64,0),20 },
+       { IPv4(216,153,0,0),17 },
+       { IPv4(216,156,0,0),16 },
+       { IPv4(216,157,40,0),21 },
+       { IPv4(216,157,48,0),21 },
+       { IPv4(216,157,64,0),21 },
+       { IPv4(216,157,72,0),21 },
+       { IPv4(216,157,88,0),21 },
+       { IPv4(216,157,96,0),21 },
+       { IPv4(216,157,104,0),21 },
+       { IPv4(216,158,0,0),18 },
+       { IPv4(216,158,64,0),24 },
+       { IPv4(216,158,65,0),24 },
+       { IPv4(216,158,66,0),24 },
+       { IPv4(216,158,72,0),24 },
+       { IPv4(216,158,74,0),24 },
+       { IPv4(216,158,75,0),24 },
+       { IPv4(216,158,78,0),24 },
+       { IPv4(216,158,80,0),24 },
+       { IPv4(216,158,81,0),24 },
+       { IPv4(216,158,82,0),24 },
+       { IPv4(216,158,84,0),24 },
+       { IPv4(216,158,89,0),24 },
+       { IPv4(216,158,90,0),24 },
+       { IPv4(216,158,91,0),24 },
+       { IPv4(216,158,92,0),24 },
+       { IPv4(216,158,128,0),19 },
+       { IPv4(216,159,0,0),17 },
+       { IPv4(216,159,128,0),18 },
+       { IPv4(216,159,130,0),23 },
+       { IPv4(216,159,132,0),23 },
+       { IPv4(216,160,0,0),15 },
+       { IPv4(216,160,229,0),24 },
+       { IPv4(216,161,196,0),22 },
+       { IPv4(216,162,32,0),21 },
+       { IPv4(216,162,40,0),22 },
+       { IPv4(216,162,44,0),23 },
+       { IPv4(216,162,46,0),24 },
+       { IPv4(216,162,47,0),24 },
+       { IPv4(216,162,96,0),19 },
+       { IPv4(216,162,128,0),20 },
+       { IPv4(216,163,32,0),20 },
+       { IPv4(216,163,48,0),20 },
+       { IPv4(216,163,64,0),19 },
+       { IPv4(216,163,96,0),19 },
+       { IPv4(216,163,102,0),24 },
+       { IPv4(216,163,103,0),24 },
+       { IPv4(216,163,112,0),24 },
+       { IPv4(216,163,113,0),24 },
+       { IPv4(216,163,114,0),24 },
+       { IPv4(216,163,117,0),24 },
+       { IPv4(216,163,120,0),23 },
+       { IPv4(216,163,122,0),24 },
+       { IPv4(216,163,123,0),24 },
+       { IPv4(216,163,124,0),24 },
+       { IPv4(216,163,125,0),24 },
+       { IPv4(216,163,126,0),24 },
+       { IPv4(216,163,160,0),20 },
+       { IPv4(216,163,176,0),21 },
+       { IPv4(216,163,176,0),20 },
+       { IPv4(216,163,184,0),21 },
+       { IPv4(216,163,192,0),20 },
+       { IPv4(216,163,205,0),24 },
+       { IPv4(216,163,208,0),20 },
+       { IPv4(216,163,248,0),21 },
+       { IPv4(216,164,0,0),16 },
+       { IPv4(216,165,0,0),17 },
+       { IPv4(216,165,192,0),19 },
+       { IPv4(216,166,0,0),17 },
+       { IPv4(216,166,128,0),18 },
+       { IPv4(216,167,0,0),17 },
+       { IPv4(216,167,156,0),23 },
+       { IPv4(216,167,192,0),20 },
+       { IPv4(216,168,136,0),24 },
+       { IPv4(216,168,137,0),24 },
+       { IPv4(216,168,160,0),24 },
+       { IPv4(216,168,161,0),24 },
+       { IPv4(216,168,162,0),23 },
+       { IPv4(216,168,164,0),22 },
+       { IPv4(216,168,168,0),22 },
+       { IPv4(216,168,192,0),19 },
+       { IPv4(216,168,224,0),19 },
+       { IPv4(216,168,252,0),24 },
+       { IPv4(216,168,253,0),24 },
+       { IPv4(216,168,254,0),24 },
+       { IPv4(216,169,32,0),19 },
+       { IPv4(216,169,136,0),24 },
+       { IPv4(216,169,144,0),20 },
+       { IPv4(216,169,160,0),19 },
+       { IPv4(216,169,242,0),23 },
+       { IPv4(216,170,64,0),19 },
+       { IPv4(216,170,188,0),22 },
+       { IPv4(216,171,42,0),23 },
+       { IPv4(216,171,64,0),20 },
+       { IPv4(216,171,128,0),19 },
+       { IPv4(216,171,141,0),24 },
+       { IPv4(216,172,36,0),24 },
+       { IPv4(216,172,76,0),24 },
+       { IPv4(216,172,110,0),24 },
+       { IPv4(216,172,111,0),24 },
+       { IPv4(216,172,198,0),24 },
+       { IPv4(216,172,199,0),24 },
+       { IPv4(216,173,0,0),18 },
+       { IPv4(216,173,16,0),23 },
+       { IPv4(216,173,128,0),19 },
+       { IPv4(216,173,136,0),24 },
+       { IPv4(216,174,64,0),20 },
+       { IPv4(216,174,80,0),21 },
+       { IPv4(216,174,88,0),22 },
+       { IPv4(216,174,93,0),24 },
+       { IPv4(216,174,94,0),23 },
+       { IPv4(216,174,96,0),20 },
+       { IPv4(216,174,116,0),22 },
+       { IPv4(216,174,120,0),21 },
+       { IPv4(216,174,160,0),22 },
+       { IPv4(216,174,164,0),22 },
+       { IPv4(216,174,168,0),23 },
+       { IPv4(216,174,192,0),18 },
+       { IPv4(216,174,228,0),22 },
+       { IPv4(216,175,40,0),21 },
+       { IPv4(216,175,41,0),24 },
+       { IPv4(216,175,48,0),20 },
+       { IPv4(216,175,56,0),22 },
+       { IPv4(216,175,60,0),23 },
+       { IPv4(216,175,62,0),24 },
+       { IPv4(216,175,63,0),24 },
+       { IPv4(216,175,80,0),20 },
+       { IPv4(216,175,96,0),19 },
+       { IPv4(216,176,160,0),20 },
+       { IPv4(216,176,168,0),24 },
+       { IPv4(216,176,169,0),24 },
+       { IPv4(216,176,170,0),24 },
+       { IPv4(216,176,171,0),24 },
+       { IPv4(216,176,224,0),22 },
+       { IPv4(216,176,224,0),20 },
+       { IPv4(216,176,232,0),22 },
+       { IPv4(216,176,236,0),23 },
+       { IPv4(216,176,239,0),24 },
+       { IPv4(216,177,0,0),19 },
+       { IPv4(216,177,160,0),19 },
+       { IPv4(216,178,0,0),19 },
+       { IPv4(216,178,0,0),24 },
+       { IPv4(216,178,5,0),24 },
+       { IPv4(216,178,64,0),19 },
+       { IPv4(216,178,98,0),23 },
+       { IPv4(216,178,100,0),23 },
+       { IPv4(216,178,105,0),24 },
+       { IPv4(216,178,106,0),23 },
+       { IPv4(216,178,108,0),24 },
+       { IPv4(216,178,113,0),24 },
+       { IPv4(216,178,140,0),22 },
+       { IPv4(216,179,0,0),18 },
+       { IPv4(216,179,0,0),19 },
+       { IPv4(216,179,12,0),22 },
+       { IPv4(216,179,32,0),19 },
+       { IPv4(216,179,64,0),19 },
+       { IPv4(216,179,128,0),19 },
+       { IPv4(216,179,139,0),24 },
+       { IPv4(216,179,160,0),19 },
+       { IPv4(216,179,192,0),18 },
+       { IPv4(216,180,0,0),17 },
+       { IPv4(216,180,112,0),20 },
+       { IPv4(216,180,128,0),19 },
+       { IPv4(216,181,0,0),16 },
+       { IPv4(216,183,32,0),20 },
+       { IPv4(216,183,96,0),22 },
+       { IPv4(216,183,100,0),23 },
+       { IPv4(216,183,102,0),24 },
+       { IPv4(216,183,104,0),23 },
+       { IPv4(216,183,106,0),24 },
+       { IPv4(216,183,107,0),24 },
+       { IPv4(216,183,108,0),23 },
+       { IPv4(216,183,110,0),23 },
+       { IPv4(216,183,114,0),23 },
+       { IPv4(216,183,116,0),24 },
+       { IPv4(216,183,117,0),24 },
+       { IPv4(216,183,118,0),24 },
+       { IPv4(216,183,119,0),24 },
+       { IPv4(216,183,120,0),22 },
+       { IPv4(216,183,124,0),24 },
+       { IPv4(216,183,125,0),24 },
+       { IPv4(216,183,126,0),23 },
+       { IPv4(216,184,64,0),19 },
+       { IPv4(216,185,32,0),19 },
+       { IPv4(216,185,64,0),20 },
+       { IPv4(216,185,64,0),19 },
+       { IPv4(216,185,80,0),22 },
+       { IPv4(216,185,84,0),22 },
+       { IPv4(216,185,88,0),21 },
+       { IPv4(216,185,88,0),22 },
+       { IPv4(216,185,92,0),22 },
+       { IPv4(216,185,96,0),20 },
+       { IPv4(216,185,96,0),19 },
+       { IPv4(216,185,112,0),20 },
+       { IPv4(216,185,192,0),20 },
+       { IPv4(216,187,64,0),23 },
+       { IPv4(216,187,66,0),23 },
+       { IPv4(216,187,68,0),23 },
+       { IPv4(216,187,70,0),23 },
+       { IPv4(216,187,72,0),21 },
+       { IPv4(216,187,76,0),23 },
+       { IPv4(216,187,80,0),22 },
+       { IPv4(216,187,84,0),22 },
+       { IPv4(216,187,89,0),24 },
+       { IPv4(216,187,90,0),24 },
+       { IPv4(216,187,91,0),24 },
+       { IPv4(216,187,92,0),22 },
+       { IPv4(216,187,96,0),21 },
+       { IPv4(216,187,104,0),22 },
+       { IPv4(216,187,108,0),22 },
+       { IPv4(216,187,112,0),23 },
+       { IPv4(216,187,114,0),24 },
+       { IPv4(216,187,115,0),24 },
+       { IPv4(216,187,116,0),22 },
+       { IPv4(216,187,120,0),24 },
+       { IPv4(216,187,122,0),23 },
+       { IPv4(216,187,124,0),23 },
+       { IPv4(216,187,126,0),23 },
+       { IPv4(216,188,0,0),17 },
+       { IPv4(216,188,36,0),24 },
+       { IPv4(216,188,76,0),24 },
+       { IPv4(216,188,128,0),18 },
+       { IPv4(216,189,22,0),23 },
+       { IPv4(216,189,26,0),23 },
+       { IPv4(216,189,160,0),24 },
+       { IPv4(216,189,160,0),20 },
+       { IPv4(216,189,192,0),20 },
+       { IPv4(216,190,24,0),21 },
+       { IPv4(216,190,80,0),24 },
+       { IPv4(216,190,81,0),24 },
+       { IPv4(216,190,82,0),24 },
+       { IPv4(216,190,83,0),24 },
+       { IPv4(216,190,84,0),24 },
+       { IPv4(216,190,85,0),24 },
+       { IPv4(216,190,86,0),24 },
+       { IPv4(216,190,87,0),24 },
+       { IPv4(216,190,140,0),22 },
+       { IPv4(216,190,152,0),24 },
+       { IPv4(216,190,153,0),24 },
+       { IPv4(216,190,164,0),23 },
+       { IPv4(216,190,200,0),24 },
+       { IPv4(216,190,240,0),21 },
+       { IPv4(216,191,0,0),16 },
+       { IPv4(216,191,76,0),24 },
+       { IPv4(216,191,77,0),24 },
+       { IPv4(216,194,0,0),19 },
+       { IPv4(216,194,192,0),19 },
+       { IPv4(216,195,0,0),19 },
+       { IPv4(216,196,0,0),24 },
+       { IPv4(216,196,0,0),18 },
+       { IPv4(216,196,35,0),24 },
+       { IPv4(216,196,128,0),21 },
+       { IPv4(216,196,128,0),17 },
+       { IPv4(216,196,128,0),18 },
+       { IPv4(216,196,136,0),21 },
+       { IPv4(216,196,144,0),21 },
+       { IPv4(216,196,152,0),21 },
+       { IPv4(216,196,160,0),21 },
+       { IPv4(216,196,168,0),21 },
+       { IPv4(216,196,192,0),18 },
+       { IPv4(216,196,224,0),22 },
+       { IPv4(216,196,228,0),24 },
+       { IPv4(216,197,128,0),19 },
+       { IPv4(216,198,73,0),24 },
+       { IPv4(216,198,96,0),20 },
+       { IPv4(216,198,96,0),24 },
+       { IPv4(216,198,98,0),24 },
+       { IPv4(216,198,107,0),24 },
+       { IPv4(216,198,110,0),24 },
+       { IPv4(216,198,111,0),24 },
+       { IPv4(216,198,112,0),24 },
+       { IPv4(216,198,113,0),24 },
+       { IPv4(216,198,114,0),24 },
+       { IPv4(216,198,115,0),24 },
+       { IPv4(216,198,117,0),24 },
+       { IPv4(216,198,192,0),19 },
+       { IPv4(216,198,224,0),19 },
+       { IPv4(216,200,0,0),16 },
+       { IPv4(216,200,25,0),24 },
+       { IPv4(216,200,68,0),22 },
+       { IPv4(216,200,72,0),21 },
+       { IPv4(216,200,80,0),22 },
+       { IPv4(216,200,160,0),20 },
+       { IPv4(216,200,206,0),24 },
+       { IPv4(216,200,246,0),24 },
+       { IPv4(216,200,247,0),24 },
+       { IPv4(216,201,0,0),18 },
+       { IPv4(216,201,128,0),18 },
+       { IPv4(216,201,192,0),19 },
+       { IPv4(216,201,224,0),20 },
+       { IPv4(216,202,3,0),24 },
+       { IPv4(216,202,4,0),24 },
+       { IPv4(216,202,5,0),24 },
+       { IPv4(216,202,92,0),24 },
+       { IPv4(216,202,93,0),24 },
+       { IPv4(216,202,104,0),22 },
+       { IPv4(216,203,0,0),18 },
+       { IPv4(216,203,128,0),17 },
+       { IPv4(216,205,192,0),20 },
+       { IPv4(216,206,17,0),24 },
+       { IPv4(216,206,18,0),23 },
+       { IPv4(216,206,24,0),24 },
+       { IPv4(216,206,41,0),24 },
+       { IPv4(216,206,52,0),24 },
+       { IPv4(216,206,80,0),23 },
+       { IPv4(216,206,96,0),22 },
+       { IPv4(216,206,100,0),24 },
+       { IPv4(216,206,158,0),24 },
+       { IPv4(216,206,203,0),24 },
+       { IPv4(216,206,210,0),24 },
+       { IPv4(216,206,215,0),24 },
+       { IPv4(216,207,45,0),24 },
+       { IPv4(216,207,72,0),21 },
+       { IPv4(216,207,146,0),23 },
+       { IPv4(216,207,212,0),23 },
+       { IPv4(216,207,214,0),23 },
+       { IPv4(216,207,252,0),22 },
+       { IPv4(216,208,175,0),24 },
+       { IPv4(216,208,176,0),24 },
+       { IPv4(216,210,96,0),20 },
+       { IPv4(216,210,128,0),17 },
+       { IPv4(216,211,0,0),17 },
+       { IPv4(216,211,0,0),18 },
+       { IPv4(216,211,64,0),20 },
+       { IPv4(216,211,80,0),20 },
+       { IPv4(216,211,96,0),20 },
+       { IPv4(216,211,112,0),20 },
+       { IPv4(216,211,224,0),22 },
+       { IPv4(216,211,228,0),22 },
+       { IPv4(216,211,232,0),22 },
+       { IPv4(216,211,236,0),22 },
+       { IPv4(216,214,12,0),22 },
+       { IPv4(216,216,0,0),15 },
+       { IPv4(216,216,7,0),24 },
+       { IPv4(216,216,23,0),24 },
+       { IPv4(216,216,127,0),24 },
+       { IPv4(216,216,164,0),24 },
+       { IPv4(216,216,204,0),22 },
+       { IPv4(216,216,224,0),22 },
+       { IPv4(216,216,232,0),22 },
+       { IPv4(216,216,239,0),24 },
+       { IPv4(216,216,254,0),23 },
+       { IPv4(216,217,8,0),24 },
+       { IPv4(216,217,88,0),24 },
+       { IPv4(216,217,112,0),20 },
+       { IPv4(216,217,129,0),24 },
+       { IPv4(216,217,168,0),24 },
+       { IPv4(216,217,169,0),24 },
+       { IPv4(216,217,170,0),24 },
+       { IPv4(216,217,171,0),24 },
+       { IPv4(216,217,172,0),24 },
+       { IPv4(216,217,173,0),24 },
+       { IPv4(216,217,174,0),24 },
+       { IPv4(216,217,175,0),24 },
+       { IPv4(216,217,185,0),24 },
+       { IPv4(216,217,204,0),24 },
+       { IPv4(216,217,222,0),24 },
+       { IPv4(216,218,64,0),19 },
+       { IPv4(216,218,128,0),17 },
+       { IPv4(216,218,207,0),24 },
+       { IPv4(216,219,128,0),17 },
+       { IPv4(216,220,32,0),20 },
+       { IPv4(216,220,46,0),24 },
+       { IPv4(216,220,64,0),20 },
+       { IPv4(216,220,128,0),19 },
+       { IPv4(216,220,140,0),23 },
+       { IPv4(216,220,142,0),23 },
+       { IPv4(216,220,144,0),23 },
+       { IPv4(216,220,160,0),20 },
+       { IPv4(216,220,176,0),20 },
+       { IPv4(216,220,192,0),20 },
+       { IPv4(216,220,224,0),19 },
+       { IPv4(216,221,32,0),24 },
+       { IPv4(216,221,33,0),24 },
+       { IPv4(216,221,34,0),24 },
+       { IPv4(216,221,35,0),24 },
+       { IPv4(216,221,36,0),24 },
+       { IPv4(216,221,37,0),24 },
+       { IPv4(216,221,38,0),24 },
+       { IPv4(216,221,39,0),24 },
+       { IPv4(216,221,40,0),24 },
+       { IPv4(216,221,41,0),24 },
+       { IPv4(216,221,42,0),23 },
+       { IPv4(216,221,44,0),24 },
+       { IPv4(216,221,45,0),24 },
+       { IPv4(216,221,46,0),24 },
+       { IPv4(216,221,47,0),24 },
+       { IPv4(216,221,48,0),24 },
+       { IPv4(216,221,49,0),24 },
+       { IPv4(216,221,50,0),24 },
+       { IPv4(216,221,51,0),24 },
+       { IPv4(216,221,52,0),24 },
+       { IPv4(216,221,53,0),24 },
+       { IPv4(216,221,54,0),24 },
+       { IPv4(216,221,55,0),24 },
+       { IPv4(216,221,56,0),24 },
+       { IPv4(216,221,57,0),24 },
+       { IPv4(216,221,58,0),24 },
+       { IPv4(216,221,59,0),24 },
+       { IPv4(216,221,60,0),24 },
+       { IPv4(216,221,61,0),24 },
+       { IPv4(216,221,62,0),24 },
+       { IPv4(216,221,63,0),24 },
+       { IPv4(216,221,64,0),19 },
+       { IPv4(216,221,80,0),20 },
+       { IPv4(216,221,224,0),21 },
+       { IPv4(216,221,232,0),24 },
+       { IPv4(216,221,234,0),24 },
+       { IPv4(216,221,237,0),24 },
+       { IPv4(216,221,239,0),24 },
+       { IPv4(216,221,240,0),24 },
+       { IPv4(216,222,34,0),23 },
+       { IPv4(216,222,64,0),21 },
+       { IPv4(216,222,72,0),22 },
+       { IPv4(216,222,76,0),22 },
+       { IPv4(216,222,111,0),24 },
+       { IPv4(216,222,124,0),22 },
+       { IPv4(216,222,128,0),19 },
+       { IPv4(216,222,160,0),24 },
+       { IPv4(216,222,160,0),20 },
+       { IPv4(216,222,224,0),19 },
+       { IPv4(216,223,0,0),19 },
+       { IPv4(216,223,3,0),24 },
+       { IPv4(216,223,8,0),21 },
+       { IPv4(216,223,10,0),24 },
+       { IPv4(216,223,11,0),24 },
+       { IPv4(216,223,16,0),23 },
+       { IPv4(216,223,18,0),24 },
+       { IPv4(216,223,32,0),24 },
+       { IPv4(216,223,32,0),20 },
+       { IPv4(216,223,32,0),19 },
+       { IPv4(216,223,33,0),24 },
+       { IPv4(216,223,34,0),24 },
+       { IPv4(216,223,35,0),24 },
+       { IPv4(216,223,40,0),22 },
+       { IPv4(216,223,44,0),24 },
+       { IPv4(216,223,46,0),24 },
+       { IPv4(216,223,64,0),18 },
+       { IPv4(216,223,72,0),24 },
+       { IPv4(216,223,80,0),24 },
+       { IPv4(216,223,81,0),24 },
+       { IPv4(216,223,82,0),24 },
+       { IPv4(216,223,83,0),24 },
+       { IPv4(216,223,86,0),24 },
+       { IPv4(216,223,94,0),24 },
+       { IPv4(216,223,95,0),24 },
+       { IPv4(216,223,100,0),24 },
+       { IPv4(216,223,101,0),24 },
+       { IPv4(216,223,102,0),24 },
+       { IPv4(216,223,103,0),24 },
+       { IPv4(216,223,192,0),19 },
+       { IPv4(216,223,224,0),20 },
+       { IPv4(216,223,232,0),21 },
+       { IPv4(216,223,233,0),24 },
+       { IPv4(216,224,64,0),19 },
+       { IPv4(216,224,224,0),20 },
+       { IPv4(216,226,64,0),19 },
+       { IPv4(216,226,128,0),19 },
+       { IPv4(216,226,192,0),21 },
+       { IPv4(216,226,199,0),24 },
+       { IPv4(216,226,200,0),21 },
+       { IPv4(216,226,208,0),22 },
+       { IPv4(216,226,208,0),24 },
+       { IPv4(216,226,210,0),24 },
+       { IPv4(216,226,212,0),23 },
+       { IPv4(216,226,213,0),24 },
+       { IPv4(216,226,214,0),23 },
+       { IPv4(216,226,220,0),23 },
+       { IPv4(216,226,224,0),20 },
+       { IPv4(216,226,238,0),23 },
+       { IPv4(216,226,240,0),21 },
+       { IPv4(216,226,248,0),21 },
+       { IPv4(216,226,248,0),23 },
+       { IPv4(216,226,252,0),22 },
+       { IPv4(216,226,252,0),24 },
+       { IPv4(216,226,253,0),24 },
+       { IPv4(216,228,0,0),20 },
+       { IPv4(216,228,3,0),24 },
+       { IPv4(216,228,4,0),24 },
+       { IPv4(216,228,5,0),24 },
+       { IPv4(216,228,6,0),24 },
+       { IPv4(216,228,7,0),24 },
+       { IPv4(216,228,8,0),24 },
+       { IPv4(216,228,10,0),24 },
+       { IPv4(216,228,14,0),24 },
+       { IPv4(216,228,16,0),20 },
+       { IPv4(216,228,160,0),19 },
+       { IPv4(216,228,192,0),20 },
+       { IPv4(216,228,194,0),24 },
+       { IPv4(216,228,195,0),24 },
+       { IPv4(216,228,196,0),24 },
+       { IPv4(216,228,197,0),24 },
+       { IPv4(216,228,200,0),24 },
+       { IPv4(216,228,201,0),24 },
+       { IPv4(216,228,202,0),24 },
+       { IPv4(216,228,203,0),24 },
+       { IPv4(216,229,96,0),20 },
+       { IPv4(216,229,224,0),20 },
+       { IPv4(216,229,240,0),20 },
+       { IPv4(216,230,128,0),20 },
+       { IPv4(216,230,128,0),21 },
+       { IPv4(216,230,128,0),24 },
+       { IPv4(216,230,129,0),24 },
+       { IPv4(216,230,130,0),24 },
+       { IPv4(216,230,131,0),24 },
+       { IPv4(216,230,132,0),24 },
+       { IPv4(216,230,133,0),24 },
+       { IPv4(216,230,134,0),24 },
+       { IPv4(216,230,135,0),24 },
+       { IPv4(216,230,136,0),24 },
+       { IPv4(216,230,137,0),24 },
+       { IPv4(216,230,138,0),24 },
+       { IPv4(216,230,139,0),24 },
+       { IPv4(216,230,140,0),24 },
+       { IPv4(216,230,141,0),24 },
+       { IPv4(216,230,142,0),24 },
+       { IPv4(216,230,143,0),24 },
+       { IPv4(216,230,144,0),24 },
+       { IPv4(216,230,145,0),24 },
+       { IPv4(216,230,146,0),24 },
+       { IPv4(216,230,147,0),24 },
+       { IPv4(216,230,148,0),24 },
+       { IPv4(216,230,149,0),24 },
+       { IPv4(216,230,150,0),24 },
+       { IPv4(216,230,151,0),24 },
+       { IPv4(216,230,152,0),24 },
+       { IPv4(216,230,153,0),24 },
+       { IPv4(216,230,154,0),24 },
+       { IPv4(216,230,155,0),24 },
+       { IPv4(216,230,156,0),24 },
+       { IPv4(216,230,157,0),24 },
+       { IPv4(216,230,158,0),24 },
+       { IPv4(216,230,159,0),24 },
+       { IPv4(216,230,160,0),20 },
+       { IPv4(216,231,0,0),20 },
+       { IPv4(216,231,4,0),22 },
+       { IPv4(216,231,16,0),22 },
+       { IPv4(216,231,16,0),20 },
+       { IPv4(216,231,20,0),23 },
+       { IPv4(216,231,32,0),24 },
+       { IPv4(216,231,32,0),19 },
+       { IPv4(216,231,96,0),19 },
+       { IPv4(216,231,192,0),22 },
+       { IPv4(216,231,192,0),23 },
+       { IPv4(216,231,194,0),23 },
+       { IPv4(216,231,201,0),24 },
+       { IPv4(216,231,205,0),24 },
+       { IPv4(216,231,207,0),24 },
+       { IPv4(216,231,208,0),20 },
+       { IPv4(216,231,224,0),20 },
+       { IPv4(216,231,240,0),20 },
+       { IPv4(216,234,224,0),20 },
+       { IPv4(216,234,224,0),19 },
+       { IPv4(216,234,240,0),20 },
+       { IPv4(216,235,32,0),20 },
+       { IPv4(216,235,32,0),19 },
+       { IPv4(216,235,64,0),20 },
+       { IPv4(216,235,96,0),19 },
+       { IPv4(216,235,128,0),19 },
+       { IPv4(216,235,160,0),20 },
+       { IPv4(216,235,192,0),24 },
+       { IPv4(216,235,194,0),24 },
+       { IPv4(216,235,208,0),20 },
+       { IPv4(216,235,240,0),20 },
+       { IPv4(216,235,247,0),24 },
+       { IPv4(216,236,160,0),20 },
+       { IPv4(216,236,192,0),20 },
+       { IPv4(216,236,202,0),24 },
+       { IPv4(216,236,206,0),24 },
+       { IPv4(216,236,208,0),21 },
+       { IPv4(216,236,208,0),22 },
+       { IPv4(216,236,220,0),22 },
+       { IPv4(216,237,64,0),22 },
+       { IPv4(216,237,68,0),22 },
+       { IPv4(216,237,72,0),22 },
+       { IPv4(216,237,76,0),22 },
+       { IPv4(216,237,96,0),20 },
+       { IPv4(216,237,128,0),18 },
+       { IPv4(216,237,163,0),24 },
+       { IPv4(216,239,32,0),23 },
+       { IPv4(216,239,34,0),23 },
+       { IPv4(216,239,34,0),24 },
+       { IPv4(216,239,35,0),24 },
+       { IPv4(216,239,36,0),23 },
+       { IPv4(216,239,36,0),24 },
+       { IPv4(216,239,37,0),24 },
+       { IPv4(216,239,38,0),23 },
+       { IPv4(216,239,40,0),24 },
+       { IPv4(216,239,41,0),24 },
+       { IPv4(216,239,46,0),24 },
+       { IPv4(216,239,96,0),23 },
+       { IPv4(216,239,99,0),24 },
+       { IPv4(216,239,100,0),23 },
+       { IPv4(216,239,102,0),23 },
+       { IPv4(216,239,104,0),24 },
+       { IPv4(216,239,105,0),24 },
+       { IPv4(216,239,224,0),20 },
+       { IPv4(216,239,240,0),20 },
+       { IPv4(216,240,208,0),20 },
+       { IPv4(216,241,0,0),19 },
+       { IPv4(216,241,32,0),20 },
+       { IPv4(216,241,96,0),20 },
+       { IPv4(216,241,128,0),22 },
+       { IPv4(216,241,132,0),23 },
+       { IPv4(216,241,136,0),22 },
+       { IPv4(216,241,140,0),23 },
+       { IPv4(216,241,142,0),23 },
+       { IPv4(216,241,144,0),22 },
+       { IPv4(216,241,208,0),20 },
+       { IPv4(216,242,24,0),24 },
+       { IPv4(216,242,27,0),24 },
+       { IPv4(216,242,38,0),24 },
+       { IPv4(216,242,84,0),24 },
+       { IPv4(216,243,0,0),24 },
+       { IPv4(216,243,1,0),30 },
+       { IPv4(216,243,8,0),24 },
+       { IPv4(216,243,10,0),25 },
+       { IPv4(216,243,11,0),24 },
+       { IPv4(216,243,13,0),24 },
+       { IPv4(216,243,15,0),24 },
+       { IPv4(216,243,18,0),24 },
+       { IPv4(216,243,19,0),24 },
+       { IPv4(216,243,20,0),24 },
+       { IPv4(216,243,21,0),24 },
+       { IPv4(216,243,24,0),24 },
+       { IPv4(216,243,25,0),24 },
+       { IPv4(216,243,27,0),24 },
+       { IPv4(216,243,28,0),24 },
+       { IPv4(216,243,45,0),24 },
+       { IPv4(216,243,46,0),24 },
+       { IPv4(216,243,47,0),24 },
+       { IPv4(216,243,48,0),24 },
+       { IPv4(216,243,52,128),26 },
+       { IPv4(216,243,53,128),26 },
+       { IPv4(216,243,56,0),24 },
+       { IPv4(216,243,59,0),24 },
+       { IPv4(216,243,128,0),18 },
+       { IPv4(216,243,192,0),19 },
+       { IPv4(216,243,224,0),20 },
+       { IPv4(216,244,0,0),18 },
+       { IPv4(216,244,96,0),20 },
+       { IPv4(216,244,110,0),24 },
+       { IPv4(216,244,111,0),24 },
+       { IPv4(216,244,128,0),19 },
+       { IPv4(216,244,160,0),20 },
+       { IPv4(216,244,176,0),21 },
+       { IPv4(216,244,184,0),22 },
+       { IPv4(216,244,188,0),23 },
+       { IPv4(216,244,190,0),24 },
+       { IPv4(216,244,191,0),24 },
+       { IPv4(216,245,0,0),21 },
+       { IPv4(216,245,12,0),22 },
+       { IPv4(216,245,16,0),22 },
+       { IPv4(216,245,22,0),24 },
+       { IPv4(216,245,24,0),22 },
+       { IPv4(216,245,28,0),22 },
+       { IPv4(216,246,0,0),17 },
+       { IPv4(216,247,0,0),16 },
+       { IPv4(216,248,64,0),18 },
+       { IPv4(216,248,193,0),24 },
+       { IPv4(216,248,194,0),24 },
+       { IPv4(216,248,195,0),24 },
+       { IPv4(216,248,196,0),22 },
+       { IPv4(216,248,200,0),22 },
+       { IPv4(216,248,204,0),24 },
+       { IPv4(216,248,205,0),24 },
+       { IPv4(216,248,224,0),20 },
+       { IPv4(216,249,64,0),19 },
+       { IPv4(216,249,96,0),20 },
+       { IPv4(216,249,136,0),24 },
+       { IPv4(216,249,137,0),24 },
+       { IPv4(216,249,138,0),24 },
+       { IPv4(216,249,139,0),24 },
+       { IPv4(216,249,140,0),24 },
+       { IPv4(216,249,141,0),24 },
+       { IPv4(216,250,64,0),19 },
+       { IPv4(216,250,128,0),21 },
+       { IPv4(216,250,128,0),20 },
+       { IPv4(216,250,129,0),24 },
+       { IPv4(216,250,136,0),21 },
+       { IPv4(216,250,136,0),24 },
+       { IPv4(216,250,139,0),24 },
+       { IPv4(216,250,140,0),24 },
+       { IPv4(216,250,141,0),24 },
+       { IPv4(216,250,142,0),23 },
+       { IPv4(216,250,224,0),19 },
+       { IPv4(216,251,50,0),24 },
+       { IPv4(216,251,128,0),20 },
+       { IPv4(216,251,128,0),19 },
+       { IPv4(216,252,0,0),18 },
+       { IPv4(216,252,128,0),20 },
+       { IPv4(216,252,140,0),22 },
+       { IPv4(216,252,144,0),21 },
+       { IPv4(216,252,152,0),21 },
+       { IPv4(216,252,160,0),20 },
+       { IPv4(216,252,174,0),24 },
+       { IPv4(216,252,176,0),24 },
+       { IPv4(216,252,176,0),22 },
+       { IPv4(216,252,177,0),24 },
+       { IPv4(216,252,179,0),24 },
+       { IPv4(216,252,182,0),24 },
+       { IPv4(216,252,182,0),23 },
+       { IPv4(216,252,183,0),24 },
+       { IPv4(216,252,184,0),22 },
+       { IPv4(216,252,187,0),24 },
+       { IPv4(216,252,188,0),22 },
+       { IPv4(216,252,192,0),20 },
+       { IPv4(216,252,197,0),24 },
+       { IPv4(216,252,208,0),20 },
+       { IPv4(216,252,220,0),23 },
+       { IPv4(216,252,222,0),23 },
+       { IPv4(216,252,224,0),21 },
+       { IPv4(216,252,226,0),24 },
+       { IPv4(216,252,227,0),24 },
+       { IPv4(216,252,228,0),23 },
+       { IPv4(216,252,232,0),21 },
+       { IPv4(216,252,234,0),24 },
+       { IPv4(216,252,235,0),24 },
+       { IPv4(216,252,240,0),20 },
+       { IPv4(216,253,0,0),16 },
+       { IPv4(216,253,7,0),24 },
+       { IPv4(216,253,8,0),24 },
+       { IPv4(216,253,8,0),22 },
+       { IPv4(216,253,9,0),24 },
+       { IPv4(216,253,35,0),24 },
+       { IPv4(216,253,80,0),24 },
+       { IPv4(216,253,167,0),24 },
+       { IPv4(216,254,0,0),18 },
+       { IPv4(216,254,0,0),24 },
+       { IPv4(216,254,64,0),18 },
+       { IPv4(216,254,128,0),18 },
+       { IPv4(216,255,0,0),20 },
+       { IPv4(217,8,0,0),19 },
+       { IPv4(217,8,96,0),20 },
+       { IPv4(217,9,64,0),20 },
+       { IPv4(217,10,64,0),20 },
+       { IPv4(217,10,96,0),20 },
+       { IPv4(217,10,192,0),24 },
+       { IPv4(217,10,193,0),24 },
+       { IPv4(217,10,195,0),24 },
+       { IPv4(217,10,196,0),24 },
+       { IPv4(217,10,197,0),24 },
+       { IPv4(217,10,198,0),24 },
+       { IPv4(217,10,199,0),24 },
+       { IPv4(217,10,200,0),24 },
+       { IPv4(217,10,201,0),24 },
+       { IPv4(217,10,203,0),24 },
+       { IPv4(217,10,204,0),24 },
+       { IPv4(217,10,205,0),24 },
+       { IPv4(217,10,206,0),24 },
+       { IPv4(217,10,207,0),24 },
+       { IPv4(217,10,208,0),24 },
+       { IPv4(217,10,210,0),24 },
+       { IPv4(217,10,211,0),24 },
+       { IPv4(217,10,212,0),24 },
+       { IPv4(217,10,213,0),24 },
+       { IPv4(217,10,214,0),24 },
+       { IPv4(217,10,215,0),24 },
+       { IPv4(217,10,216,0),24 },
+       { IPv4(217,10,217,0),24 },
+       { IPv4(217,10,218,0),24 },
+       { IPv4(217,10,219,0),24 },
+       { IPv4(217,10,220,0),24 },
+       { IPv4(217,10,221,0),24 },
+       { IPv4(217,10,222,0),24 },
+       { IPv4(217,10,234,0),24 },
+       { IPv4(217,12,32,0),20 },
+       { IPv4(217,14,0,0),20 },
+       { IPv4(217,14,160,0),21 },
+       { IPv4(217,14,160,0),20 },
+       { IPv4(217,14,165,0),24 },
+       { IPv4(217,14,166,0),24 },
+       { IPv4(217,15,0,0),20 },
+       { IPv4(217,15,32,0),20 },
+       { IPv4(217,15,64,0),20 },
+       { IPv4(217,15,160,0),21 },
+       { IPv4(217,15,168,0),21 },
+       { IPv4(217,17,192,0),20 },
+       { IPv4(217,18,32,0),20 },
+       { IPv4(217,18,192,0),20 },
+       { IPv4(217,19,3,0),24 },
+       { IPv4(217,19,4,0),24 },
+       { IPv4(217,19,5,0),24 },
+       { IPv4(217,19,6,0),24 },
+       { IPv4(217,19,9,0),24 },
+       { IPv4(217,19,10,0),24 },
+       { IPv4(217,19,32,0),20 },
+       { IPv4(217,19,224,0),20 },
+       { IPv4(217,20,128,0),20 },
+       { IPv4(217,21,0,0),24 },
+       { IPv4(217,21,1,0),24 },
+       { IPv4(217,21,2,0),24 },
+       { IPv4(217,21,3,0),24 },
+       { IPv4(217,21,4,0),24 },
+       { IPv4(217,21,8,0),24 },
+       { IPv4(217,21,51,0),24 },
+       { IPv4(217,21,128,0),20 },
+       { IPv4(217,22,0,0),20 },
+       { IPv4(217,23,224,0),20 },
+       { IPv4(217,24,128,0),20 },
+       { IPv4(217,24,224,0),20 },
+       { IPv4(217,25,64,0),20 },
+       { IPv4(217,26,33,0),24 },
+       { IPv4(217,26,160,0),24 },
+       { IPv4(217,27,0,0),23 },
+       { IPv4(217,27,2,0),23 },
+       { IPv4(217,27,32,0),24 },
+       { IPv4(217,27,33,0),24 },
+       { IPv4(217,27,34,0),24 },
+       { IPv4(217,27,35,0),24 },
+       { IPv4(217,27,36,0),24 },
+       { IPv4(217,27,37,0),24 },
+       { IPv4(217,28,192,0),20 },
+       { IPv4(217,29,32,0),20 },
+       { IPv4(217,29,96,0),20 },
+       { IPv4(217,29,192,0),23 },
+       { IPv4(217,29,194,0),23 },
+       { IPv4(217,31,64,0),20 },
+       { IPv4(217,32,0,0),12 },
+       { IPv4(217,64,96,0),20 },
+       { IPv4(217,66,32,0),20 },
+       { IPv4(217,66,128,0),20 },
+       { IPv4(217,66,160,0),20 },
+       { IPv4(217,67,64,0),20 },
+       { IPv4(217,67,224,0),20 },
+       { IPv4(217,68,32,0),20 },
+       { IPv4(217,68,224,0),23 },
+       { IPv4(217,69,0,0),20 },
+       { IPv4(217,69,64,0),20 },
+       { IPv4(217,71,0,0),22 },
+       { IPv4(217,71,10,0),24 },
+       { IPv4(217,75,64,0),20 },
+       { IPv4(217,76,160,0),20 },
+       { IPv4(217,76,192,0),20 },
+       { IPv4(217,77,128,0),19 },
+       { IPv4(217,114,160,0),20 },
+       { IPv4(217,114,192,0),24 },
+       { IPv4(217,115,192,0),20 },
+       { IPv4(217,115,193,0),24 },
+       { IPv4(217,115,197,0),24 },
+       { IPv4(217,115,224,0),20 },
+       { IPv4(217,116,0,0),20 },
+       { IPv4(217,116,160,0),20 },
+       { IPv4(217,117,0,0),20 },
+       { IPv4(217,117,32,0),19 },
+       { IPv4(217,117,96,0),20 },
+       { IPv4(217,118,128,0),20 },
+       { IPv4(217,119,96,0),19 },
+       { IPv4(217,119,192,0),20 },
+       { IPv4(217,120,0,0),14 },
+       { IPv4(217,131,0,0),16 },
+       { IPv4(217,131,0,0),17 },
+       { IPv4(217,131,128,0),17 },
+       { IPv4(217,137,250,0),24 },
+       { IPv4(217,138,0,0),16 },
+       { IPv4(217,140,0,0),20 },
+       { IPv4(217,140,16,0),20 },
+       { IPv4(217,145,64,0),20 },
+       { IPv4(217,145,72,0),21 },
+       { IPv4(217,146,96,0),20 },
+       { IPv4(217,148,40,0),21 },
+       { IPv4(217,148,160,0),20 },
+       { IPv4(217,148,160,0),24 },
+       { IPv4(217,148,161,0),24 },
+       { IPv4(217,148,192,0),20 },
+       { IPv4(217,149,64,0),20 },
+       { IPv4(217,150,128,0),20 },
+       { IPv4(217,151,0,0),20 },
+       { IPv4(217,151,208,0),20 },
+       { IPv4(217,154,0,0),16 },
+       { IPv4(217,156,8,0),24 },
+       { IPv4(217,156,18,0),24 },
+       { IPv4(217,156,36,0),24 },
+       { IPv4(217,156,42,0),24 },
+       { IPv4(217,156,56,0),24 },
+       { IPv4(217,156,75,0),24 },
+       { IPv4(217,162,0,0),16 },
+       { IPv4(217,166,0,0),16 },
+       { IPv4(217,169,0,0),19 },
+       { IPv4(217,169,160,0),20 },
+       { IPv4(217,169,224,0),20 },
+       { IPv4(217,170,32,0),20 },
+       { IPv4(217,170,192,0),20 },
+       { IPv4(217,171,224,0),20 },
+       { IPv4(217,173,64,0),20 },
+       { IPv4(217,174,32,0),24 },
+       { IPv4(217,175,96,0),20 },
+       { IPv4(217,176,0,0),13 },
+       { IPv4(217,194,32,0),20 },
+       { IPv4(217,194,160,0),20 },
+       { IPv4(217,194,192,0),20 },
+       { IPv4(217,195,192,0),24 },
+       { IPv4(217,195,193,0),24 },
+       { IPv4(217,195,194,0),24 },
+       { IPv4(217,195,195,0),24 },
+       { IPv4(217,195,224,0),20 },
+       { IPv4(217,196,224,0),20 },
+       { IPv4(217,216,0,0),15 },
+       { IPv4(217,220,0,0),16 },
+       { IPv4(218,0,0,0),16 },
+       { IPv4(218,1,0,0),16 },
+       { IPv4(218,2,0,0),15 },
+       { IPv4(218,4,0,0),16 },
+       { IPv4(218,5,0,0),16 },
+       { IPv4(218,6,0,0),17 },
+       { IPv4(218,6,128,0),17 },
+       { IPv4(218,7,0,0),16 },
+       { IPv4(218,8,0,0),16 },
+       { IPv4(218,9,0,0),16 },
+       { IPv4(218,10,0,0),16 },
+       { IPv4(218,11,0,0),16 },
+       { IPv4(218,12,0,0),16 },
+       { IPv4(218,13,0,0),16 },
+       { IPv4(218,14,0,0),15 },
+       { IPv4(218,16,0,0),14 },
+       { IPv4(218,20,0,0),16 },
+       { IPv4(218,21,0,0),19 },
+       { IPv4(218,21,32,0),20 },
+       { IPv4(218,21,64,0),18 },
+       { IPv4(218,21,128,0),17 },
+       { IPv4(218,22,0,0),15 },
+       { IPv4(218,24,0,0),16 },
+       { IPv4(218,25,0,0),16 },
+       { IPv4(218,26,0,0),16 },
+       { IPv4(218,27,0,0),16 },
+       { IPv4(218,28,0,0),15 },
+       { IPv4(218,30,0,0),20 },
+       { IPv4(218,30,16,0),22 },
+       { IPv4(218,30,224,0),19 },
+       { IPv4(218,31,0,0),16 },
+       { IPv4(218,40,112,0),20 },
+       { IPv4(218,40,128,0),20 },
+       { IPv4(218,48,0,0),15 },
+       { IPv4(218,49,226,0),23 },
+       { IPv4(218,49,228,0),22 },
+       { IPv4(218,49,232,0),21 },
+       { IPv4(218,49,240,0),20 },
+       { IPv4(218,56,0,0),15 },
+       { IPv4(218,58,0,0),15 },
+       { IPv4(218,60,0,0),16 },
+       { IPv4(218,63,0,0),16 },
+       { IPv4(218,64,0,0),16 },
+       { IPv4(218,65,0,0),17 },
+       { IPv4(218,65,128,0),17 },
+       { IPv4(218,66,0,0),16 },
+       { IPv4(218,67,0,0),17 },
+       { IPv4(218,67,128,0),17 },
+       { IPv4(218,68,0,0),15 },
+       { IPv4(218,95,224,0),19 },
+       { IPv4(218,144,0,0),13 },
+       { IPv4(218,184,0,0),16 },
+       { IPv4(218,184,0,0),18 },
+       { IPv4(218,184,64,0),18 },
+       { IPv4(218,184,128,0),18 },
+       { IPv4(218,184,192,0),18 }
+};
+
+#define  NUM_ROUTE_ENTRIES (sizeof(mae_west_tbl) / sizeof(mae_west_tbl[0]))
+
+#endif /* _TEST_LPM_ROUTES_H_ */
diff --git a/app/test/test_malloc.c b/app/test/test_malloc.c
new file mode 100644 (file)
index 0000000..a38a6de
--- /dev/null
@@ -0,0 +1,776 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <string.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <sys/queue.h>
+
+#include <cmdline_parse.h>
+
+#include <rte_common.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_per_lcore.h>
+#include <rte_launch.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_malloc.h>
+#include <rte_cycles.h>
+#include <rte_random.h>
+#include <rte_string_fns.h>
+
+#include "test.h"
+
+#define N 10000
+
+#define QUOTE_(x) #x
+#define QUOTE(x) QUOTE_(x)
+#define MALLOC_MEMZONE_SIZE QUOTE(RTE_MALLOC_MEMZONE_SIZE)
+
+/*
+ * Malloc
+ * ======
+ *
+ * Allocate some dynamic memory from heap (3 areas). Check that areas
+ * don't overlap and that alignment constraints match. This test is
+ * done many times on different lcores simultaneously.
+ */
+
+/* Return 1 if the areas [p1, p1+len1) and [p2, p2+len2) overlap,
+ * otherwise return 0. */
+static int
+is_memory_overlap(void *p1, size_t len1, void *p2, size_t len2)
+{
+       unsigned long a = (unsigned long)p1;
+       unsigned long b = (unsigned long)p2;
+
+       if (a <= b)
+               return (b - a) < len1 ? 1 : 0;
+       return (a - b) < len2 ? 1 : 0;
+}
+
+/* Return 1 if pointer p is aligned on an 'align'-byte boundary
+ * (align must be a power of two), 0 otherwise. */
+static int
+is_aligned(void *p, int align)
+{
+       unsigned long addr = (unsigned long)p;
+
+       return (addr & ((unsigned)align - 1)) == 0 ? 1 : 0;
+}
+
+/*
+ * Per-lcore worker: repeatedly allocate three areas with alignments of
+ * 8, 64 and 2048 bytes; verify that rte_zmalloc() memory is zeroed,
+ * that the three areas never overlap, and that each one respects its
+ * requested alignment.  Returns 0 on success, -1 on any failure.
+ */
+static int
+test_align_overlap_per_lcore(__attribute__((unused)) void *arg)
+{
+       const unsigned align1 = 8,
+                       align2 = 64,
+                       align3 = 2048;
+       unsigned i,j;
+       void *p1 = NULL, *p2 = NULL, *p3 = NULL;
+       int ret = 0;
+
+       for (i = 0; i < N; i++) {
+               p1 = rte_zmalloc("dummy", 1000, align1);
+               if (!p1){
+                       printf("rte_zmalloc returned NULL (i=%u)\n", i);
+                       ret = -1;
+                       break;
+               }
+               for(j = 0; j < 1000 ; j++) {
+                       /* bug fix: index byte j - the old code re-read byte 0
+                        * on every iteration, so only one byte was checked */
+                       if( ((char *)p1)[j] != 0) {
+                               printf("rte_zmalloc didn't zero "
+                                      "the allocated memory\n");
+                               ret = -1;
+                       }
+               }
+               p2 = rte_malloc("dummy", 1000, align2);
+               if (!p2){
+                       printf("rte_malloc returned NULL (i=%u)\n", i);
+                       ret = -1;
+                       rte_free(p1);
+                       break;
+               }
+               p3 = rte_malloc("dummy", 1000, align3);
+               if (!p3){
+                       printf("rte_malloc returned NULL (i=%u)\n", i);
+                       ret = -1;
+                       rte_free(p1);
+                       rte_free(p2);
+                       break;
+               }
+               if (is_memory_overlap(p1, 1000, p2, 1000)) {
+                       printf("p1 and p2 overlaps\n");
+                       ret = -1;
+               }
+               if (is_memory_overlap(p2, 1000, p3, 1000)) {
+                       printf("p2 and p3 overlaps\n");
+                       ret = -1;
+               }
+               if (is_memory_overlap(p1, 1000, p3, 1000)) {
+                       printf("p1 and p3 overlaps\n");
+                       ret = -1;
+               }
+               if (!is_aligned(p1, align1)) {
+                       printf("p1 is not aligned\n");
+                       ret = -1;
+               }
+               if (!is_aligned(p2, align2)) {
+                       printf("p2 is not aligned\n");
+                       ret = -1;
+               }
+               if (!is_aligned(p3, align3)) {
+                       printf("p3 is not aligned\n");
+                       ret = -1;
+               }
+               rte_free(p1);
+               rte_free(p2);
+               rte_free(p3);
+       }
+       rte_malloc_dump_stats("dummy");
+
+       return ret;
+}
+
+/*
+ * Per-lcore worker: allocate three areas via zmalloc/calloc/malloc,
+ * verify zeroing, overlap and alignment, then free them in every one
+ * of the six possible orders to exercise free-list coalescing.
+ * Returns 0 on success, -1 on any failure.
+ */
+static int
+test_reordered_free_per_lcore(__attribute__((unused)) void *arg)
+{
+       const unsigned align1 = 8,
+                       align2 = 64,
+                       align3 = 2048;
+       unsigned i,j;
+       void *p1, *p2, *p3;
+       int ret = 0;
+
+       for (i = 0; i < 30; i++) {
+               p1 = rte_zmalloc("dummy", 1000, align1);
+               if (!p1){
+                       printf("rte_zmalloc returned NULL (i=%u)\n", i);
+                       ret = -1;
+                       break;
+               }
+               for(j = 0; j < 1000 ; j++) {
+                       /* bug fix: index byte j - the old code re-read byte 0
+                        * on every iteration, so only one byte was checked */
+                       if( ((char *)p1)[j] != 0) {
+                               printf("rte_zmalloc didn't zero "
+                                      "the allocated memory\n");
+                               ret = -1;
+                       }
+               }
+               /* use calloc to allocate 1000 16-byte items this time */
+               p2 = rte_calloc("dummy", 1000, 16, align2);
+               /* for third request use regular malloc again */
+               p3 = rte_malloc("dummy", 1000, align3);
+               if (!p2 || !p3){
+                       printf("rte_malloc returned NULL (i=%u)\n", i);
+                       ret = -1;
+                       /* bug fix: don't leak the allocations that did
+                        * succeed (rte_free(NULL) is a safe no-op) */
+                       rte_free(p1);
+                       rte_free(p2);
+                       rte_free(p3);
+                       break;
+               }
+               if (is_memory_overlap(p1, 1000, p2, 1000)) {
+                       printf("p1 and p2 overlaps\n");
+                       ret = -1;
+               }
+               if (is_memory_overlap(p2, 1000, p3, 1000)) {
+                       printf("p2 and p3 overlaps\n");
+                       ret = -1;
+               }
+               if (is_memory_overlap(p1, 1000, p3, 1000)) {
+                       printf("p1 and p3 overlaps\n");
+                       ret = -1;
+               }
+               if (!is_aligned(p1, align1)) {
+                       printf("p1 is not aligned\n");
+                       ret = -1;
+               }
+               if (!is_aligned(p2, align2)) {
+                       printf("p2 is not aligned\n");
+                       ret = -1;
+               }
+               if (!is_aligned(p3, align3)) {
+                       printf("p3 is not aligned\n");
+                       ret = -1;
+               }
+               /* try freeing in every possible order */
+               switch (i%6){
+               case 0:
+                       rte_free(p1);
+                       rte_free(p2);
+                       rte_free(p3);
+                       break;
+               case 1:
+                       rte_free(p1);
+                       rte_free(p3);
+                       rte_free(p2);
+                       break;
+               case 2:
+                       rte_free(p2);
+                       rte_free(p1);
+                       rte_free(p3);
+                       break;
+               case 3:
+                       rte_free(p2);
+                       rte_free(p3);
+                       rte_free(p1);
+                       break;
+               case 4:
+                       rte_free(p3);
+                       rte_free(p1);
+                       rte_free(p2);
+                       break;
+               case 5:
+                       rte_free(p3);
+                       rte_free(p2);
+                       rte_free(p1);
+                       break;
+               }
+       }
+       rte_malloc_dump_stats("dummy");
+
+       return ret;
+}
+
+
+/* Exercise rte_str_to_size() against a table of known conversions:
+ * decimal/octal/hex bases, G/M/K suffixes in both cases, negative
+ * inputs (which must map to 0) and an out-of-range value. */
+static int
+test_str_to_size(void)
+{
+       static const struct {
+               const char *str;
+               uint64_t value;
+       } cases[] = {
+               { "5G", (uint64_t)5 * 1024 * 1024 * 1024 },
+               { "0x20g", (uint64_t)0x20 * 1024 * 1024 * 1024 },
+               { "10M", 10 * 1024 * 1024 },
+               { "050m", 050 * 1024 * 1024 },
+               { "8K", 8 * 1024 },
+               { "15k", 15 * 1024 },
+               { "0200", 0200 },
+               { "0x103", 0x103 },
+               { "432", 432 },
+               { "-1", 0 }, /* negative values return 0 */
+               { "  -2", 0 },
+               { "  -3MB", 0 },
+               { "18446744073709551616", 0 } /* ULLONG_MAX + 1 == out of range*/
+       };
+       unsigned idx;
+
+       for (idx = 0; idx < sizeof(cases) / sizeof(cases[0]); idx++) {
+               if (rte_str_to_size(cases[idx].str) != cases[idx].value)
+                       return -1;
+       }
+       return 0;
+}
+
+/* An allocation twice the size of a single malloc memzone must still
+ * succeed (the heap is expected to grow/span zones). */
+static int
+test_big_alloc(void)
+{
+       void *buf = rte_malloc("BIG", rte_str_to_size(MALLOC_MEMZONE_SIZE) * 2, 1024);
+
+       if (buf == NULL)
+               return -1;
+       rte_free(buf);
+       return 0;
+}
+
+/* Allocate just under a full memzone (128 bytes of slack left for
+ * allocator metadata) and release it; also verify that rte_free(NULL)
+ * is a harmless no-op. */
+static int
+test_memzone_size_alloc(void)
+{
+       void *buf = rte_malloc("BIG", rte_str_to_size(MALLOC_MEMZONE_SIZE) - 128, 64);
+
+       if (buf == NULL)
+               return -1;
+       rte_free(buf);
+       /* one extra check - check no crashes if free(NULL) */
+       rte_free(NULL);
+       return 0;
+}
+
+/* Smoke-test the per-type limit API.  The type-limits functionality is
+ * not yet implemented, so the calls are made only for coverage and the
+ * function always reports success regardless of their return values. */
+static int
+test_rte_malloc_type_limits(void)
+{
+       const char *type_name = "limit_test";
+
+       rte_malloc_set_limit(type_name, 64 * 1024);
+       rte_malloc_dump_stats(type_name);
+       return 0;
+}
+
+/*
+ * Exercise rte_realloc() behaviour step by step: data preservation on
+ * a grow (which may move the block), in-place expansion into an
+ * adjacent freed block, in-place shrink, re-alignment on request,
+ * forced move when the neighbouring free block is too small, and
+ * realloc(NULL, ...) acting as a plain malloc.  The checks depend on
+ * the exact heap layout produced by the preceding allocations, so the
+ * statement order must not be changed.
+ * Returns 0 on success, -1 on the first failure.
+ */
+static int
+test_realloc(void)
+{
+       const char hello_str[] = "Hello, world!";
+       const unsigned size1 = 1024;
+       const unsigned size2 = size1 + 1024;
+       const unsigned size3 = size2;
+       const unsigned size4 = size3 + 1024;
+
+       /* test data is the same even if element is moved*/
+       char *ptr1 = rte_zmalloc(NULL, size1, CACHE_LINE_SIZE);
+       if (!ptr1){
+               printf("NULL pointer returned from rte_zmalloc\n");
+               return -1;
+       }
+       rte_snprintf(ptr1, size1, "%s" ,hello_str);
+       char *ptr2 = rte_realloc(ptr1, size2, CACHE_LINE_SIZE);
+       if (!ptr2){
+               rte_free(ptr1);
+               printf("NULL pointer returned from rte_realloc\n");
+               return -1;
+       }
+       /* NOTE(review): an in-place grow is not expected here given the
+        * heap layout, but it is only reported, not treated as fatal */
+       if (ptr1 == ptr2){
+               printf("unexpected - ptr1 == ptr2\n");
+       }
+       if (strcmp(ptr2, hello_str) != 0){
+               printf("Error - lost data from pointed area\n");
+               rte_free(ptr2);
+               return -1;
+       }
+       /* the zmalloc'd tail beyond the string must still read as zero */
+       unsigned i;
+       for (i = strnlen(hello_str, sizeof(hello_str)); i < size1; i++)
+               if (ptr2[i] != 0){
+                       printf("Bad data in realloc\n");
+                       rte_free(ptr2);
+                       return -1;
+               }
+       /* now allocate third element, free the second
+        * and resize third. It should not move. (ptr1 is now invalid)
+        */
+       char *ptr3 = rte_zmalloc(NULL, size3, CACHE_LINE_SIZE);
+       if (!ptr3){
+               printf("NULL pointer returned from rte_zmalloc\n");
+               rte_free(ptr2);
+               return -1;
+       }
+       for (i = 0; i < size3; i++)
+               if (ptr3[i] != 0){
+                       printf("Bad data in zmalloc\n");
+                       rte_free(ptr3);
+                       rte_free(ptr2);
+                       return -1;
+               }
+       rte_free(ptr2);
+       /* first resize to half the size of the freed block */
+       char *ptr4 = rte_realloc(ptr3, size4, CACHE_LINE_SIZE);
+       if (!ptr4){
+               printf("NULL pointer returned from rte_realloc\n");
+               rte_free(ptr3);
+               return -1;
+       }
+       if (ptr3 != ptr4){
+               printf("Unexpected - ptr4 != ptr3\n");
+               rte_free(ptr4);
+               return -1;
+       }
+       /* now resize again to the full size of the freed block */
+       ptr4 = rte_realloc(ptr3, size3 + size2 + size1, CACHE_LINE_SIZE);
+       if (ptr3 != ptr4){
+               printf("Unexpected - ptr4 != ptr3 on second resize\n");
+               rte_free(ptr4);
+               return -1;
+       }
+       rte_free(ptr4);
+
+       /* now try a resize to a smaller size, see if it works */
+       const unsigned size5 = 1024;
+       const unsigned size6 = size5 / 2;
+       char *ptr5 = rte_malloc(NULL, size5, CACHE_LINE_SIZE);
+       if (!ptr5){
+               printf("NULL pointer returned from rte_malloc\n");
+               return -1;
+       }
+       char *ptr6 = rte_realloc(ptr5, size6, CACHE_LINE_SIZE);
+       if (!ptr6){
+               printf("NULL pointer returned from rte_realloc\n");
+               rte_free(ptr5);
+               return -1;
+       }
+       if (ptr5 != ptr6){
+               printf("Error, resizing to a smaller size moved data\n");
+               rte_free(ptr6);
+               return -1;
+       }
+       rte_free(ptr6);
+
+       /* check for behaviour changing alignment */
+       const unsigned size7 = 1024;
+       const unsigned orig_align = CACHE_LINE_SIZE;
+       unsigned new_align = CACHE_LINE_SIZE * 2;
+       char *ptr7 = rte_malloc(NULL, size7, orig_align);
+       if (!ptr7){
+               printf("NULL pointer returned from rte_malloc\n");
+               return -1;
+       }
+       /* calc an alignment we don't already have */
+       while(RTE_ALIGN(ptr7, new_align) == ptr7)
+               new_align *= 2;
+       char *ptr8 = rte_realloc(ptr7, size7, new_align);
+       if (!ptr8){
+               printf("NULL pointer returned from rte_realloc\n");
+               rte_free(ptr7);
+               return -1;
+       }
+       if (RTE_ALIGN(ptr8, new_align) != ptr8){
+               printf("Failure to re-align data\n");
+               rte_free(ptr8);
+               return -1;
+       }
+       rte_free(ptr8);
+
+       /* test behaviour when there is a free block after current one,
+        * but its not big enough
+        */
+       unsigned size9 = 1024, size10 = 1024;
+       unsigned size11 = size9 + size10 + 256;
+       char *ptr9 = rte_malloc(NULL, size9, CACHE_LINE_SIZE);
+       if (!ptr9){
+               printf("NULL pointer returned from rte_malloc\n");
+               return -1;
+       }
+       char *ptr10 = rte_malloc(NULL, size10, CACHE_LINE_SIZE);
+       if (!ptr10){
+               printf("NULL pointer returned from rte_malloc\n");
+               return -1;
+       }
+       rte_free(ptr9);
+       char *ptr11 = rte_realloc(ptr10, size11, CACHE_LINE_SIZE);
+       if (!ptr11){
+               printf("NULL pointer returned from rte_realloc\n");
+               rte_free(ptr10);
+               return -1;
+       }
+       if (ptr11 == ptr10){
+               printf("Error, unexpected that realloc has not created new buffer\n");
+               rte_free(ptr11);
+               return -1;
+       }
+       rte_free(ptr11);
+
+       /* check we don't crash if we pass null to realloc
+        * We should get a malloc of the size requested*/
+       const size_t size12 = 1024;
+       size_t size12_check;
+       char *ptr12 = rte_realloc(NULL, size12, CACHE_LINE_SIZE);
+       if (!ptr12){
+               printf("NULL pointer returned from rte_realloc\n");
+               return -1;
+       }
+       /* validate also returns the usable size, which must cover the
+        * requested amount exactly in this case */
+       if (rte_malloc_validate(ptr12, &size12_check) < 0 ||
+                       size12_check != size12){
+               rte_free(ptr12);
+               return -1;
+       }
+       rte_free(ptr12);
+       return 0;
+}
+
+/*
+ * Per-lcore stress test: keep allocating randomly sized (up to 64KB)
+ * blocks with random power-of-two alignments (up to 4KB), validating
+ * each one and chaining it into a linked list; with ~20% probability
+ * switch to freeing the whole list.  Repeats N times and returns 0 on
+ * success, -1 on the first allocation/validation failure.
+ */
+static int
+test_random_alloc_free(void *_ __attribute__((unused)))
+{
+       struct mem_list {
+               struct mem_list *next;
+               char data[0];   /* payload starts right after the link */
+       } *list_head = NULL;
+       unsigned i;
+       unsigned count = 0;
+
+       /* seed the PRNG so each run uses a different sequence */
+       rte_srand((unsigned)rte_rdtsc());
+
+       for (i = 0; i < N; i++){
+               unsigned free_mem = 0;
+               size_t allocated_size;
+               while (!free_mem){
+                       const unsigned mem_size = sizeof(struct mem_list) + \
+                               rte_rand() % (64 * 1024);
+                       const unsigned align = 1 << (rte_rand() % 12); /* up to 4k alignment */
+                       struct mem_list *entry = rte_malloc(NULL,
+                                       mem_size, align);
+                       if (entry == NULL)
+                               return -1;
+                       if (RTE_ALIGN(entry, align)!= entry)
+                               return -1;
+                       /* reported usable size must cover the request */
+                       if (rte_malloc_validate(entry, &allocated_size) == -1
+                                       || allocated_size < mem_size)
+                               return -1;
+                       /* fill the payload with this lcore's id so
+                        * cross-lcore corruption would be visible */
+                       memset(entry->data, rte_lcore_id(),
+                                       mem_size - sizeof(*entry));
+                       entry->next = list_head;
+                       if (rte_malloc_validate(entry, NULL) == -1)
+                               return -1;
+                       list_head = entry;
+
+                       count++;
+                       /* switch to freeing the memory with a 20% probability */
+                       free_mem = ((rte_rand() % 10) >= 8);
+               }
+               while (list_head){
+                       struct mem_list *entry = list_head;
+                       list_head = list_head->next;
+                       rte_free(entry);
+               }
+       }
+       printf("Lcore %u allocated/freed %u blocks\n", rte_lcore_id(), count);
+       return 0;
+}
+
+/* Report the failing function and line, then jump to the local
+ * 'err_return' cleanup label.  NB: the macro deliberately shares its
+ * name with the label it jumps to. */
+#define err_return() do { \
+       printf("%s: %d - Error\n", __func__, __LINE__); \
+       goto err_return; \
+} while (0)
+
+/*
+ * Check rte_malloc_validate(): a NULL pointer must be rejected, a
+ * freshly allocated block must pass and report at least the requested
+ * size, and (in RTE_LIBRTE_MALLOC_DEBUG builds) deliberately
+ * corrupting the block header and then the trailer must each make
+ * validation fail, with the block restored to a valid state after.
+ * Returns 0 on success, -1 on failure.
+ */
+static int
+test_rte_malloc_validate(void)
+{
+       const size_t request_size = 1024;
+       size_t allocated_size;
+       char *data_ptr = rte_malloc(NULL, request_size, CACHE_LINE_SIZE);
+       if (data_ptr == NULL) {
+               printf("%s: %d - Allocation error\n", __func__, __LINE__);
+               return -1;
+       }
+
+       /* check that a null input returns -1 */
+       if (rte_malloc_validate(NULL, NULL) != -1)
+               err_return();
+
+       /* check that we get ok on a valid pointer */
+       if (rte_malloc_validate(data_ptr, &allocated_size) < 0)
+               err_return();
+
+       /* check that the returned size is ok */
+       if (allocated_size < request_size)
+               err_return();
+
+#ifdef RTE_LIBRTE_MALLOC_DEBUG
+       int retval;
+       char *over_write_vals = NULL;
+
+       /****** change the header to be bad */
+       char save_buf[64];
+       /* NOTE(review): assumes the malloc element header lies within the
+        * 64 bytes immediately preceding the data - confirm against the
+        * malloc library's element layout */
+       over_write_vals = (char *)((uintptr_t)data_ptr - sizeof(save_buf));
+       /* first save the data as a backup before overwriting it */
+       memcpy(save_buf, over_write_vals, sizeof(save_buf));
+       memset(over_write_vals, 1, sizeof(save_buf));
+       /* then run validate */
+       retval = rte_malloc_validate(data_ptr, NULL);
+       /* finally restore the data again */
+       memcpy(over_write_vals, save_buf, sizeof(save_buf));
+       /* check we previously had an error */
+       if (retval != -1)
+               err_return();
+
+       /* check all ok again */
+       if (rte_malloc_validate(data_ptr, &allocated_size) < 0)
+               err_return();
+
+       /**** change the trailer to be bad */
+       over_write_vals = (char *)((uintptr_t)data_ptr + allocated_size);
+       /* first save the data as a backup before overwriting it */
+       memcpy(save_buf, over_write_vals, sizeof(save_buf));
+       memset(over_write_vals, 1, sizeof(save_buf));
+       /* then run validate */
+       retval = rte_malloc_validate(data_ptr, NULL);
+       /* finally restore the data again */
+       memcpy(over_write_vals, save_buf, sizeof(save_buf));
+       if (retval != -1)
+               err_return();
+
+       /* check all ok again */
+       if (rte_malloc_validate(data_ptr, &allocated_size) < 0)
+               err_return();
+#endif
+
+       rte_free(data_ptr);
+       return 0;
+
+err_return:
+       /*clean up */
+       rte_free(data_ptr);
+       return -1;
+}
+
+/* An align argument of 0 must produce memory aligned on at least a
+ * cache-line boundary. */
+static int
+test_zero_aligned_alloc(void)
+{
+       char *buf = rte_malloc(NULL,1024, 0);
+
+       if (buf == NULL)
+               return -1;
+       if (!rte_is_aligned(buf, CACHE_LINE_SIZE)) {
+               rte_free(buf);
+               return -1;
+       }
+       rte_free(buf);
+       return 0;
+}
+
+/* Invalid parameters must make rte_malloc() return NULL: a zero size,
+ * and an alignment that is not a power of two. */
+static int
+test_malloc_bad_params(void)
+{
+       const char *type = NULL;
+       char *p;
+
+       /* rte_malloc expected to return null with inappropriate size */
+       p = rte_malloc(type, 0, CACHE_LINE_SIZE);
+       if (p != NULL) {
+               rte_free(p);
+               return -1;
+       }
+
+       /* rte_malloc expected to return null with inappropriate alignment */
+       p = rte_malloc(type, 1024, 17);
+       if (p != NULL) {
+               rte_free(p);
+               return -1;
+       }
+
+       return 0;
+}
+
+/*
+ * Entry point for the malloc test suite.  Runs the single-threaded
+ * checks first (size parsing, zone-sized and oversized allocations,
+ * default alignment, bad parameters, realloc), then launches each
+ * per-lcore worker on every slave lcore and collects the results.
+ * Returns 0 if every stage passed, -1 (or the failing stage's status)
+ * otherwise.
+ */
+int
+test_malloc(void)
+{
+       unsigned lcore_id;
+       int ret = 0;
+
+       if (test_str_to_size() < 0){
+               printf("test_str_to_size() failed\n");
+               return -1;
+       }
+       else printf("test_str_to_size() passed\n");
+
+       if (test_memzone_size_alloc() < 0){
+               printf("test_memzone_size_alloc() failed\n");
+               return -1;
+       }
+       else printf("test_memzone_size_alloc() passed\n");
+
+       if (test_big_alloc() < 0){
+               printf("test_big_alloc() failed\n");
+               return -1;
+       }
+       else printf("test_big_alloc() passed\n");
+
+       if (test_zero_aligned_alloc() < 0){
+               printf("test_zero_aligned_alloc() failed\n");
+               return -1;
+       }
+       else printf("test_zero_aligned_alloc() passed\n");
+
+       if (test_malloc_bad_params() < 0){
+               printf("test_malloc_bad_params() failed\n");
+               return -1;
+       }
+       else printf("test_malloc_bad_params() passed\n");
+
+       if (test_realloc() < 0){
+               printf("test_realloc() failed\n");
+               return -1;
+       }
+       else printf("test_realloc() passed\n");
+/*----------------------------*/
+       /* fan the alignment/overlap worker out to all slave lcores,
+        * then wait for each and fold the statuses into 'ret' */
+       RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+               rte_eal_remote_launch(test_align_overlap_per_lcore, NULL, lcore_id);
+       }
+
+       RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+               if (rte_eal_wait_lcore(lcore_id) < 0)
+                       ret = -1;
+       }
+       if (ret < 0){
+               printf("test_align_overlap_per_lcore() failed\n");
+               return ret;
+       }
+       else printf("test_align_overlap_per_lcore() passed\n");
+       /*----------------------------*/
+       RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+               rte_eal_remote_launch(test_reordered_free_per_lcore, NULL, lcore_id);
+       }
+
+       RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+               if (rte_eal_wait_lcore(lcore_id) < 0)
+                       ret = -1;
+       }
+       if (ret < 0){
+               printf("test_reordered_free_per_lcore() failed\n");
+               return ret;
+       }
+       else printf("test_reordered_free_per_lcore() passed\n");
+
+       /*----------------------------*/
+       RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+               rte_eal_remote_launch(test_random_alloc_free, NULL, lcore_id);
+       }
+
+       RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+               if (rte_eal_wait_lcore(lcore_id) < 0)
+                       ret = -1;
+       }
+       if (ret < 0){
+               printf("test_random_alloc_free() failed\n");
+               return ret;
+       }
+       else printf("test_random_alloc_free() passed\n");
+
+       /*----------------------------*/
+       ret = test_rte_malloc_type_limits();
+       if (ret < 0){
+               printf("test_rte_malloc_type_limits() failed\n");
+               return ret;
+       }
+       /* TODO: uncomment following line once type limits are valid */
+       /*else printf("test_rte_malloc_type_limits() passed\n");*/
+
+       /*----------------------------*/
+       ret = test_rte_malloc_validate();
+       if (ret < 0){
+               printf("test_rte_malloc_validate() failed\n");
+               return ret;
+       }
+       else printf("test_rte_malloc_validate() passed\n");
+
+       return 0;
+}
diff --git a/app/test/test_mbuf.c b/app/test/test_mbuf.c
new file mode 100644 (file)
index 0000000..d09f87f
--- /dev/null
@@ -0,0 +1,875 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <string.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <errno.h>
+#include <sys/queue.h>
+
+#include <rte_common.h>
+#include <rte_debug.h>
+#include <rte_log.h>
+#include <rte_common.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_memzone.h>
+#include <rte_launch.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_ring.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_random.h>
+#include <rte_cycles.h>
+
+#include <cmdline_parse.h>
+
+#include "test.h"
+
+#define MBUF_SIZE               2048
+#define NB_MBUF                 128
+#define MBUF_TEST_DATA_LEN      1464
+#define MBUF_TEST_DATA_LEN2     50
+#define MBUF_TEST_HDR1_LEN      20
+#define MBUF_TEST_HDR2_LEN      30
+#define MBUF_TEST_ALL_HDRS_LEN  (MBUF_TEST_HDR1_LEN+MBUF_TEST_HDR2_LEN)
+
+#define REFCNT_MAX_ITER         64
+#define REFCNT_MAX_TIMEOUT      10
+#define REFCNT_MAX_REF          (RTE_MAX_LCORE)
+#define REFCNT_MBUF_NUM         64
+#define REFCNT_MBUF_SIZE        (sizeof (struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
+#define REFCNT_RING_SIZE        (REFCNT_MBUF_NUM * REFCNT_MAX_REF)
+
+#define MAKE_STRING(x)          # x
+
+static struct rte_mempool *pktmbuf_pool = NULL;
+static struct rte_mempool *ctrlmbuf_pool = NULL;
+
+#if defined RTE_MBUF_SCATTER_GATHER  && defined RTE_MBUF_REFCNT_ATOMIC
+
+static struct rte_mempool *refcnt_pool = NULL;
+static struct rte_ring *refcnt_mbuf_ring = NULL;
+static volatile uint32_t refcnt_stop_slaves;
+static uint32_t refcnt_lcore[RTE_MAX_LCORE];
+
+#endif
+
+/*
+ * MBUF
+ * ====
+ *
+ * #. Allocate a mbuf pool.
+ *
+ *    - The pool contains NB_MBUF elements, where each mbuf is MBUF_SIZE
+ *      bytes long.
+ *
+ * #. Test multiple allocations of mbufs from this pool.
+ *
+ *    - Allocate NB_MBUF and store pointers in a table.
+ *    - If an allocation fails, return an error.
+ *    - Free all these mbufs.
+ *    - Repeat the same test to check that mbufs were freed correctly.
+ *
+ * #. Test data manipulation in pktmbuf.
+ *
+ *    - Alloc an mbuf.
+ *    - Append data using rte_pktmbuf_append().
+ *    - Test for error in rte_pktmbuf_append() when len is too large.
+ *    - Trim data at the end of mbuf using rte_pktmbuf_trim().
+ *    - Test for error in rte_pktmbuf_trim() when len is too large.
+ *    - Prepend a header using rte_pktmbuf_prepend().
+ *    - Test for error in rte_pktmbuf_prepend() when len is too large.
+ *    - Remove data at the beginning of mbuf using rte_pktmbuf_adj().
+ *    - Test for error in rte_pktmbuf_adj() when len is too large.
+ *    - Check that appended data is not corrupt.
+ *    - Free the mbuf.
+ *    - Between all these tests, check data_len and pkt_len, and
+ *      that the mbuf is contiguous.
+ *    - Repeat the test to check that allocation operations
+ *      reinitialize the mbuf correctly.
+ *
+ */
+
+#define GOTO_FAIL(str, ...) do {                                       \
+               printf("mbuf test FAILED (l.%d): <" str ">\n",          \
+                      __LINE__,  ##__VA_ARGS__);                       \
+               goto fail;                                              \
+} while(0)
+
+/*
+ * test data manipulation in mbuf with non-ascii data
+ */
+static int
+test_pktmbuf_with_non_ascii_data(void)
+{
+       struct rte_mbuf *m = NULL;
+       char *data;
+
+       m = rte_pktmbuf_alloc(pktmbuf_pool);
+       if (m == NULL)
+               GOTO_FAIL("Cannot allocate mbuf");
+       if (rte_pktmbuf_pkt_len(m) != 0)
+               GOTO_FAIL("Bad length");
+
+       data = rte_pktmbuf_append(m, MBUF_TEST_DATA_LEN);
+       if (data == NULL)
+               GOTO_FAIL("Cannot append data");
+       if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN)
+               GOTO_FAIL("Bad pkt length");
+       if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN)
+               GOTO_FAIL("Bad data length");
+       memset(data, 0xff, rte_pktmbuf_pkt_len(m));
+       if (!rte_pktmbuf_is_contiguous(m))
+               GOTO_FAIL("Buffer should be continuous");
+       rte_pktmbuf_dump(m, MBUF_TEST_DATA_LEN);
+
+       rte_pktmbuf_free(m);
+
+       return 0;
+
+fail:
+       if(m) {
+               rte_pktmbuf_free(m);
+       }
+       return -1;
+}
+
+/*
+ * test data manipulation in mbuf
+ */
+static int
+test_one_pktmbuf(void)
+{
+       struct rte_mbuf *m = NULL;
+       char *data, *data2, *hdr;
+       unsigned i;
+
+       printf("Test pktmbuf API\n");
+
+       /* alloc a mbuf */
+
+       m = rte_pktmbuf_alloc(pktmbuf_pool);
+       if (m == NULL)
+               GOTO_FAIL("Cannot allocate mbuf");
+       if (rte_pktmbuf_pkt_len(m) != 0)
+               GOTO_FAIL("Bad length");
+
+       rte_pktmbuf_dump(m, 0);
+
+       /* append data */
+
+       data = rte_pktmbuf_append(m, MBUF_TEST_DATA_LEN);
+       if (data == NULL)
+               GOTO_FAIL("Cannot append data");
+       if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN)
+               GOTO_FAIL("Bad pkt length");
+       if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN)
+               GOTO_FAIL("Bad data length");
+       memset(data, 0x66, rte_pktmbuf_pkt_len(m));
+       if (!rte_pktmbuf_is_contiguous(m))
+               GOTO_FAIL("Buffer should be continuous");
+       rte_pktmbuf_dump(m, MBUF_TEST_DATA_LEN);
+       rte_pktmbuf_dump(m, 2*MBUF_TEST_DATA_LEN);
+
+       /* this append should fail */
+
+       data2 = rte_pktmbuf_append(m, (uint16_t)(rte_pktmbuf_tailroom(m) + 1));
+       if (data2 != NULL)
+               GOTO_FAIL("Append should not succeed");
+
+       /* append some more data */
+
+       data2 = rte_pktmbuf_append(m, MBUF_TEST_DATA_LEN2);
+       if (data2 == NULL)
+               GOTO_FAIL("Cannot append data");
+       if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN + MBUF_TEST_DATA_LEN2)
+               GOTO_FAIL("Bad pkt length");
+       if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN + MBUF_TEST_DATA_LEN2)
+               GOTO_FAIL("Bad data length");
+       if (!rte_pktmbuf_is_contiguous(m))
+               GOTO_FAIL("Buffer should be continuous");
+
+       /* trim data at the end of mbuf */
+
+       if (rte_pktmbuf_trim(m, MBUF_TEST_DATA_LEN2) < 0)
+               GOTO_FAIL("Cannot trim data");
+       if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN)
+               GOTO_FAIL("Bad pkt length");
+       if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN)
+               GOTO_FAIL("Bad data length");
+       if (!rte_pktmbuf_is_contiguous(m))
+               GOTO_FAIL("Buffer should be continuous");
+
+       /* this trim should fail */
+
+       if (rte_pktmbuf_trim(m, (uint16_t)(rte_pktmbuf_data_len(m) + 1)) == 0)
+               GOTO_FAIL("trim should not succeed");
+
+       /* prepend one header */
+
+       hdr = rte_pktmbuf_prepend(m, MBUF_TEST_HDR1_LEN);
+       if (hdr == NULL)
+               GOTO_FAIL("Cannot prepend");
+       if (data - hdr != MBUF_TEST_HDR1_LEN)
+               GOTO_FAIL("Prepend failed");
+       if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN + MBUF_TEST_HDR1_LEN)
+               GOTO_FAIL("Bad pkt length");
+       if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN + MBUF_TEST_HDR1_LEN)
+               GOTO_FAIL("Bad data length");
+       if (!rte_pktmbuf_is_contiguous(m))
+               GOTO_FAIL("Buffer should be continuous");
+       memset(hdr, 0x55, MBUF_TEST_HDR1_LEN);
+
+       /* prepend another header */
+
+       hdr = rte_pktmbuf_prepend(m, MBUF_TEST_HDR2_LEN);
+       if (hdr == NULL)
+               GOTO_FAIL("Cannot prepend");
+       if (data - hdr != MBUF_TEST_ALL_HDRS_LEN)
+               GOTO_FAIL("Prepend failed");
+       if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN + MBUF_TEST_ALL_HDRS_LEN)
+               GOTO_FAIL("Bad pkt length");
+       if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN + MBUF_TEST_ALL_HDRS_LEN)
+               GOTO_FAIL("Bad data length");
+       if (!rte_pktmbuf_is_contiguous(m))
+               GOTO_FAIL("Buffer should be continuous");
+       memset(hdr, 0x55, MBUF_TEST_HDR2_LEN);
+
+       rte_mbuf_sanity_check(m, RTE_MBUF_PKT, 1);
+       rte_mbuf_sanity_check(m, RTE_MBUF_PKT, 0);
+       rte_pktmbuf_dump(m, 0);
+
+       /* this prepend should fail */
+
+       hdr = rte_pktmbuf_prepend(m, (uint16_t)(rte_pktmbuf_headroom(m) + 1));
+       if (hdr != NULL)
+               GOTO_FAIL("prepend should not succeed");
+
+       /* remove data at beginning of mbuf (adj) */
+
+       if (data != rte_pktmbuf_adj(m, MBUF_TEST_ALL_HDRS_LEN))
+               GOTO_FAIL("rte_pktmbuf_adj failed");
+       if (rte_pktmbuf_pkt_len(m) != MBUF_TEST_DATA_LEN)
+               GOTO_FAIL("Bad pkt length");
+       if (rte_pktmbuf_data_len(m) != MBUF_TEST_DATA_LEN)
+               GOTO_FAIL("Bad data length");
+       if (!rte_pktmbuf_is_contiguous(m))
+               GOTO_FAIL("Buffer should be continuous");
+
+       /* this adj should fail */
+
+       if (rte_pktmbuf_adj(m, (uint16_t)(rte_pktmbuf_data_len(m) + 1)) != NULL)
+               GOTO_FAIL("rte_pktmbuf_adj should not succeed");
+
+       /* check data */
+
+       if (!rte_pktmbuf_is_contiguous(m))
+               GOTO_FAIL("Buffer should be continuous");
+
+       for (i=0; i<MBUF_TEST_DATA_LEN; i++) {
+               if (data[i] != 0x66)
+                       GOTO_FAIL("Data corrupted at offset %u", i);
+       }
+
+       /* free mbuf */
+
+       rte_pktmbuf_free(m);
+       m = NULL;
+       return 0;
+
+fail:
+       if (m)
+               rte_pktmbuf_free(m);
+       return -1;
+}
+
+/*
+ * test control mbuf
+ */
+static int
+test_one_ctrlmbuf(void)
+{
+       struct rte_mbuf *m = NULL;
+       char message[] = "This is a message carried by a ctrlmbuf";
+
+       printf("Test ctrlmbuf API\n");
+
+       /* alloc a mbuf */
+
+       m = rte_ctrlmbuf_alloc(ctrlmbuf_pool);
+       if (m == NULL)
+               GOTO_FAIL("Cannot allocate mbuf");
+       if (rte_ctrlmbuf_len(m) != 0)
+               GOTO_FAIL("Bad length");
+
+       /* set data */
+       rte_ctrlmbuf_data(m) = &message;
+       rte_ctrlmbuf_len(m) = sizeof(message);
+
+       /* read data */
+       if (rte_ctrlmbuf_data(m) != message)
+               GOTO_FAIL("Invalid data pointer");
+       if (rte_ctrlmbuf_len(m) != sizeof(message))
+               GOTO_FAIL("Invalid len");
+
+       rte_mbuf_sanity_check(m, RTE_MBUF_CTRL, 0);
+
+       /* free mbuf */
+       rte_ctrlmbuf_free(m);
+       m = NULL;
+       return 0;
+
+fail:
+       if (m)
+               rte_ctrlmbuf_free(m);
+       return -1;
+}
+
+static int
+testclone_testupdate_testdetach(void)
+{
+#ifndef RTE_MBUF_SCATTER_GATHER
+       return 0;
+#else
+       struct rte_mbuf *mc = NULL;
+       struct rte_mbuf *clone = NULL;
+
+       /* alloc a mbuf */
+
+       mc = rte_pktmbuf_alloc(pktmbuf_pool);
+       if (mc == NULL)
+               GOTO_FAIL("ooops not allocating mbuf");
+
+       if (rte_pktmbuf_pkt_len(mc) != 0)
+               GOTO_FAIL("Bad length");
+
+
+       /* clone the allocated mbuf */
+       clone = rte_pktmbuf_clone(mc, pktmbuf_pool);
+       if (clone == NULL)
+               GOTO_FAIL("cannot clone data\n");
+       rte_pktmbuf_free(clone);
+
+       mc->pkt.next = rte_pktmbuf_alloc(pktmbuf_pool);
+       if(mc->pkt.next == NULL)
+               GOTO_FAIL("Next Pkt Null\n");
+
+       clone = rte_pktmbuf_clone(mc, pktmbuf_pool);
+       if (clone == NULL)
+               GOTO_FAIL("cannot clone data\n");
+
+       /* free mbuf */
+       rte_pktmbuf_free(mc);
+       rte_pktmbuf_free(clone);
+       mc = NULL;
+       clone = NULL;
+       return 0;
+
+fail:
+       if (mc)
+               rte_pktmbuf_free(mc);
+       return -1;
+#endif /* RTE_MBUF_SCATTER_GATHER */
+}
+#undef GOTO_FAIL
+
+
+
+/*
+ * test allocation and free of mbufs
+ */
+static int
+test_pktmbuf_pool(void)
+{
+       unsigned i;
+       struct rte_mbuf *m[NB_MBUF];
+       int ret = 0;
+
+       for (i=0; i<NB_MBUF; i++)
+               m[i] = NULL;
+
+       /* alloc NB_MBUF mbufs */
+       for (i=0; i<NB_MBUF; i++) {
+               m[i] = rte_pktmbuf_alloc(pktmbuf_pool);
+               if (m[i] == NULL) {
+                       printf("rte_pktmbuf_alloc() failed (%u)\n", i);
+                       ret = -1;
+               }
+       }
+       struct rte_mbuf *extra = NULL;
+       extra = rte_pktmbuf_alloc(pktmbuf_pool);
+       if(extra != NULL) {
+               printf("Error pool not empty");
+               ret = -1;
+       }
+#ifdef RTE_MBUF_SCATTER_GATHER
+       extra = rte_pktmbuf_clone(m[0], pktmbuf_pool);
+       if(extra != NULL) {
+               printf("Error pool not empty");
+               ret = -1;
+       }
+#endif
+       /* free them */
+       for (i=0; i<NB_MBUF; i++) {
+               if (m[i] != NULL)
+                       rte_pktmbuf_free(m[i]);
+       }
+
+       return ret;
+}
+
+
+
+static int
+test_pktmbuf_free_segment(void)
+{
+       unsigned i;
+       struct rte_mbuf *m[NB_MBUF];
+       int ret = 0;
+
+       for (i=0; i<NB_MBUF; i++)
+               m[i] = NULL;
+
+       /* alloc NB_MBUF mbufs */
+       for (i=0; i<NB_MBUF; i++) {
+               m[i] = rte_pktmbuf_alloc(pktmbuf_pool);
+               if (m[i] == NULL) {
+                       printf("rte_pktmbuf_alloc() failed (%u)\n", i);
+                       ret = -1;
+               }
+       }
+
+       /* free them */
+       for (i=0; i<NB_MBUF; i++) {
+               if (m[i] != NULL) {
+                       struct rte_mbuf *mb, *mt;
+
+                       mb = m[i];
+                       while(mb != NULL) {
+                               mt = mb;
+                               mb = mb->pkt.next;
+                               rte_pktmbuf_free_seg(mt);
+                       }
+               }
+       }
+
+       return ret;
+}
+
+/*
+ * Stress test for rte_mbuf atomic refcnt.
+ * Implies that:
+ * RTE_MBUF_SCATTER_GATHER and RTE_MBUF_REFCNT_ATOMIC are both defined.
+ * For more efficiency, it is recommended to run with RTE_LIBRTE_MBUF_DEBUG defined.
+ */
+
+#if defined RTE_MBUF_SCATTER_GATHER  && defined RTE_MBUF_REFCNT_ATOMIC
+
+static int
+test_refcnt_slave(__attribute__((unused)) void *arg)
+{
+       uint32_t lcore, free;
+       void *mp;
+
+       lcore = rte_lcore_id();
+       printf("%s started at lcore %u\n", __func__, lcore);
+
+       free = 0;
+       while (refcnt_stop_slaves == 0) {
+               if (rte_ring_dequeue(refcnt_mbuf_ring, &mp) == 0) {
+                       free++;
+                       rte_pktmbuf_free((struct rte_mbuf *)mp);
+               }
+       }
+
+       refcnt_lcore[lcore] += free;
+       printf("%s finished at lcore %u, "
+              "number of freed mbufs: %u\n",
+              __func__, lcore, free);
+       return (0);
+}
+
+static void
+test_refcnt_iter(uint32_t lcore, uint32_t iter)
+{
+       uint16_t ref;
+       uint32_t i, n, tref, wn;
+       struct rte_mbuf *m;
+
+       tref = 0;
+
+       /* For each mbuf in the pool:
+        * - allocate mbuf,
+        * - increment it's reference up to N+1,
+        * - enqueue it N times into the ring for slave cores to free.
+        */
+       for (i = 0, n = rte_mempool_count(refcnt_pool);
+           i != n && (m = rte_pktmbuf_alloc(refcnt_pool)) != NULL;
+           i++) {
+               ref = RTE_MAX(rte_rand() % REFCNT_MAX_REF, 1UL);
+               tref += ref;
+               if ((ref & 1) != 0) {
+                       rte_pktmbuf_refcnt_update(m, ref);
+                       while (ref-- != 0)
+                               rte_ring_enqueue(refcnt_mbuf_ring, m);
+               } else {
+                       while (ref-- != 0) {
+                               rte_pktmbuf_refcnt_update(m, 1);
+                               rte_ring_enqueue(refcnt_mbuf_ring, m);
+                       }
+               }
+               rte_pktmbuf_free(m);
+       }
+
+       if (i != n)
+               rte_panic("(lcore=%u, iter=%u): was able to allocate only "
+                         "%u from %u mbufs\n", lcore, iter, i, n);
+
+       /* wait till slave lcores  will consume all mbufs */
+       while (!rte_ring_empty(refcnt_mbuf_ring))
+               ;
+
+       /* check that all mbufs are back into mempool by now */
+       for (wn = 0; wn != REFCNT_MAX_TIMEOUT; wn++) {
+               if ((i = rte_mempool_count(refcnt_pool)) == n) {
+                       refcnt_lcore[lcore] += tref;
+                       printf("%s(lcore=%u, iter=%u) completed, "
+                           "%u references processed\n",
+                           __func__, lcore, iter, tref);
+                       return;
+               }
+               rte_delay_ms(1000);
+       }
+
+       rte_panic("(lcore=%u, iter=%u): after %us only "
+                 "%u of %u mbufs left free\n", lcore, iter, wn, i, n);
+}
+
+static int
+test_refcnt_master(void)
+{
+       uint32_t i, lcore;
+
+       lcore = rte_lcore_id();
+       printf("%s started at lcore %u\n", __func__, lcore);
+
+       for (i = 0; i != REFCNT_MAX_ITER; i++)
+               test_refcnt_iter(lcore, i);
+
+       refcnt_stop_slaves = 1;
+       rte_wmb();
+
+       printf("%s finished at lcore %u\n", __func__, lcore);
+       return (0);
+}
+
+#endif
+
+static int
+test_refcnt_mbuf(void)
+{
+#if defined RTE_MBUF_SCATTER_GATHER  && defined RTE_MBUF_REFCNT_ATOMIC
+
+       uint32_t lnum, master, slave, tref;
+
+
+       if ((lnum = rte_lcore_count()) == 1) {
+               printf("skipping %s, number of lcores: %u is not enough\n",
+                   __func__, lnum);
+               return (0);
+       }
+
+       printf("starting %s, at %u lcores\n", __func__, lnum);
+
+       /* create refcnt pool & ring if they don't exist */
+
+       if (refcnt_pool == NULL &&
+                       (refcnt_pool = rte_mempool_create(
+                       MAKE_STRING(refcnt_pool),
+                       REFCNT_MBUF_NUM, REFCNT_MBUF_SIZE, 0,
+                       sizeof(struct rte_pktmbuf_pool_private),
+                       rte_pktmbuf_pool_init, NULL, rte_pktmbuf_init, NULL,
+                       SOCKET_ID_ANY, 0)) == NULL) {
+               printf("%s: cannot allocate " MAKE_STRING(refcnt_pool) "\n",
+                   __func__);
+               return (-1);
+       }
+
+       if (refcnt_mbuf_ring == NULL &&
+                       (refcnt_mbuf_ring = rte_ring_create("refcnt_mbuf_ring",
+                       REFCNT_RING_SIZE, SOCKET_ID_ANY,
+                       RING_F_SP_ENQ)) == NULL) {
+               printf("%s: cannot allocate " MAKE_STRING(refcnt_mbuf_ring)
+                   "\n", __func__);
+               return (-1);
+       }
+
+       refcnt_stop_slaves = 0;
+       memset(refcnt_lcore, 0, sizeof (refcnt_lcore));
+
+       rte_eal_mp_remote_launch(test_refcnt_slave, NULL, SKIP_MASTER);
+
+       test_refcnt_master();
+
+       rte_eal_mp_wait_lcore();
+
+       /* check that we porcessed all references */
+       tref = 0;
+       master = rte_get_master_lcore();
+
+       RTE_LCORE_FOREACH_SLAVE(slave)
+               tref += refcnt_lcore[slave];
+
+       if (tref != refcnt_lcore[master])
+               rte_panic("refernced mbufs: %u, freed mbufs: %u\n",
+                         tref, refcnt_lcore[master]);
+
+       rte_mempool_dump(refcnt_pool);
+       rte_ring_dump(refcnt_mbuf_ring);
+
+#endif
+       return (0);
+}
+
+#ifdef RTE_EXEC_ENV_BAREMETAL
+
+/* baremetal - don't test failing sanity checks */
+static int
+test_failing_mbuf_sanity_check(void)
+{
+       return 0;
+}
+
+#else
+
+#include <unistd.h>
+#include <sys/wait.h>
+
+/* linuxapp - use fork() to test mbuf errors panic */
+static int
+verify_mbuf_check_panics(struct rte_mbuf *buf)
+{
+       int pid;
+       int status;
+
+       pid = fork();
+
+       if (pid == 0) {
+               rte_mbuf_sanity_check(buf, RTE_MBUF_PKT, 1); /* should panic */
+               exit(0);  /* return normally if it doesn't panic */
+       } else if (pid < 0){
+               printf("Fork Failed\n");
+               return -1;
+       }
+       wait(&status);
+       if(status == 0)
+               return -1;
+
+       return 0;
+}
+
+static int
+test_failing_mbuf_sanity_check(void)
+{
+       struct rte_mbuf *buf;
+       struct rte_mbuf badbuf;
+
+       printf("Checking rte_mbuf_sanity_check for failure conditions\n");
+
+       /* get a good mbuf to use to make copies */
+       buf = rte_pktmbuf_alloc(pktmbuf_pool);
+       if (buf == NULL)
+               return -1;
+       printf("Checking good mbuf initially\n");
+       if (verify_mbuf_check_panics(buf) != -1)
+               return -1;
+
+       printf("Now checking for error conditions\n");
+
+       if (verify_mbuf_check_panics(NULL)) {
+               printf("Error with NULL mbuf test\n");
+               return -1;
+       }
+
+       badbuf = *buf;
+       badbuf.type = (uint8_t)-1;
+       if (verify_mbuf_check_panics(&badbuf)) {
+               printf("Error with bad-type mbuf test\n");
+               return -1;
+       }
+
+       badbuf = *buf;
+       badbuf.pool = NULL;
+       if (verify_mbuf_check_panics(&badbuf)) {
+               printf("Error with bad-pool mbuf test\n");
+               return -1;
+       }
+
+       badbuf = *buf;
+       badbuf.buf_physaddr = 0;
+       if (verify_mbuf_check_panics(&badbuf)) {
+               printf("Error with bad-physaddr mbuf test\n");
+               return -1;
+       }
+
+       badbuf = *buf;
+       badbuf.buf_addr = NULL;
+       if (verify_mbuf_check_panics(&badbuf)) {
+               printf("Error with bad-addr mbuf test\n");
+               return -1;
+       }
+
+#ifdef RTE_MBUF_SCATTER_GATHER
+       badbuf = *buf;
+       badbuf.refcnt = 0;
+       if (verify_mbuf_check_panics(&badbuf)) {
+               printf("Error with bad-refcnt(0) mbuf test\n");
+               return -1;
+       }
+
+       badbuf = *buf;
+       badbuf.refcnt = UINT16_MAX;
+       if (verify_mbuf_check_panics(&badbuf)) {
+               printf("Error with bad-refcnt(MAX) mbuf test\n");
+               return -1;
+       }
+#endif
+
+       return 0;
+}
+#endif
+
+
+int
+test_mbuf(void)
+{
+       RTE_BUILD_BUG_ON(sizeof(struct rte_mbuf) != 64);
+
+       /* create pktmbuf pool if it does not exist */
+       if (pktmbuf_pool == NULL) {
+               pktmbuf_pool =
+                       rte_mempool_create("test_pktmbuf_pool", NB_MBUF,
+                                          MBUF_SIZE, 32,
+                                          sizeof(struct rte_pktmbuf_pool_private),
+                                          rte_pktmbuf_pool_init, NULL,
+                                          rte_pktmbuf_init, NULL,
+                                          SOCKET_ID_ANY, 0);
+       }
+
+       if (pktmbuf_pool == NULL) {
+               printf("cannot allocate mbuf pool\n");
+               return -1;
+       }
+
+       /* test multiple mbuf alloc */
+       if (test_pktmbuf_pool() < 0) {
+               printf("test_mbuf_pool() failed\n");
+               return -1;
+       }
+
+       /* do it another time to check that all mbufs were freed */
+       if (test_pktmbuf_pool() < 0) {
+               printf("test_mbuf_pool() failed (2)\n");
+               return -1;
+       }
+
+       /* test data manipulation in mbuf */
+       if (test_one_pktmbuf() < 0) {
+               printf("test_one_mbuf() failed\n");
+               return -1;
+       }
+
+
+       /*
+        * do it another time, to check that allocation reinitialize
+        * the mbuf correctly
+        */
+       if (test_one_pktmbuf() < 0) {
+               printf("test_one_mbuf() failed (2)\n");
+               return -1;
+       }
+
+       if (test_pktmbuf_with_non_ascii_data() < 0) {
+               printf("test_pktmbuf_with_non_ascii_data() failed\n");
+               return -1;
+       }
+
+       /* create ctrlmbuf pool if it does not exist */
+       if (ctrlmbuf_pool == NULL) {
+               ctrlmbuf_pool =
+                       rte_mempool_create("test_ctrlmbuf_pool", NB_MBUF,
+                                          sizeof(struct rte_mbuf), 32, 0,
+                                          NULL, NULL,
+                                          rte_ctrlmbuf_init, NULL,
+                                          SOCKET_ID_ANY, 0);
+       }
+
+       /* test control mbuf */
+       if (test_one_ctrlmbuf() < 0) {
+               printf("test_one_ctrlmbuf() failed\n");
+               return -1;
+       }
+
+       /* test free pktmbuf segment one by one */
+       if (test_pktmbuf_free_segment() < 0) {
+               printf("test_pktmbuf_free_segment() failed.\n");
+               return -1;
+       }
+
+       if (testclone_testupdate_testdetach()<0){
+               printf("testclone_and_testupdate() failed \n");
+               return -1;
+       }
+
+       if (test_refcnt_mbuf()<0){
+               printf("test_refcnt_mbuf() failed \n");
+               return -1;
+       }
+
+       if (test_failing_mbuf_sanity_check() < 0) {
+               printf("test_failing_mbuf_sanity_check() failed\n");
+               return -1;
+       }
+       return 0;
+}
diff --git a/app/test/test_memcpy.c b/app/test/test_memcpy.c
new file mode 100644 (file)
index 0000000..3ae3f99
--- /dev/null
@@ -0,0 +1,429 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+
+#include <rte_common.h>
+#include <cmdline_parse.h>
+#include <rte_cycles.h>
+#include <rte_random.h>
+#include <rte_malloc.h>
+
+#include <rte_memcpy.h>
+
+#include "test.h"
+
+/*
+ * Set this to the maximum buffer size you want to test. If it is 0, then the
+ * values in the buf_sizes[] array below will be used.
+ */
+#define TEST_VALUE_RANGE        0
+
+/* List of buffer sizes to test */
+#if TEST_VALUE_RANGE == 0
+static size_t buf_sizes[] = {
+       0, 1, 7, 8, 9, 15, 16, 17, 31, 32, 33, 63, 64, 65, 127, 128, 129, 255,
+       256, 257, 320, 384, 511, 512, 513, 1023, 1024, 1025, 1518, 1522, 1600,
+       2048, 3072, 4096, 5120, 6144, 7168, 8192
+};
+/* MUST be as large as largest packet size above */
+#define SMALL_BUFFER_SIZE       8192
+#else /* TEST_VALUE_RANGE != 0 */
+/* test every size from 0 to TEST_VALUE_RANGE - 1 (filled in at run time) */
+static size_t buf_sizes[TEST_VALUE_RANGE];
+#define SMALL_BUFFER_SIZE       TEST_VALUE_RANGE
+#endif /* TEST_VALUE_RANGE == 0 */
+
+
+/*
+ * Arrays of this size are used for measuring uncached memory accesses by
+ * picking a random location within the buffer. Make this smaller if there are
+ * memory allocation errors.
+ */
+#define LARGE_BUFFER_SIZE       (100 * 1024 * 1024)
+
+/* How many times to run timing loop for performance tests */
+#define TEST_ITERATIONS         1000000
+/* copies per batch; offsets are regenerated between batches */
+#define TEST_BATCH_SIZE         100
+
+/* Data is aligned on this many bytes (power of 2) */
+#define ALIGNMENT_UNIT          16
+
+/*
+ * Pointers used in performance tests. The two large buffers are for uncached
+ * access where random addresses within the buffer are used for each
+ * memcpy. The two small buffers are for cached access.
+ */
+static uint8_t *large_buf_read, *large_buf_write,
+               *small_buf_read, *small_buf_write;
+
+/*
+ * Initialise the four test buffers.
+ *
+ * Allocates the two large buffers (for uncached-access tests) and the
+ * two small buffers (for cached-access tests), then fills the two
+ * "read" (source) buffers with random bytes.  On any allocation
+ * failure the buffers already allocated are released through the
+ * chained error labels, in reverse allocation order.
+ *
+ * Returns 0 on success, -1 if any allocation failed.
+ */
+static int
+init_buffers(void)
+{
+       unsigned i;
+
+       large_buf_read = rte_malloc("memcpy", LARGE_BUFFER_SIZE, ALIGNMENT_UNIT);
+       if (large_buf_read == NULL)
+               goto error_large_buf_read;
+
+       large_buf_write = rte_malloc("memcpy", LARGE_BUFFER_SIZE, ALIGNMENT_UNIT);
+       if (large_buf_write == NULL)
+               goto error_large_buf_write;
+
+       small_buf_read = rte_malloc("memcpy", SMALL_BUFFER_SIZE, ALIGNMENT_UNIT);
+       if (small_buf_read == NULL)
+               goto error_small_buf_read;
+
+       small_buf_write = rte_malloc("memcpy", SMALL_BUFFER_SIZE, ALIGNMENT_UNIT);
+       if (small_buf_write == NULL)
+               goto error_small_buf_write;
+
+       /* only the source buffers need defined content */
+       for (i = 0; i < LARGE_BUFFER_SIZE; i++)
+               large_buf_read[i] = rte_rand();
+       for (i = 0; i < SMALL_BUFFER_SIZE; i++)
+               small_buf_read[i] = rte_rand();
+
+       return 0;
+
+       /* unwind in reverse allocation order */
+error_small_buf_write:
+       rte_free(small_buf_read);
+error_small_buf_read:
+       rte_free(large_buf_write);
+error_large_buf_write:
+       rte_free(large_buf_read);
+error_large_buf_read:
+       printf("ERROR: not enough memory\n");
+       return -1;
+}
+
+/* Release the four test buffers allocated by init_buffers(). */
+static void
+free_buffers(void)
+{
+       rte_free(large_buf_read);
+       rte_free(large_buf_write);
+       rte_free(small_buf_read);
+       rte_free(small_buf_write);
+}
+
+/*
+ * Pick a random ALIGNMENT_UNIT-aligned offset into a large buffer,
+ * keeping SMALL_BUFFER_SIZE bytes of headroom so the biggest copy
+ * under test cannot run past the end of the buffer.
+ */
+static inline size_t
+get_rand_offset(void)
+{
+       const size_t usable = LARGE_BUFFER_SIZE - SMALL_BUFFER_SIZE;
+       size_t off;
+
+       off = rte_rand() % usable;
+       return off & ~((size_t)ALIGNMENT_UNIT - 1);
+}
+
+/*
+ * Populate one batch of source/destination offsets.  Cached accesses
+ * always use offset 0 (so the same lines stay hot); uncached accesses
+ * get a fresh random aligned offset for every copy.
+ */
+static inline void
+fill_addr_arrays(size_t *dst_addr, int is_dst_cached,
+                 size_t *src_addr, int is_src_cached)
+{
+       unsigned int n = 0;
+
+       while (n < TEST_BATCH_SIZE) {
+               dst_addr[n] = is_dst_cached ? 0 : get_rand_offset();
+               src_addr[n] = is_src_cached ? 0 : get_rand_offset();
+               n++;
+       }
+}
+
+/* Integer division, rounded to the nearest integer (halves round up). */
+static inline uint64_t
+div_round(uint64_t dividend, uint64_t divisor)
+{
+       uint64_t num = (2 * dividend) + divisor;
+       uint64_t den = 2 * divisor;
+
+       return num / den;
+}
+
+/*
+ * WORKAROUND: For some reason the first test doing an uncached write
+ * takes a very long time (~25 times longer than is expected). So we do
+ * it once without timing.
+ *
+ * Performs TEST_ITERATIONS copies of "size" bytes in batches of
+ * TEST_BATCH_SIZE, regenerating the offsets between batches, i.e. the
+ * same work as the timed loops but with no TSC reads.
+ */
+static void
+do_uncached_write(uint8_t *dst, int is_dst_cached,
+                  const uint8_t *src, int is_src_cached, size_t size)
+{
+       unsigned i, j;
+       size_t dst_addrs[TEST_BATCH_SIZE], src_addrs[TEST_BATCH_SIZE];
+
+       for (i = 0; i < (TEST_ITERATIONS / TEST_BATCH_SIZE); i++) {
+               fill_addr_arrays(dst_addrs, is_dst_cached,
+                        src_addrs, is_src_cached);
+               for (j = 0; j < TEST_BATCH_SIZE; j++)
+                       rte_memcpy(dst+dst_addrs[j], src+src_addrs[j], size);
+       }
+}
+
+/*
+ * Run a single memcpy performance test: time TEST_ITERATIONS copies of
+ * "size" bytes with rte_memcpy(), then the same number with libc
+ * memcpy(), and print the two per-copy averages as "rte/libc" ticks.
+ * Offsets are regenerated per batch outside the timed region.  This is
+ * a macro (not a function) to ensure that when "size" is a
+ * compile-time constant it is not converted to a variable, so
+ * rte_memcpy() can use its constant-size specialisation.
+ */
+#define SINGLE_PERF_TEST(dst, is_dst_cached, src, is_src_cached, size) do {   \
+       unsigned int iter, t;                                                 \
+       size_t dst_addrs[TEST_BATCH_SIZE], src_addrs[TEST_BATCH_SIZE];        \
+       uint64_t start_time, total_time = 0;                                  \
+       uint64_t total_time2 = 0;                                             \
+       for (iter = 0; iter < (TEST_ITERATIONS / TEST_BATCH_SIZE); iter++) {  \
+               fill_addr_arrays(dst_addrs, is_dst_cached,                    \
+                                src_addrs, is_src_cached);                   \
+               start_time = rte_rdtsc();                                     \
+               for (t = 0; t < TEST_BATCH_SIZE; t++)                         \
+                       rte_memcpy(dst+dst_addrs[t], src+src_addrs[t], size); \
+               total_time += rte_rdtsc() - start_time;                       \
+       }                                                                     \
+       for (iter = 0; iter < (TEST_ITERATIONS / TEST_BATCH_SIZE); iter++) {  \
+               fill_addr_arrays(dst_addrs, is_dst_cached,                    \
+                                src_addrs, is_src_cached);                   \
+               start_time = rte_rdtsc();                                     \
+               for (t = 0; t < TEST_BATCH_SIZE; t++)                         \
+                       memcpy(dst+dst_addrs[t], src+src_addrs[t], size);     \
+               total_time2 += rte_rdtsc() - start_time;                      \
+       }                                                                     \
+       printf("%9u/",  (unsigned)div_round(total_time, TEST_ITERATIONS));    \
+       printf("%4u",   (unsigned)div_round(total_time2, TEST_ITERATIONS));   \
+} while (0)
+
+/*
+ * Run the timing test above for every cached/uncached src/dst
+ * permutation of size n.  A compile-time-constant size is printed
+ * with a leading 'C' marker.
+ */
+#define ALL_PERF_TESTS_FOR_SIZE(n) do {                             \
+       if (__builtin_constant_p(n))                                \
+               printf("\nC%6u ", (unsigned)n);                     \
+       else                                                        \
+               printf("\n%7u ", (unsigned)n);                      \
+       SINGLE_PERF_TEST(small_buf_write, 1, small_buf_read, 1, n); \
+       SINGLE_PERF_TEST(large_buf_write, 0, small_buf_read, 1, n); \
+       SINGLE_PERF_TEST(small_buf_write, 1, large_buf_read, 0, n); \
+       SINGLE_PERF_TEST(large_buf_write, 0, large_buf_read, 0, n); \
+} while (0)
+
+/*
+ * Run performance tests for a number of different sizes and cached/uncached
+ * permutations.  Prints a table of per-copy tick counts
+ * (rte_memcpy/memcpy) for each buffer size.
+ * Returns 0 on success, or init_buffers()' error code on failure.
+ */
+static int
+perf_test(void)
+{
+       const unsigned num_buf_sizes = sizeof(buf_sizes) / sizeof(buf_sizes[0]);
+       unsigned i;
+       int ret;
+
+       ret = init_buffers();
+       if (ret != 0)
+               return ret;
+
+#if TEST_VALUE_RANGE != 0
+       /* Setup buf_sizes array, if required */
+       for (i = 0; i < TEST_VALUE_RANGE; i++)
+               buf_sizes[i] = i;
+#endif
+
+       /* See function comment */
+       do_uncached_write(large_buf_write, 0, small_buf_read, 1, SMALL_BUFFER_SIZE);
+
+       printf("\n** rte_memcpy()/memcpy performance tests **\n"
+              "======= ============== ============== ============== ==============\n"
+              "   Size Cache to cache   Cache to mem   Mem to cache     Mem to mem\n"
+              "(bytes)        (ticks)        (ticks)        (ticks)        (ticks)\n"
+              "------- -------------- -------------- -------------- --------------");
+
+       /* Do tests where size is a variable */
+       for (i = 0; i < num_buf_sizes; i++) {
+               ALL_PERF_TESTS_FOR_SIZE((size_t)buf_sizes[i]);
+       }
+
+#ifdef RTE_MEMCPY_BUILTIN_CONSTANT_P
+       /* Do tests where size is a compile-time constant */
+       ALL_PERF_TESTS_FOR_SIZE(63U);
+       ALL_PERF_TESTS_FOR_SIZE(64U);
+       ALL_PERF_TESTS_FOR_SIZE(65U);
+       ALL_PERF_TESTS_FOR_SIZE(255U);
+       ALL_PERF_TESTS_FOR_SIZE(256U);
+       ALL_PERF_TESTS_FOR_SIZE(257U);
+       ALL_PERF_TESTS_FOR_SIZE(1023U);
+       ALL_PERF_TESTS_FOR_SIZE(1024U);
+       ALL_PERF_TESTS_FOR_SIZE(1025U);
+       ALL_PERF_TESTS_FOR_SIZE(1518U);
+#endif
+       printf("\n======= ============== ============== ============== ==============\n\n");
+
+       free_buffers();
+
+       return 0;
+}
+
+/* A "base" copy routine paired with the fixed byte count it copies */
+struct base_memcpy_func {
+       void (*func)(uint8_t *dst, const uint8_t *src);
+       unsigned size;
+};
+
+/* Expands to an initialiser pairing rte_mov<n> with its size n */
+#define BASE_FUNC(n) {rte_mov##n, n}
+
+/* Max number of bytes that can be copied with a "base" memcpy function */
+#define MAX_BASE_FUNC_SIZE 256
+
+/*
+ * Test the fixed-size "base" copy routines (rte_mov16 .. rte_mov256):
+ * each must copy exactly its advertised number of bytes from src to
+ * dst.  Returns 0 on success, -1 on the first mismatching byte.
+ */
+static int
+base_func_test(void)
+{
+       const struct base_memcpy_func base_memcpy_funcs[6] = {
+               BASE_FUNC(16),
+               BASE_FUNC(32),
+               BASE_FUNC(48),
+               BASE_FUNC(64),
+               BASE_FUNC(128),
+               BASE_FUNC(256),
+       };
+       unsigned i, j;
+       unsigned num_funcs = sizeof(base_memcpy_funcs) / sizeof(base_memcpy_funcs[0]);
+       uint8_t dst[MAX_BASE_FUNC_SIZE];
+       uint8_t src[MAX_BASE_FUNC_SIZE];
+
+       for (i = 0; i < num_funcs; i++) {
+               unsigned size = base_memcpy_funcs[i].size;
+               /* random source, zeroed destination */
+               for (j = 0; j < size; j++) {
+                       dst[j] = 0;
+                       src[j] = (uint8_t) rte_rand();
+               }
+               base_memcpy_funcs[i].func(dst, src);
+               /* every byte must have been copied */
+               for (j = 0; j < size; j++)
+                       if (dst[j] != src[j])
+                               return -1;
+       }
+
+       return 0;
+}
+
+/*
+ * Create two buffers, and initialise one with random values. These are copied
+ * to the second buffer and then compared to see if the copy was successful.
+ * The bytes outside the copied area are also checked to make sure they were not
+ * changed.  Returns 0 on success, -1 (with a diagnostic) on any mismatch.
+ */
+static int
+test_single_memcpy(unsigned int off_src, unsigned int off_dst, size_t size)
+{
+       unsigned int i;
+       uint8_t dest[SMALL_BUFFER_SIZE + ALIGNMENT_UNIT];
+       uint8_t src[SMALL_BUFFER_SIZE + ALIGNMENT_UNIT];
+
+       /* Setup buffers: dest all zero, src random */
+       for (i = 0; i < SMALL_BUFFER_SIZE + ALIGNMENT_UNIT; i++) {
+               dest[i] = 0;
+               src[i] = (uint8_t) rte_rand();
+       }
+
+       /* Do the copy */
+       rte_memcpy(dest + off_dst, src + off_src, size);
+
+       /* Check nothing before offset is affected */
+       for (i = 0; i < off_dst; i++) {
+               if (dest[i] != 0) {
+                       printf("rte_memcpy() failed for %u bytes (offsets=%u,%u): "
+                              "[modified before start of dst].\n",
+                              (unsigned)size, off_src, off_dst);
+                       return -1;
+               }
+       }
+
+       /* Check everything was copied */
+       for (i = 0; i < size; i++) {
+               if (dest[i + off_dst] != src[i + off_src]) {
+                       printf("rte_memcpy() failed for %u bytes (offsets=%u,%u): "
+                              "[didn't copy byte %u].\n",
+                              (unsigned)size, off_src, off_dst, i);
+                       return -1;
+               }
+       }
+
+       /* Check nothing after copy was affected (dest was zeroed above) */
+       for (i = size; i < SMALL_BUFFER_SIZE; i++) {
+               if (dest[i + off_dst] != 0) {
+                       printf("rte_memcpy() failed for %u bytes (offsets=%u,%u): "
+                              "[copied too many].\n",
+                              (unsigned)size, off_src, off_dst);
+                       return -1;
+               }
+       }
+       return 0;
+}
+
+/*
+ * Functional check: run test_single_memcpy() for every combination of
+ * source offset, destination offset (0 .. ALIGNMENT_UNIT-1) and buffer
+ * size from buf_sizes[].  Returns 0 if every combination passes, -1 on
+ * the first failure.
+ */
+static int
+func_test(void)
+{
+       const unsigned int n_sizes = sizeof(buf_sizes) / sizeof(buf_sizes[0]);
+       unsigned int src_off, dst_off, idx;
+
+       for (src_off = 0; src_off < ALIGNMENT_UNIT; src_off++) {
+               for (dst_off = 0; dst_off < ALIGNMENT_UNIT; dst_off++) {
+                       for (idx = 0; idx < n_sizes; idx++) {
+                               if (test_single_memcpy(src_off, dst_off,
+                                                      buf_sizes[idx]) != 0)
+                                       return -1;
+                       }
+               }
+       }
+       return 0;
+}
+
+/*
+ * Entry point for the memcpy autotest: run the functional checks, the
+ * fixed-size base-routine checks, then the performance tests.
+ * Returns 0 only if all three stages succeed.
+ */
+int
+test_memcpy(void)
+{
+       if (func_test() != 0)
+               return -1;
+       if (base_func_test() != 0)
+               return -1;
+       if (perf_test() != 0)
+               return -1;
+       return 0;
+}
diff --git a/app/test/test_memory.c b/app/test/test_memory.c
new file mode 100644 (file)
index 0000000..8a25eca
--- /dev/null
@@ -0,0 +1,92 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+
+#include <cmdline_parse.h>
+
+#include <rte_memory.h>
+#include <rte_common.h>
+
+#include "test.h"
+
+/*
+ * Memory
+ * ======
+ *
+ * - Dump the mapped memory. The python-expect script checks that at
+ *   least one line is dumped.
+ *
+ * - Check that memory size is different than 0.
+ *
+ * - Try to read all memory; it should not segfault.
+ */
+
+/*
+ * Check the EAL memory configuration: dump the layout, verify the
+ * total physical memory size is non-zero, then read every byte of
+ * every memory segment to make sure the whole mapping is accessible
+ * (should not segfault).  Returns 0 on success, -1 if no memory is
+ * detected.
+ */
+int
+test_memory(void)
+{
+       uint64_t s;
+       unsigned i, j;
+       const struct rte_memseg *mem;
+       /* volatile: keep the dummy reads from being optimised away */
+       volatile uint8_t x;
+
+       /*
+        * dump the mapped memory: the python-expect script checks
+        * that at least one line is dumped
+        */
+       printf("Dump memory layout\n");
+       rte_dump_physmem_layout();
+
+       /* check that memory size is != 0 */
+       s = rte_eal_get_physmem_size();
+       if (s == 0) {
+               printf("No memory detected\n");
+               return -1;
+       }
+
+       /* try to read memory (should not segfault) */
+       mem = rte_eal_get_physmem_layout();
+       for (i = 0; i < RTE_MAX_MEMSEG && mem[i].addr != NULL ; i++) {
+
+               /* check memory */
+               for (j = 0; j<mem[i].len; j++) {
+                       x = *((uint8_t *) mem[i].addr + j);
+                       RTE_SET_USED(x);
+               }
+       }
+
+       return 0;
+}
diff --git a/app/test/test_mempool.c b/app/test/test_mempool.c
new file mode 100644 (file)
index 0000000..c181057
--- /dev/null
@@ -0,0 +1,707 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <string.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <sys/queue.h>
+
+#include <rte_common.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_launch.h>
+#include <rte_cycles.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_ring.h>
+#include <rte_mempool.h>
+#include <rte_spinlock.h>
+#include <rte_malloc.h>
+
+#include <cmdline_parse.h>
+
+#include "test.h"
+
+/*
+ * Mempool
+ * =======
+ *
+ * #. Basic tests: done on one core with and without cache:
+ *
+ *    - Get one object, put one object
+ *    - Get two objects, put two objects
+ *    - Get all objects, test that their content is not modified and
+ *      put them back in the pool.
+ *
+ * #. Performance tests:
+ *
+ *    Each core get *n_keep* objects per bulk of *n_get_bulk*. Then,
+ *    objects are put back in the pool per bulk of *n_put_bulk*.
+ *
+ *    This sequence is done during TIME_S seconds.
+ *
+ *    This test is done on the following configurations:
+ *
+ *    - Cores configuration (*cores*)
+ *
+ *      - One core with cache
+ *      - Two cores with cache
+ *      - Max. cores with cache
+ *      - One core without cache
+ *      - Two cores without cache
+ *      - Max. cores without cache
+ *
+ *    - Bulk size (*n_get_bulk*, *n_put_bulk*)
+ *
+ *      - Bulk get from 1 to 32
+ *      - Bulk put from 1 to 32
+ *
+ *    - Number of kept objects (*n_keep*)
+ *
+ *      - 32
+ *      - 128
+ */
+
+/* objects handled per timing round of the per-lcore perf loop */
+#define N 65536
+/* duration of each performance measurement, in seconds */
+#define TIME_S 5
+#define MEMPOOL_ELT_SIZE 2048
+/* max objects an lcore keeps out of the pool at once */
+#define MAX_KEEP 128
+#define MEMPOOL_SIZE ((RTE_MAX_LCORE*(MAX_KEEP+RTE_MEMPOOL_CACHE_MAX_SIZE))-1)
+
+/* pool under test -- presumably set to mp_cache or mp_nocache by the
+ * test driver (not visible in this chunk); verify against caller */
+static struct rte_mempool *mp;
+static struct rte_mempool *mp_cache, *mp_nocache;
+
+/* start barrier: slaves spin until the master sets this to 1 */
+static rte_atomic32_t synchro;
+
+/* number of objects in one bulk operation (get or put) */
+static unsigned n_get_bulk;
+static unsigned n_put_bulk;
+
+/* number of objects retrieved from mempool before putting them back */
+static unsigned n_keep;
+
+/* number of enqueues / dequeues */
+struct mempool_test_stats {
+       unsigned enq_count;
+} __rte_cache_aligned;
+
+static struct mempool_test_stats stats[RTE_MAX_LCORE];
+
+/*
+ * Per-lcore body of the mempool performance test: for TIME_S seconds,
+ * repeatedly get n_keep objects in bulks of n_get_bulk and put them
+ * back in bulks of n_put_bulk, counting the handled objects in
+ * stats[lcore_id].enq_count.  Returns 0 on success, -1 on invalid bulk
+ * parameters or if a bulk get fails (the objects already fetched would
+ * be lost).
+ */
+static int
+per_lcore_mempool_test(__attribute__((unused)) void *arg)
+{
+       void *obj_table[MAX_KEEP];
+       unsigned i, idx;
+       unsigned lcore_id = rte_lcore_id();
+       int ret;
+       uint64_t start_cycles, end_cycles;
+       uint64_t time_diff = 0, hz = rte_get_hpet_hz();
+
+       /* n_get_bulk and n_put_bulk must be divisors of n_keep */
+       if (((n_keep / n_get_bulk) * n_get_bulk) != n_keep)
+               return -1;
+       if (((n_keep / n_put_bulk) * n_put_bulk) != n_keep)
+               return -1;
+
+       stats[lcore_id].enq_count = 0;
+
+       /* wait synchro for slaves: spin until the master releases us */
+       if (lcore_id != rte_get_master_lcore())
+               while (rte_atomic32_read(&synchro) == 0);
+
+       start_cycles = rte_get_hpet_cycles();
+
+       while (time_diff/hz < TIME_S) {
+               for (i = 0; likely(i < (N/n_keep)); i++) {
+                       /* get n_keep objects by bulk of n_bulk */
+                       idx = 0;
+                       while (idx < n_keep) {
+                               ret = rte_mempool_get_bulk(mp, &obj_table[idx],
+                                                          n_get_bulk);
+                               if (unlikely(ret < 0)) {
+                                       rte_mempool_dump(mp);
+                                       rte_ring_dump(mp->ring);
+                                       /* in this case, objects are lost... */
+                                       return -1;
+                               }
+                               idx += n_get_bulk;
+                       }
+
+                       /* put the objects back */
+                       idx = 0;
+                       while (idx < n_keep) {
+                               rte_mempool_put_bulk(mp, &obj_table[idx],
+                                                    n_put_bulk);
+                               idx += n_put_bulk;
+                       }
+               }
+               end_cycles = rte_get_hpet_cycles();
+               time_diff = end_cycles - start_cycles;
+               stats[lcore_id].enq_count += N;
+       }
+
+       return 0;
+}
+
+/*
+ * Launch per_lcore_mempool_test() on "cores" lcores (the master plus
+ * cores-1 slaves), wait for them all, and print the aggregate
+ * objects-per-second rate.  Requires the pool to be full before the
+ * run.  Returns 0 on success, -1 if the pool is not full or any lcore
+ * reports a failure.
+ */
+static int
+launch_cores(unsigned cores)
+{
+       unsigned lcore_id;
+       unsigned rate;
+       int ret;
+       unsigned cores_save = cores;
+
+       /* hold the slaves at the barrier until everything is launched */
+       rte_atomic32_set(&synchro, 0);
+
+       /* reset stats */
+       memset(stats, 0, sizeof(stats));
+
+       printf("mempool_autotest cache=%u cores=%u n_get_bulk=%u "
+              "n_put_bulk=%u n_keep=%u ",
+              (unsigned) mp->cache_size, cores, n_get_bulk, n_put_bulk, n_keep);
+
+       if (rte_mempool_count(mp) != MEMPOOL_SIZE) {
+               printf("mempool is not full\n");
+               return -1;
+       }
+
+       /* launch the test on cores-1 slave lcores */
+       RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+               if (cores == 1)
+                       break;
+               cores--;
+               rte_eal_remote_launch(per_lcore_mempool_test,
+                                     NULL, lcore_id);
+       }
+
+       /* start synchro and launch test on master */
+       rte_atomic32_set(&synchro, 1);
+
+       ret = per_lcore_mempool_test(NULL);
+
+       /* wait for the same set of slaves that was launched above */
+       cores = cores_save;
+       RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+               if (cores == 1)
+                       break;
+               cores--;
+               if (rte_eal_wait_lcore(lcore_id) < 0)
+                       ret = -1;
+       }
+
+       if (ret < 0) {
+               printf("per-lcore test returned -1\n");
+               return -1;
+       }
+
+       rate = 0;
+       for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++)
+               rate += (stats[lcore_id].enq_count / TIME_S);
+
+       printf("rate_persec=%u\n", rate);
+
+       return 0;
+}
+
+/*
+ * For a given number of cores, sweep every combination of get-bulk
+ * size, put-bulk size and keep count (the tables below are
+ * zero-terminated) and run launch_cores() for each.  Returns -1 on the
+ * first failing combination, 0 otherwise.
+ */
+static int
+do_one_mempool_test(unsigned cores)
+{
+       unsigned get_tab[] = { 1, 4, 32, 0 };
+       unsigned put_tab[] = { 1, 4, 32, 0 };
+       unsigned keep_tab[] = { 32, 128, 0 };
+       unsigned a, b, c;
+
+       for (a = 0; get_tab[a] != 0; a++) {
+               for (b = 0; put_tab[b] != 0; b++) {
+                       for (c = 0; keep_tab[c] != 0; c++) {
+                               n_get_bulk = get_tab[a];
+                               n_put_bulk = put_tab[b];
+                               n_keep = keep_tab[c];
+                               if (launch_cores(cores) < 0)
+                                       return -1;
+                       }
+               }
+       }
+       return 0;
+}
+
+
+/*
+ * Mempool object constructor: save the object number in the first 4
+ * bytes of object data. All other bytes are set to 0.  Used so the
+ * basic test can later verify object content was not corrupted.
+ */
+static void
+my_obj_init(struct rte_mempool *mp, __attribute__((unused)) void *arg,
+           void *obj, unsigned i)
+{
+       uint32_t *objnum = obj;
+       memset(obj, 0, mp->elt_size);
+       *objnum = i;
+}
+
+/*
+ * Basic tests (done on one core): exercise the bulk-count accessors,
+ * single get/put, double get/put, then drain as much of the pool as
+ * possible and verify (via my_obj_init's layout) that no object's
+ * content was modified before returning everything to the pool.
+ * Returns 0 on success, -1 on any failure.
+ */
+static int
+test_mempool_basic(void)
+{
+       uint32_t *objnum;
+       void **objtable;
+       void *obj, *obj2;
+       char *obj_data;
+       int ret = 0;
+       unsigned i, j;
+       unsigned old_bulk_count;
+
+       /* dump the mempool status */
+       rte_mempool_dump(mp);
+       old_bulk_count = rte_mempool_get_bulk_count(mp);
+       rte_mempool_dump(mp);
+       /* bulk count 0 must be rejected; 2 and the old value must work */
+       if (rte_mempool_set_bulk_count(mp, 0) == 0)
+               return -1;
+       if (rte_mempool_get_bulk_count(mp) == 0)
+               return -1;
+       if (rte_mempool_set_bulk_count(mp, 2) < 0)
+               return -1;
+       if (rte_mempool_get_bulk_count(mp) != 2)
+               return -1;
+       rte_mempool_dump(mp);
+       if (rte_mempool_set_bulk_count(mp, old_bulk_count) < 0)
+               return -1;
+       if (rte_mempool_get_bulk_count(mp) != old_bulk_count)
+               return -1;
+       rte_mempool_dump(mp);
+
+       printf("get an object\n");
+       if (rte_mempool_get(mp, &obj) < 0)
+               return -1;
+       rte_mempool_dump(mp);
+
+       printf("put the object back\n");
+       rte_mempool_put(mp, obj);
+       rte_mempool_dump(mp);
+
+       printf("get 2 objects\n");
+       if (rte_mempool_get(mp, &obj) < 0)
+               return -1;
+       if (rte_mempool_get(mp, &obj2) < 0) {
+               rte_mempool_put(mp, obj);
+               return -1;
+       }
+       rte_mempool_dump(mp);
+
+       printf("put the objects back\n");
+       rte_mempool_put(mp, obj);
+       rte_mempool_put(mp, obj2);
+       rte_mempool_dump(mp);
+
+       /*
+        * get many objects: we cannot get them all because the cache
+        * on other cores may not be empty.
+        */
+       objtable = malloc(MEMPOOL_SIZE * sizeof(void *));
+       if (objtable == NULL) {
+               return -1;
+       }
+
+       for (i=0; i<MEMPOOL_SIZE; i++) {
+               if (rte_mempool_get(mp, &objtable[i]) < 0)
+                       break;
+       }
+
+       /*
+        * for each object, check that its content was not modified,
+        * and put objects back in pool
+        */
+       while (i--) {
+               obj = objtable[i];
+               obj_data = obj;
+               objnum = obj;
+               if (*objnum > MEMPOOL_SIZE) {
+                       printf("bad object number\n");
+                       ret = -1;
+                       break;
+               }
+               /* my_obj_init() zeroed everything after the object number */
+               for (j=sizeof(*objnum); j<mp->elt_size; j++) {
+                       if (obj_data[j] != 0)
+                               ret = -1;
+               }
+
+               rte_mempool_put(mp, objtable[i]);
+       }
+
+       free(objtable);
+       if (ret == -1)
+               printf("objects were modified!\n");
+
+       return ret;
+}
+
+/*
+ * Creating a mempool whose per-lcore cache exceeds
+ * RTE_MEMPOOL_CACHE_MAX_SIZE must fail.  Returns 0 when creation is
+ * rejected as expected, -1 if a pool was wrongly created.
+ */
+static int test_mempool_creation_with_exceeded_cache_size(void)
+{
+       struct rte_mempool *pool;
+
+       pool = rte_mempool_create("test_mempool_creation_with_exceeded_cache_size",
+                                 MEMPOOL_SIZE, MEMPOOL_ELT_SIZE,
+                                 RTE_MEMPOOL_CACHE_MAX_SIZE + 32, 0,
+                                 NULL, NULL,
+                                 my_obj_init, NULL,
+                                 SOCKET_ID_ANY, 0);
+
+       return (pool == NULL) ? 0 : -1;
+}
+
+/* pool and shared state for the single-producer/single-consumer test;
+ * scsp_obj_table[] is the hand-off area, guarded by scsp_spinlock */
+static struct rte_mempool *mp_spsc;
+static rte_spinlock_t scsp_spinlock;
+static void *scsp_obj_table[MAX_KEEP];
+
+/*
+ * single producer function
+ *
+ * For a fixed time window, scan the shared object table for a filled
+ * slot, return that object to the mempool and mark the slot empty.
+ * Returns 0 on success, -1 if a foreign object is found in the table.
+ */
+static int test_mempool_single_producer(void)
+{
+       unsigned int i;
+       void *obj = NULL;
+       uint64_t start_cycles, end_cycles;
+       uint64_t duration = rte_get_hpet_hz() * 8;
+
+       start_cycles = rte_get_hpet_cycles();
+       while (1) {
+               end_cycles = rte_get_hpet_cycles();
+               /* duration uses up, stop producing */
+               if (start_cycles + duration < end_cycles)
+                       break;
+               rte_spinlock_lock(&scsp_spinlock);
+               for (i = 0; i < MAX_KEEP; i ++) {
+                       /* braces added: previously the unconditional break
+                        * stopped the scan at i == 0 even when that slot was
+                        * empty, so obj could be stale or uninitialized. */
+                       if (NULL != scsp_obj_table[i]) {
+                               obj = scsp_obj_table[i];
+                               break;
+                       }
+               }
+               rte_spinlock_unlock(&scsp_spinlock);
+               /* no filled slot found this round, try again */
+               if (i >= MAX_KEEP) {
+                       continue;
+               }
+               if (rte_mempool_from_obj(obj) != mp_spsc) {
+                       printf("test_mempool_single_producer there is an obj not owned by this mempool\n");
+                       return -1;
+               }
+               rte_mempool_sp_put(mp_spsc, obj);
+               rte_spinlock_lock(&scsp_spinlock);
+               scsp_obj_table[i] = NULL;
+               rte_spinlock_unlock(&scsp_spinlock);
+       }
+
+       return 0;
+}
+
+/*
+ * single consumer function
+ *
+ * For a fixed time window, find an empty slot in the shared table,
+ * fetch a fresh object from the single-consumer mempool and park it
+ * there for the producer to reclaim.
+ */
+static int test_mempool_single_consumer(void)
+{
+       unsigned int slot;
+       void *fetched;
+       uint64_t t_begin;
+       uint64_t t_budget = rte_get_hpet_hz() * 5;
+
+       t_begin = rte_get_hpet_cycles();
+       for (;;) {
+               /* time budget exhausted: stop consuming */
+               if (t_begin + t_budget < rte_get_hpet_cycles())
+                       break;
+               rte_spinlock_lock(&scsp_spinlock);
+               for (slot = 0; slot < MAX_KEEP; slot++) {
+                       if (scsp_obj_table[slot] == NULL)
+                               break;
+               }
+               rte_spinlock_unlock(&scsp_spinlock);
+               /* table is full, retry until the producer drains a slot */
+               if (slot >= MAX_KEEP)
+                       continue;
+               if (rte_mempool_sc_get(mp_spsc, &fetched) < 0)
+                       break;
+               rte_spinlock_lock(&scsp_spinlock);
+               scsp_obj_table[slot] = fetched;
+               rte_spinlock_unlock(&scsp_spinlock);
+       }
+
+       return 0;
+}
+
+/*
+ * test function for mempool test based on single consumer and single producer, can run on one lcore only
+ */
+static int test_mempool_launch_single_consumer(__attribute__((unused)) void *arg)
+{
+       /* remote-launch trampoline: adapts the lcore_function_t signature */
+       return test_mempool_single_consumer();
+}
+
+/* mempool init callback: only reports which pool is being initialized */
+static void my_mp_init(struct rte_mempool * mp, __attribute__((unused)) void * arg)
+{
+       printf("mempool name is %s\n", mp->name);
+}
+
+/*
+ * it tests the mempool operations based on single producer and single consumer:
+ * the producer runs on the current lcore while the consumer is remote-launched
+ * on the next available lcore. Returns 0 on success, -1 on failure.
+ */
+static int
+test_mempool_sp_sc(void)
+{
+       int ret = 0;
+       unsigned lcore_id = rte_lcore_id();
+       unsigned lcore_next;
+
+       /* create a mempool with single producer/consumer ring */
+       if (NULL == mp_spsc) {
+               mp_spsc = rte_mempool_create("test_mempool_sp_sc", MEMPOOL_SIZE,
+                                               MEMPOOL_ELT_SIZE, 0, 0,
+                                               my_mp_init, NULL,
+                                               my_obj_init, NULL,
+                                               SOCKET_ID_ANY, MEMPOOL_F_NO_CACHE_ALIGN | MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET);
+               if (NULL == mp_spsc) {
+                       return -1;
+               }
+       }
+       /* the pool must be discoverable by name */
+       if (rte_mempool_lookup("test_mempool_sp_sc") != mp_spsc) {
+               printf("Cannot lookup mempool from its name\n");
+               return -1;
+       }
+       /* need a second lcore to run the consumer side */
+       lcore_next = rte_get_next_lcore(lcore_id, 0, 1);
+       if (RTE_MAX_LCORE <= lcore_next)
+               return -1;
+       if (rte_eal_lcore_role(lcore_next) != ROLE_RTE)
+               return -1;
+       /* reset the shared state before launching the consumer */
+       rte_spinlock_init(&scsp_spinlock);
+       memset(scsp_obj_table, 0, sizeof(scsp_obj_table));
+       rte_eal_remote_launch(test_mempool_launch_single_consumer, NULL, lcore_next);
+       if(test_mempool_single_producer() < 0)
+               ret = -1;
+
+       /* wait for the consumer lcore to finish before returning */
+       if(rte_eal_wait_lcore(lcore_next) < 0)
+               ret = -1;
+
+       return ret;
+}
+
+/*
+ * it tests some more basic of mempool: drain a full pool completely,
+ * check the empty/full predicates at both extremes, then refill it.
+ * Returns 0 on success, -1 on failure.
+ */
+static int
+test_mempool_basic_ex(struct rte_mempool * mp)
+{
+       unsigned i;
+       void **obj;
+       void *err_obj;
+       int ret = -1;
+
+       if (mp == NULL)
+               return ret;
+
+       /* scratch array holding one pointer per pool element */
+       obj = (void **)rte_zmalloc("test_mempool_basic_ex", (MEMPOOL_SIZE * sizeof(void *)), 0);
+       if (obj == NULL) {
+               printf("test_mempool_basic_ex fail to rte_malloc\n");
+               return ret;
+       }
+       printf("test_mempool_basic_ex now mempool (%s) has %u free entries\n", mp->name, rte_mempool_free_count(mp));
+       /* the caller is expected to hand us a completely full pool */
+       if (rte_mempool_full(mp) != 1) {
+               printf("test_mempool_basic_ex the mempool is not full but it should be\n");
+               goto fail_mp_basic_ex;
+       }
+
+       /* drain every element out of the pool */
+       for (i = 0; i < MEMPOOL_SIZE; i ++) {
+               if (rte_mempool_mc_get(mp, &obj[i]) < 0) {
+                       printf("fail_mp_basic_ex fail to get mempool object for [%u]\n", i);
+                       goto fail_mp_basic_ex;
+               }
+       }
+       /* one more get must fail: the pool is exhausted */
+       if (rte_mempool_mc_get(mp, &err_obj) == 0) {
+               printf("test_mempool_basic_ex get an impossible obj from mempool\n");
+               goto fail_mp_basic_ex;
+       }
+       printf("number: %u\n", i);
+       if (rte_mempool_empty(mp) != 1) {
+               printf("test_mempool_basic_ex the mempool is not empty but it should be\n");
+               goto fail_mp_basic_ex;
+       }
+
+       /* put everything back and verify the pool is full again */
+       for (i = 0; i < MEMPOOL_SIZE; i ++) {
+               rte_mempool_mp_put(mp, obj[i]);
+       }
+       if (rte_mempool_full(mp) != 1) {
+               printf("test_mempool_basic_ex the mempool is not full but it should be\n");
+               goto fail_mp_basic_ex;
+       }
+
+       ret = 0;
+
+fail_mp_basic_ex:
+       if (obj != NULL)
+               rte_free((void *)obj);
+
+       return ret;
+}
+
+/* Pool names are unique: a second creation with an existing name must fail. */
+static int
+test_mempool_same_name_twice_creation(void)
+{
+       struct rte_mempool *pool;
+
+       /* first creation with this name must succeed */
+       pool = rte_mempool_create("test_mempool_same_name_twice_creation", MEMPOOL_SIZE,
+                                               MEMPOOL_ELT_SIZE, 0, 0,
+                                               NULL, NULL,
+                                               NULL, NULL,
+                                               SOCKET_ID_ANY, 0);
+       if (pool == NULL)
+               return -1;
+
+       /* a second creation under the same name must be refused */
+       pool = rte_mempool_create("test_mempool_same_name_twice_creation", MEMPOOL_SIZE,
+                                               MEMPOOL_ELT_SIZE, 0, 0,
+                                               NULL, NULL,
+                                               NULL, NULL,
+                                               SOCKET_ID_ANY, 0);
+       if (pool != NULL)
+               return -1;
+
+       return 0;
+}
+
+/*
+ * Mempool test entry point: creates one cached and one uncached pool,
+ * then runs the basic, performance, sp/sc and error-path sub-tests.
+ * Returns 0 on success, -1 on the first failing sub-test.
+ */
+int
+test_mempool(void)
+{
+       rte_atomic32_init(&synchro);
+
+       /* create a mempool (without cache) */
+       if (mp_nocache == NULL)
+               mp_nocache = rte_mempool_create("test_nocache", MEMPOOL_SIZE,
+                                               MEMPOOL_ELT_SIZE, 0, 0,
+                                               NULL, NULL,
+                                               my_obj_init, NULL,
+                                               SOCKET_ID_ANY, 0);
+       if (mp_nocache == NULL)
+               return -1;
+
+       /* create a mempool (with cache) */
+       if (mp_cache == NULL)
+               mp_cache = rte_mempool_create("test_cache", MEMPOOL_SIZE,
+                                             MEMPOOL_ELT_SIZE,
+                                             RTE_MEMPOOL_CACHE_MAX_SIZE, 0,
+                                             NULL, NULL,
+                                             my_obj_init, NULL,
+                                             SOCKET_ID_ANY, 0);
+       if (mp_cache == NULL)
+               return -1;
+
+
+       /* retrieve the mempool from its name */
+       if (rte_mempool_lookup("test_nocache") != mp_nocache) {
+               printf("Cannot lookup mempool from its name\n");
+               return -1;
+       }
+
+       rte_mempool_list_dump();
+
+       /* basic tests without cache */
+       mp = mp_nocache;
+       if (test_mempool_basic() < 0)
+               return -1;
+
+       /* basic tests with cache */
+       mp = mp_cache;
+       if (test_mempool_basic() < 0)
+               return -1;
+
+       /* more basic tests without cache */
+       if (test_mempool_basic_ex(mp_nocache) < 0)
+               return -1;
+
+       /* performance test with 1, 2 and max cores */
+       printf("start performance test (without cache)\n");
+       mp = mp_nocache;
+
+       if (do_one_mempool_test(1) < 0)
+               return -1;
+
+       if (do_one_mempool_test(2) < 0)
+               return -1;
+
+       if (do_one_mempool_test(rte_lcore_count()) < 0)
+               return -1;
+
+       /* performance test with 1, 2 and max cores */
+       printf("start performance test (with cache)\n");
+       mp = mp_cache;
+
+       if (do_one_mempool_test(1) < 0)
+               return -1;
+
+       if (do_one_mempool_test(2) < 0)
+               return -1;
+
+       if (do_one_mempool_test(rte_lcore_count()) < 0)
+               return -1;
+
+       /* mempool operation test based on single producer and single consumer */
+       if (test_mempool_sp_sc() < 0)
+               return -1;
+
+       /* error paths: oversized cache and duplicate names must be rejected */
+       if (test_mempool_creation_with_exceeded_cache_size() < 0)
+               return -1;
+
+       if (test_mempool_same_name_twice_creation() < 0)
+               return -1;
+
+       rte_mempool_list_dump();
+
+       return 0;
+}
diff --git a/app/test/test_memzone.c b/app/test/test_memzone.c
new file mode 100644 (file)
index 0000000..dd66211
--- /dev/null
@@ -0,0 +1,639 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <sys/queue.h>
+
+#include <cmdline_parse.h>
+
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_common.h>
+
+#include "test.h"
+
+/*
+ * Memzone
+ * =======
+ *
+ * - Search for three reserved zones or reserve them if they do not exist:
+ *
+ *   - One is on any socket id.
+ *   - The second is on socket 0.
+ *   - The last one is on socket 1 (if socket 1 exists).
+ *
+ * - Check that the zones exist.
+ *
+ * - Check that the zones are cache-aligned.
+ *
+ * - Check that zones do not overlap.
+ *
+ * - Check that the zones are on the correct socket id.
+ *
+ * - Check that a lookup of the first zone returns the same pointer.
+ *
+ * - Check that it is not possible to create another zone with the
+ *   same name as an existing zone.
+ *
+ * - Check flags for specific huge page size reservation
+ */
+
+/* Test whether [ptr1, ptr1+len1) and [ptr2, ptr2+len2) intersect:
+ * returns 1 on overlap, 0 otherwise. */
+static int
+is_memory_overlap(phys_addr_t ptr1, size_t len1, phys_addr_t ptr2, size_t len2)
+{
+       /* whichever region starts first must end before the other begins */
+       if (ptr2 >= ptr1)
+               return (ptr2 - ptr1) < len1 ? 1 : 0;
+       return (ptr1 - ptr2) < len2 ? 1 : 0;
+}
+
+/* Reserving a zone with a non-power-of-two alignment must be rejected. */
+static int
+test_memzone_invalid_alignment(void)
+{
+       const struct rte_memzone *zone;
+
+       /* the zone must not already exist from an earlier run */
+       zone = rte_memzone_lookup("invalid_alignment");
+       if (zone != NULL) {
+               printf("Zone with invalid alignment has been reserved\n");
+               return -1;
+       }
+
+       /* 100 is not a power of two, so this reservation must fail */
+       zone = rte_memzone_reserve_aligned("invalid_alignment", 100,
+                       SOCKET_ID_ANY, 0, 100);
+       if (zone != NULL) {
+               printf("Zone with invalid alignment has been reserved\n");
+               return -1;
+       }
+       return 0;
+}
+
+/* Reserving a zone far larger than available memory must be rejected. */
+static int
+test_memzone_reserving_zone_size_bigger_than_the_maximum(void)
+{
+       const struct rte_memzone *zone;
+
+       /* the zone must not already exist from an earlier run */
+       zone = rte_memzone_lookup("zone_size_bigger_than_the_maximum");
+       if (zone != NULL) {
+               printf("zone_size_bigger_than_the_maximum has been reserved\n");
+               return -1;
+       }
+
+       /* 100 GB: far beyond anything the test system can provide */
+       zone = rte_memzone_reserve("zone_size_bigger_than_the_maximum", 0x1900000000ULL,
+                       SOCKET_ID_ANY, 0);
+       if (zone != NULL) {
+               printf("It is impossible to reserve such big a memzone\n");
+               return -1;
+       }
+
+       return 0;
+}
+
+/*
+ * Check that the RTE_MEMZONE_2MB / RTE_MEMZONE_1GB page-size flags are
+ * honoured, and that RTE_MEMZONE_SIZE_HINT_ONLY falls back to an
+ * available page size instead of failing the reservation outright.
+ */
+static int
+test_memzone_reserve_flags(void)
+{
+       const struct rte_memzone *mz;
+       const struct rte_memseg *ms;
+       int hugepage_2MB_avail = 0;
+       int hugepage_1GB_avail = 0;
+       const int size = 100;
+       int i = 0;
+       /* scan the physical memory layout for available huge page sizes */
+       ms = rte_eal_get_physmem_layout();
+       for (i = 0; i < RTE_MAX_MEMSEG; i++) {
+               if (ms[i].hugepage_sz == RTE_PGSIZE_2M)
+                       hugepage_2MB_avail = 1;
+               if (ms[i].hugepage_sz == RTE_PGSIZE_1G)
+                       hugepage_1GB_avail = 1;
+       }
+       /* Display the availability of 2MB and 1GB pages */
+       if (hugepage_2MB_avail)
+               printf("2MB Huge pages available\n");
+       if (hugepage_1GB_avail)
+               printf("1GB Huge pages available\n");
+       /*
+        * If 2MB pages available, check that a small memzone is correctly
+        * reserved from 2MB huge pages when requested by the RTE_MEMZONE_2MB flag.
+        * Also check that RTE_MEMZONE_SIZE_HINT_ONLY flag only defaults to an
+        * available page size (i.e 1GB ) when 2MB pages are unavailable.
+        */
+       if (hugepage_2MB_avail) {
+               mz = rte_memzone_reserve("flag_zone_2M", size, SOCKET_ID_ANY,
+                               RTE_MEMZONE_2MB);
+               if (mz == NULL) {
+                       printf("MEMZONE FLAG 2MB\n");
+                       return -1;
+               }
+               if (mz->hugepage_sz != RTE_PGSIZE_2M) {
+                       printf("hugepage_sz not equal 2M\n");
+                       return -1;
+               }
+
+               /* with the HINT flag the zone must still land on 2MB pages */
+               mz = rte_memzone_reserve("flag_zone_2M_HINT", size, SOCKET_ID_ANY,
+                               RTE_MEMZONE_2MB|RTE_MEMZONE_SIZE_HINT_ONLY);
+               if (mz == NULL) {
+                       printf("MEMZONE FLAG 2MB\n");
+                       return -1;
+               }
+               if (mz->hugepage_sz != RTE_PGSIZE_2M) {
+                       printf("hugepage_sz not equal 2M\n");
+                       return -1;
+               }
+
+               /* Check if 1GB huge pages are unavailable, that function fails unless
+                * HINT flag is indicated
+                */
+               if (!hugepage_1GB_avail) {
+                       mz = rte_memzone_reserve("flag_zone_1G_HINT", size, SOCKET_ID_ANY,
+                                       RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY);
+                       if (mz == NULL) {
+                               printf("MEMZONE FLAG 1GB & HINT\n");
+                               return -1;
+                       }
+                       /* the hint must fall back to the available 2MB pages */
+                       if (mz->hugepage_sz != RTE_PGSIZE_2M) {
+                               printf("hugepage_sz not equal 2M\n");
+                               return -1;
+                       }
+
+                       /* without the hint, asking for 1GB pages must fail */
+                       mz = rte_memzone_reserve("flag_zone_1G", size, SOCKET_ID_ANY,
+                                       RTE_MEMZONE_1GB);
+                       if (mz != NULL) {
+                               printf("MEMZONE FLAG 1GB\n");
+                               return -1;
+                       }
+               }
+       }
+
+       /*As with 2MB tests above for 1GB huge page requests*/
+       if (hugepage_1GB_avail) {
+               mz = rte_memzone_reserve("flag_zone_1G", size, SOCKET_ID_ANY,
+                               RTE_MEMZONE_1GB);
+               if (mz == NULL) {
+                       printf("MEMZONE FLAG 1GB\n");
+                       return -1;
+               }
+               if (mz->hugepage_sz != RTE_PGSIZE_1G) {
+                       printf("hugepage_sz not equal 1G\n");
+                       return -1;
+               }
+
+               mz = rte_memzone_reserve("flag_zone_1G_HINT", size, SOCKET_ID_ANY,
+                               RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY);
+               if (mz == NULL) {
+                       printf("MEMZONE FLAG 1GB\n");
+                       return -1;
+               }
+               if (mz->hugepage_sz != RTE_PGSIZE_1G) {
+                       printf("hugepage_sz not equal 1G\n");
+                       return -1;
+               }
+
+               /* Check if 2MB huge pages are unavailable, that function fails unless
+                * HINT flag is indicated
+                */
+               if (!hugepage_2MB_avail) {
+                       mz = rte_memzone_reserve("flag_zone_2M_HINT", size, SOCKET_ID_ANY,
+                                       RTE_MEMZONE_2MB|RTE_MEMZONE_SIZE_HINT_ONLY);
+                       if (mz == NULL){
+                               printf("MEMZONE FLAG 2MB & HINT\n");
+                               return -1;
+                       }
+                       /* the hint must fall back to the available 1GB pages */
+                       if (mz->hugepage_sz != RTE_PGSIZE_1G) {
+                               printf("hugepage_sz not equal 1G\n");
+                               return -1;
+                       }
+                       /* without the hint, asking for 2MB pages must fail */
+                       mz = rte_memzone_reserve("flag_zone_2M", size, SOCKET_ID_ANY,
+                                       RTE_MEMZONE_2MB);
+                       if (mz != NULL) {
+                               printf("MEMZONE FLAG 2MB\n");
+                               return -1;
+                       }
+               }
+
+               /* requesting both page sizes at once is contradictory: must fail */
+               if (hugepage_2MB_avail && hugepage_1GB_avail) {
+                       mz = rte_memzone_reserve("flag_zone_2M_HINT", size, SOCKET_ID_ANY,
+                                                               RTE_MEMZONE_2MB|RTE_MEMZONE_1GB);
+                       if (mz != NULL) {
+                               printf("BOTH SIZES SET\n");
+                               return -1;
+                       }
+               }
+       }
+       return 0;
+}
+
+/*
+ * Reserve a zone of size 0 (meaning "as big as possible") and check that
+ * its length equals the largest free chunk computed by walking all
+ * memsegs and subtracting the memzones already carved out of each.
+ */
+static int
+test_memzone_reserve_max(void)
+{
+       const struct rte_memzone *mz;
+       const struct rte_config *config;
+       const struct rte_memseg *ms;
+       int memseg_idx = 0;
+       int memzone_idx = 0;
+       uint64_t len = 0;
+       void* last_addr;
+       uint64_t maxlen = 0;
+
+       /* get pointer to global configuration */
+       config = rte_eal_get_configuration();
+
+       ms = rte_eal_get_physmem_layout();
+
+       for (memseg_idx = 0; memseg_idx < RTE_MAX_MEMSEG; memseg_idx++){
+               /* ignore smaller memsegs as they can only get smaller */
+               if (ms[memseg_idx].len < maxlen)
+                       continue;
+
+               /* start with the whole memseg and subtract what is used */
+               len = ms[memseg_idx].len;
+               last_addr = ms[memseg_idx].addr;
+
+               /* cycle through all memzones */
+               for (memzone_idx = 0; memzone_idx < RTE_MAX_MEMZONE; memzone_idx++) {
+
+                       /* stop when reaching last allocated memzone */
+                       if (config->mem_config->memzone[memzone_idx].addr == NULL)
+                               break;
+
+                       /* check if the memzone is in our memseg and subtract length */
+                       if ((config->mem_config->memzone[memzone_idx].addr >=
+                                       ms[memseg_idx].addr) &&
+                                       (config->mem_config->memzone[memzone_idx].addr <=
+                                       (RTE_PTR_ADD(ms[memseg_idx].addr,
+                                       (size_t)ms[memseg_idx].len)))) {
+                               /* since the zones can now be aligned and occasionally skip
+                                * some space, we should calculate the length based on
+                                * reported length and start addresses difference. Addresses
+                                * are allocated sequentially so we don't need to worry about
+                                * them being in the right order.
+                                */
+                               /* subtract the alignment gap before this zone ... */
+                               len -= (uintptr_t) RTE_PTR_SUB(
+                                               config->mem_config->memzone[memzone_idx].addr,
+                                               (uintptr_t) last_addr);
+                               /* ... then the zone itself */
+                               len -= config->mem_config->memzone[memzone_idx].len;
+                               last_addr =
+                                               RTE_PTR_ADD(config->mem_config->memzone[memzone_idx].addr,
+                                               (size_t) config->mem_config->memzone[memzone_idx].len);
+                       }
+               }
+
+               /* we don't need to calculate offset here since length
+                * is always cache-aligned */
+               if (len > maxlen)
+                       maxlen = len;
+       }
+
+       /* length 0 requests the biggest available chunk */
+       mz = rte_memzone_reserve("max_zone", 0, SOCKET_ID_ANY, 0);
+       if (mz == NULL){
+               printf("Failed to reserve a big chunk of memory\n");
+               rte_dump_physmem_layout();
+               rte_memzone_dump();
+               return -1;
+       }
+
+       if (mz->len != maxlen) {
+               printf("Memzone reserve with 0 size did not return bigest block\n");
+               printf("Expected size = %" PRIu64 ", actual size = %" PRIu64 "\n",
+                               maxlen, mz->len);
+               rte_dump_physmem_layout();
+               rte_memzone_dump();
+
+               return -1;
+       }
+       return 0;
+}
+
+/*
+ * Same as test_memzone_reserve_max() but with a 512-byte alignment
+ * request: the expected maximum length is additionally reduced by the
+ * padding needed to align the start of the biggest free chunk.
+ */
+static int
+test_memzone_reserve_max_aligned(void)
+{
+       const struct rte_memzone *mz;
+       const struct rte_config *config;
+       const struct rte_memseg *ms;
+       int memseg_idx = 0;
+       int memzone_idx = 0;
+       uint64_t addr_offset, len = 0;
+       void* last_addr;
+       uint64_t maxlen = 0;
+
+       /* get pointer to global configuration */
+       config = rte_eal_get_configuration();
+
+       ms = rte_eal_get_physmem_layout();
+
+       addr_offset = 0;
+
+       for (memseg_idx = 0; memseg_idx < RTE_MAX_MEMSEG; memseg_idx++){
+
+               /* ignore smaller memsegs as they can only get smaller */
+               if (ms[memseg_idx].len < maxlen)
+                       continue;
+
+               /* start with the whole memseg and subtract what is used */
+               len = ms[memseg_idx].len;
+               last_addr = ms[memseg_idx].addr;
+
+               /* cycle through all memzones */
+               for (memzone_idx = 0; memzone_idx < RTE_MAX_MEMZONE; memzone_idx++) {
+
+                       /* stop when reaching last allocated memzone */
+                       if (config->mem_config->memzone[memzone_idx].addr == NULL)
+                               break;
+
+                       /* check if the memzone is in our memseg and subtract length */
+                       if ((config->mem_config->memzone[memzone_idx].addr >=
+                                       ms[memseg_idx].addr) &&
+                                       (config->mem_config->memzone[memzone_idx].addr <=
+                                       (RTE_PTR_ADD(ms[memseg_idx].addr,
+                                       (size_t) ms[memseg_idx].len)))) {
+                               /* since the zones can now be aligned and occasionally skip
+                                * some space, we should calculate the length based on
+                                * reported length and start addresses difference.
+                                */
+                               /* subtract the alignment gap before this zone ... */
+                               len -= (uintptr_t) RTE_PTR_SUB(
+                                               config->mem_config->memzone[memzone_idx].addr,
+                                               (uintptr_t) last_addr);
+                               /* ... then the zone itself */
+                               len -= config->mem_config->memzone[memzone_idx].len;
+                               last_addr =
+                                               RTE_PTR_ADD(config->mem_config->memzone[memzone_idx].addr,
+                                               (size_t) config->mem_config->memzone[memzone_idx].len);
+                       }
+               }
+
+               /* make sure we get the alignment offset */
+               if (len > maxlen) {
+                       addr_offset = RTE_ALIGN_CEIL((uintptr_t) last_addr, 512) - (uintptr_t) last_addr;
+                       maxlen = len;
+               }
+       }
+
+       /* the padding to reach 512-byte alignment is lost to the caller */
+       maxlen -= addr_offset;
+
+       /* length 0 requests the biggest chunk satisfying the alignment */
+       mz = rte_memzone_reserve_aligned("max_zone_aligned", 0,
+                       SOCKET_ID_ANY, 0, 512);
+       if (mz == NULL){
+               printf("Failed to reserve a big chunk of memory\n");
+               rte_dump_physmem_layout();
+               rte_memzone_dump();
+               return -1;
+       }
+
+       if (mz->len != maxlen) {
+               printf("Memzone reserve with 0 size and alignment 512 did not return"
+                               " bigest block\n");
+               printf("Expected size = %" PRIu64 ", actual size = %" PRIu64 "\n",
+                               maxlen, mz->len);
+               rte_dump_physmem_layout();
+               rte_memzone_dump();
+
+               return -1;
+       }
+       return 0;
+}
+
+/*
+ * Reserve zones with increasing alignment requirements, then verify
+ * the physical/virtual alignment and length of each and check that no
+ * two zones overlap. Returns 0 on success, -1 on failure.
+ */
+static int
+test_memzone_aligned(void)
+{
+       const struct rte_memzone *memzone_aligned_32;
+       const struct rte_memzone *memzone_aligned_128;
+       const struct rte_memzone *memzone_aligned_256;
+       const struct rte_memzone *memzone_aligned_512;
+       const struct rte_memzone *memzone_aligned_1024;
+
+       /* memzone that should automatically be adjusted to align on 64 bytes */
+       memzone_aligned_32 = rte_memzone_lookup("aligned_32");
+       if (memzone_aligned_32 == NULL)
+               memzone_aligned_32 = rte_memzone_reserve_aligned("aligned_32", 100,
+                               SOCKET_ID_ANY, 0, 32);
+
+       /* memzone that is supposed to be aligned on a 128 byte boundary */
+       memzone_aligned_128 = rte_memzone_lookup("aligned_128");
+       if (memzone_aligned_128 == NULL)
+               memzone_aligned_128 = rte_memzone_reserve_aligned("aligned_128", 100,
+                               SOCKET_ID_ANY, 0, 128);
+
+       /* memzone that is supposed to be aligned on a 256 byte boundary */
+       memzone_aligned_256 = rte_memzone_lookup("aligned_256");
+       if (memzone_aligned_256 == NULL)
+               memzone_aligned_256 = rte_memzone_reserve_aligned("aligned_256", 100,
+                               SOCKET_ID_ANY, 0, 256);
+
+       /* memzone that is supposed to be aligned on a 512 byte boundary */
+       memzone_aligned_512 = rte_memzone_lookup("aligned_512");
+       if (memzone_aligned_512 == NULL)
+               memzone_aligned_512 = rte_memzone_reserve_aligned("aligned_512", 100,
+                               SOCKET_ID_ANY, 0, 512);
+
+       /* memzone that is supposed to be aligned on a 1024 byte boundary */
+       memzone_aligned_1024 = rte_memzone_lookup("aligned_1024");
+       if (memzone_aligned_1024 == NULL)
+               memzone_aligned_1024 = rte_memzone_reserve_aligned("aligned_1024", 100,
+                               SOCKET_ID_ANY, 0, 1024);
+
+       /* a failed reservation previously led to a NULL dereference below;
+        * fail the test cleanly instead of crashing */
+       if (memzone_aligned_32 == NULL || memzone_aligned_128 == NULL ||
+                       memzone_aligned_256 == NULL || memzone_aligned_512 == NULL ||
+                       memzone_aligned_1024 == NULL) {
+               printf("Unable to reserve aligned memzone\n");
+               return -1;
+       }
+
+       printf("check alignments and lengths\n");
+       if ((memzone_aligned_32->phys_addr & CACHE_LINE_MASK) != 0)
+               return -1;
+       if (((uintptr_t) memzone_aligned_32->addr & CACHE_LINE_MASK) != 0)
+               return -1;
+       if ((memzone_aligned_32->len & CACHE_LINE_MASK) != 0)
+               return -1;
+       if ((memzone_aligned_128->phys_addr & 127) != 0)
+               return -1;
+       if (((uintptr_t) memzone_aligned_128->addr & 127) != 0)
+               return -1;
+       if ((memzone_aligned_128->len & CACHE_LINE_MASK) != 0)
+               return -1;
+       if ((memzone_aligned_256->phys_addr & 255) != 0)
+               return -1;
+       if (((uintptr_t) memzone_aligned_256->addr & 255) != 0)
+               return -1;
+       if ((memzone_aligned_256->len & CACHE_LINE_MASK) != 0)
+               return -1;
+       if ((memzone_aligned_512->phys_addr & 511) != 0)
+               return -1;
+       if (((uintptr_t) memzone_aligned_512->addr & 511) != 0)
+               return -1;
+       if ((memzone_aligned_512->len & CACHE_LINE_MASK) != 0)
+               return -1;
+       if ((memzone_aligned_1024->phys_addr & 1023) != 0)
+               return -1;
+       if (((uintptr_t) memzone_aligned_1024->addr & 1023) != 0)
+               return -1;
+       if ((memzone_aligned_1024->len & CACHE_LINE_MASK) != 0)
+               return -1;
+
+       /* check that zones don't overlap */
+       printf("check overlapping\n");
+       if (is_memory_overlap(memzone_aligned_32->phys_addr, memzone_aligned_32->len,
+                                       memzone_aligned_128->phys_addr, memzone_aligned_128->len))
+               return -1;
+       if (is_memory_overlap(memzone_aligned_32->phys_addr, memzone_aligned_32->len,
+                                       memzone_aligned_256->phys_addr, memzone_aligned_256->len))
+               return -1;
+       if (is_memory_overlap(memzone_aligned_32->phys_addr, memzone_aligned_32->len,
+                                       memzone_aligned_512->phys_addr, memzone_aligned_512->len))
+               return -1;
+       if (is_memory_overlap(memzone_aligned_32->phys_addr, memzone_aligned_32->len,
+                                       memzone_aligned_1024->phys_addr, memzone_aligned_1024->len))
+               return -1;
+       if (is_memory_overlap(memzone_aligned_128->phys_addr, memzone_aligned_128->len,
+                                       memzone_aligned_256->phys_addr, memzone_aligned_256->len))
+               return -1;
+       if (is_memory_overlap(memzone_aligned_128->phys_addr, memzone_aligned_128->len,
+                                       memzone_aligned_512->phys_addr, memzone_aligned_512->len))
+               return -1;
+       if (is_memory_overlap(memzone_aligned_128->phys_addr, memzone_aligned_128->len,
+                                       memzone_aligned_1024->phys_addr, memzone_aligned_1024->len))
+               return -1;
+       if (is_memory_overlap(memzone_aligned_256->phys_addr, memzone_aligned_256->len,
+                                       memzone_aligned_512->phys_addr, memzone_aligned_512->len))
+               return -1;
+       if (is_memory_overlap(memzone_aligned_256->phys_addr, memzone_aligned_256->len,
+                                       memzone_aligned_1024->phys_addr, memzone_aligned_1024->len))
+               return -1;
+       if (is_memory_overlap(memzone_aligned_512->phys_addr, memzone_aligned_512->len,
+                                       memzone_aligned_1024->phys_addr, memzone_aligned_1024->len))
+               return -1;
+       return 0;
+}
+
+/*
+ * Top-level memzone test: reserve (or look up) three named zones, check
+ * cache-line alignment of their physical addresses and lengths, check
+ * that they do not overlap, verify socket placement and name lookup,
+ * then run the more specific sub-tests.
+ *
+ * Returns 0 on success, -1 on any failure.
+ */
+int
+test_memzone(void)
+{
+       const struct rte_memzone *memzone1;
+       const struct rte_memzone *memzone2;
+       const struct rte_memzone *memzone3;
+       const struct rte_memzone *mz;
+
+       /* lookup-then-reserve so the test can be re-run in the same
+        * process without failing on duplicate names */
+       memzone1 = rte_memzone_lookup("testzone1");
+       if (memzone1 == NULL)
+               memzone1 = rte_memzone_reserve("testzone1", 100,
+                               SOCKET_ID_ANY, 0);
+
+       memzone2 = rte_memzone_lookup("testzone2");
+       if (memzone2 == NULL)
+               memzone2 = rte_memzone_reserve("testzone2", 1000,
+                               0, 0);
+
+       memzone3 = rte_memzone_lookup("testzone3");
+       if (memzone3 == NULL)
+               memzone3 = rte_memzone_reserve("testzone3", 1000,
+                               1, 0);
+
+       /* memzone3 may be NULL if we don't have NUMA */
+       if (memzone1 == NULL || memzone2 == NULL)
+               return -1;
+
+       rte_memzone_dump();
+
+       /* check cache-line alignments */
+       printf("check alignments and lengths\n");
+
+       if ((memzone1->phys_addr & CACHE_LINE_MASK) != 0)
+               return -1;
+       if ((memzone2->phys_addr & CACHE_LINE_MASK) != 0)
+               return -1;
+       if (memzone3 != NULL && (memzone3->phys_addr & CACHE_LINE_MASK) != 0)
+               return -1;
+       if ((memzone1->len & CACHE_LINE_MASK) != 0 || memzone1->len == 0)
+               return -1;
+       if ((memzone2->len & CACHE_LINE_MASK) != 0 || memzone2->len == 0)
+               return -1;
+       if (memzone3 != NULL && ((memzone3->len & CACHE_LINE_MASK) != 0 ||
+                       memzone3->len == 0))
+               return -1;
+
+       /* check that zones don't overlap */
+       printf("check overlapping\n");
+
+       if (is_memory_overlap(memzone1->phys_addr, memzone1->len,
+                       memzone2->phys_addr, memzone2->len))
+               return -1;
+       if (memzone3 != NULL &&
+                       is_memory_overlap(memzone1->phys_addr, memzone1->len,
+                                       memzone3->phys_addr, memzone3->len))
+               return -1;
+       if (memzone3 != NULL &&
+                       is_memory_overlap(memzone2->phys_addr, memzone2->len,
+                                       memzone3->phys_addr, memzone3->len))
+               return -1;
+
+       printf("check socket ID\n");
+
+       /* memzone2 must be on socket id 0 and memzone3 on socket 1 */
+       if (memzone2->socket_id != 0)
+               return -1;
+       if (memzone3 != NULL && memzone3->socket_id != 1)
+               return -1;
+
+       printf("test zone lookup\n");
+       mz = rte_memzone_lookup("testzone1");
+       if (mz != memzone1)
+               return -1;
+
+       /* a second reserve under an existing name must fail */
+       printf("test duplicate zone name\n");
+       mz = rte_memzone_reserve("testzone1", 100,
+                       SOCKET_ID_ANY, 0);
+       if (mz != NULL)
+               return -1;
+
+       printf("test reserving memzone with bigger size than the maximum\n");
+       if (test_memzone_reserving_zone_size_bigger_than_the_maximum() < 0)
+               return -1;
+
+       printf("test reserving the largest size memzone possible\n");
+       if (test_memzone_reserve_max() < 0)
+               return -1;
+
+       printf("test memzone_reserve flags\n");
+       if (test_memzone_reserve_flags() < 0)
+               return -1;
+
+       printf("test alignment for memzone_reserve\n");
+       if (test_memzone_aligned() < 0)
+               return -1;
+
+       printf("test invalid alignment for memzone_reserve\n");
+       if (test_memzone_invalid_alignment() < 0)
+               return -1;
+
+       printf("test reserving the largest size aligned memzone possible\n");
+       if (test_memzone_reserve_max_aligned() < 0)
+               return -1;
+
+       return 0;
+}
diff --git a/app/test/test_mp_secondary.c b/app/test/test_mp_secondary.c
new file mode 100644 (file)
index 0000000..c40a508
--- /dev/null
@@ -0,0 +1,236 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <stdio.h>
+
+#include <cmdline_parse.h>
+
+#include "test.h"
+
+#ifndef RTE_EXEC_ENV_BAREMETAL
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <inttypes.h>
+#include <sys/queue.h>
+#include <errno.h>
+#include <stdarg.h>
+#include <inttypes.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <unistd.h>
+#include <sys/wait.h>
+
+#include <rte_common.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_launch.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_errno.h>
+#include <rte_branch_prediction.h>
+#include <rte_atomic.h>
+#include <rte_ring.h>
+#include <rte_debug.h>
+#include <stdarg.h>
+#include <rte_log.h>
+#include <rte_mempool.h>
+#include <rte_hash.h>
+#include <rte_fbk_hash.h>
+#include <rte_lpm.h>
+#include <rte_string_fns.h>
+
+#include "process.h"
+
+#define launch_proc(ARGV) process_dup(ARGV, \
+               sizeof(ARGV)/(sizeof(ARGV[0])), __func__)
+
+/*
+ * This function is called in the primary i.e. main test, to spawn off secondary
+ * processes to run actual mp tests. Uses fork() and exec pair
+ */
+/*
+ * Spawn secondary processes via fork()+exec (through launch_proc) and
+ * aggregate their results: the two good --proc-type values must succeed,
+ * the two bad invocations must fail (hence the negation of their
+ * return values). Returns 0 if all four behaved as expected.
+ */
+static int
+run_secondary_instances(void)
+{
+       int ret = 0;
+       char coremask[10];
+
+       /* good case, using secondary */
+       const char *argv1[] = {
+                       prgname, "-c", coremask, "--proc-type=secondary"
+       };
+       /* good case, using auto */
+       const char *argv2[] = {
+                       prgname, "-c", coremask, "--proc-type=auto"
+       };
+       /* bad case, using invalid type */
+       const char *argv3[] = {
+                       prgname, "-c", coremask, "--proc-type=ERROR"
+       };
+       /* bad case, using invalid file prefix */
+       const char *argv4[]  = {
+                       prgname, "-c", coremask, "--proc-type=secondary",
+                                       "--file-prefix=ERROR"
+       };
+
+       /* unsigned shift: "1 <<" would be undefined behaviour for a
+        * master lcore id of 31 or above */
+       rte_snprintf(coremask, sizeof(coremask), "%x",
+                       (1u << rte_get_master_lcore()));
+
+       ret |= launch_proc(argv1);
+       ret |= launch_proc(argv2);
+
+       /* the bad cases are expected to fail, so invert their status */
+       ret |= !(launch_proc(argv3));
+       ret |= !(launch_proc(argv4));
+
+       return ret;
+}
+
+/*
+ * This function is run in the secondary instance to test that creation of
+ * objects fails in a secondary
+ */
+/*
+ * Run in the secondary instance: every object-creation API must fail
+ * and set rte_errno to E_RTE_SECONDARY, since only the primary process
+ * may create memzones/rings/mempools/hashes/LPM tables.
+ * Returns 0 on success, -1 on the first unexpected result.
+ */
+static int
+run_object_creation_tests(void)
+{
+       const unsigned flags = 0;
+       const unsigned size = 1024;
+       const unsigned elt_size = 64;
+       const unsigned cache_size = 64;
+       const unsigned priv_data_size = 32;
+
+       printf("### Testing object creation - expect lots of mz reserve errors!\n");
+
+       rte_errno = 0;
+       if (rte_memzone_reserve("test_mz", size, rte_socket_id(), flags) != NULL
+                       || rte_errno != E_RTE_SECONDARY){
+               printf("Error: unexpected return value from rte_memzone_reserve\n");
+               return -1;
+       }
+       printf("# Checked rte_memzone_reserve() OK\n");
+
+       rte_errno = 0;
+       if (rte_ring_create("test_rng", size, rte_socket_id(), flags) != NULL
+                       || rte_errno != E_RTE_SECONDARY){
+               printf("Error: unexpected return value from rte_ring_create()\n");
+               return -1;
+       }
+       printf("# Checked rte_ring_create() OK\n");
+
+       rte_errno = 0;
+       if (rte_mempool_create("test_mp", size, elt_size, cache_size,
+                       priv_data_size, NULL, NULL, NULL, NULL,
+                       rte_socket_id(), flags) != NULL
+                       || rte_errno != E_RTE_SECONDARY){
+               printf("Error: unexpected return value from rte_mempool_create()\n");
+               return -1;
+       }
+       printf("# Checked rte_mempool_create() OK\n");
+
+       const struct rte_hash_parameters hash_params = { .name = "test_mp_hash" };
+       rte_errno = 0;
+       if (rte_hash_create(&hash_params) != NULL
+                       || rte_errno != E_RTE_SECONDARY){
+               printf("Error: unexpected return value from rte_hash_create()\n");
+               return -1;
+       }
+       printf("# Checked rte_hash_create() OK\n");
+
+       const struct rte_fbk_hash_params fbk_params = { .name = "test_mp_hash" };
+       rte_errno = 0;
+       if (rte_fbk_hash_create(&fbk_params) != NULL
+                       || rte_errno != E_RTE_SECONDARY){
+               printf("Error: unexpected return value from rte_fbk_hash_create()\n");
+               return -1;
+       }
+       printf("# Checked rte_fbk_hash_create() OK\n");
+
+       rte_errno = 0;
+       if (rte_lpm_create("test_lpm", size, rte_socket_id(), RTE_LPM_HEAP) != NULL
+                       || rte_errno != E_RTE_SECONDARY){
+               printf("Error: unexpected return value from rte_lpm_create()\n");
+               return -1;
+       }
+       printf("# Checked rte_lpm_create() OK\n");
+
+       /* A PCI scan should still be possible in a secondary; tolerate a
+        * failure when not running as root, where sysfs access may fail. */
+       if (test_pci() != 0) {
+               printf("PCI scan failed in secondary\n");
+               if (getuid() == 0) /* pci scans can fail as non-root */
+                       return -1;
+       } else
+               printf("PCI scan succeeded in secondary\n");
+
+       return 0;
+}
+
+/* if called in a primary process, just spawns off a secondary process to
+ * run validation tests - which brings us right back here again...
+ * if called in a secondary process, this runs a series of API tests to check
+ * how things run in a secondary instance.
+ */
+int
+test_mp_secondary(void)
+{
+       /* Spawned secondary instance: exercise the creation APIs. */
+       if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+               printf("IN SECONDARY PROCESS\n");
+               return run_object_creation_tests();
+       }
+
+       /* Primary instance: make sure the PCI test has run first (it is
+        * a prerequisite checked via test_pci_run), then spawn the
+        * secondary instances — which re-enter this function above. */
+       if (!test_pci_run) {
+               printf("=== Running pre-requisite test of test_pci\n");
+               test_pci();
+               printf("=== Requisite test done\n");
+       }
+       return run_secondary_instances();
+}
+
+#else
+
+/* Baremetal version
+ * Multiprocess not applicable, so just return 0 always
+ */
+/* Baremetal stub: there is no OS, hence no processes, so this variant
+ * simply reports that fact and always succeeds. */
+int
+test_mp_secondary(void)
+{
+       printf("Multi-process not applicable for baremetal\n");
+       return 0;
+}
+
+#endif
diff --git a/app/test/test_pci.c b/app/test/test_pci.c
new file mode 100644 (file)
index 0000000..1c0c4ed
--- /dev/null
@@ -0,0 +1,192 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <stdint.h>
+#include <sys/queue.h>
+
+#include <cmdline_parse.h>
+
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+
+#include "test.h"
+
+#define RTE_PCI_DEV_ID_DECL(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
+
+#define        TEST_BLACKLIST_NUM      0x100
+
+/*
+ * PCI test
+ * ========
+ *
+ * - Register a driver with a ``devinit()`` function.
+ *
+ * - Dump all PCI devices.
+ *
+ * - Check that the ``devinit()`` function is called at least once.
+ */
+
+int test_pci_run = 0; /* value checked by the multiprocess test */
+/* number of devices the devinit callback matched in the last probe */
+static unsigned pci_dev_count;
+/* non-zero once my_driver/my_driver2 have been registered with the EAL */
+static unsigned driver_registered = 0;
+/* scratch list of device addresses filled by blacklist_all_devices() */
+static struct rte_pci_addr blacklist[TEST_BLACKLIST_NUM];
+
+static int my_driver_init(struct rte_pci_driver *dr,
+                         struct rte_pci_device *dev);
+
+/*
+ * To test cases where RTE_PCI_DRV_NEED_IGB_UIO is set, and isn't set, two
+ * drivers are created (one with IGB devices, the other with IXGBE devices).
+ */
+
+/* IXGBE NICS + e1000 used for Qemu */
+#define RTE_LIBRTE_IXGBE_PMD 1
+#undef RTE_LIBRTE_IGB_PMD
+struct rte_pci_id my_driver_id[] = {
+
+#include <rte_pci_dev_ids.h>
+
+/* this device is the e1000 of qemu for testing */
+{RTE_PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x100E)},
+
+{ .vendor_id = 0, /* sentinel */ },
+};
+
+struct rte_pci_id my_driver_id2[] = {
+
+/* IGB NICS */
+#undef RTE_LIBRTE_IXGBE_PMD
+#define RTE_LIBRTE_IGB_PMD 1
+#define RTE_PCI_DEV_USE_82575EB_COPPER
+#include <rte_pci_dev_ids.h>
+
+{ .vendor_id = 0, /* sentinel */ },
+};
+
+/* driver 1: binds the IXGBE + qemu-e1000 id table; exercises the
+ * RTE_PCI_DRV_NEED_IGB_UIO flag */
+struct rte_pci_driver my_driver = {
+       .name = "test_driver",
+       .devinit = my_driver_init,
+       .id_table = my_driver_id,
+       .drv_flags = RTE_PCI_DRV_NEED_IGB_UIO,
+};
+
+/* driver 2: binds the IGB id table; exercises the no-flags path */
+struct rte_pci_driver my_driver2 = {
+       .name = "test_driver2",
+       .devinit = my_driver_init,
+       .id_table = my_driver_id2,
+       .drv_flags = 0,
+};
+
+/*
+ * devinit callback shared by both test drivers: logs the driver name
+ * and the matched device's address/ids, and counts the match in
+ * pci_dev_count so test_pci() can verify the probe results.
+ * Note: 'dr' IS used (dr->name), so it must not be marked unused.
+ */
+static int
+my_driver_init(struct rte_pci_driver *dr,
+              struct rte_pci_device *dev)
+{
+       printf("My driver init called in %s\n", dr->name);
+       printf("%x:%x:%x.%d", dev->addr.domain, dev->addr.bus,
+              dev->addr.devid, dev->addr.function);
+       printf(" - vendor:%x device:%x\n", dev->id.vendor_id, dev->id.device_id);
+
+       pci_dev_count++;
+       return 0;
+}
+
+/* Remove any active PCI blacklist: a NULL list of size 0 clears it. */
+static void
+blacklist_clear(void)
+{
+       rte_eal_pci_set_blacklist(NULL, 0);
+}
+
+
+
+/*
+ * Walk the global device_list and blacklist every device found, up to
+ * the capacity of the static blacklist[] array; then install the list
+ * via rte_eal_pci_set_blacklist() and report how many were added.
+ */
+static void
+blacklist_all_devices(void)
+{
+       struct rte_pci_device *dev = NULL;
+       unsigned idx = 0;
+
+       memset(blacklist, 0, sizeof (blacklist));
+
+       TAILQ_FOREACH(dev, &device_list, next) {
+               /* stop rather than overrun the fixed-size array */
+               if (idx >= sizeof (blacklist) / sizeof (blacklist[0])) {
+                       printf("Error: too many devices to blacklist\n");
+                       break;
+               }
+               blacklist[idx] = dev->addr;
+               ++idx;
+       }
+
+       rte_eal_pci_set_blacklist(blacklist, idx);
+       printf("%u devices blacklisted\n", idx);
+}
+
+/*
+ * PCI test entry point: register both test drivers (once per process),
+ * probe the bus and require at least one matched device; then blacklist
+ * every device, re-probe, and require zero matches. Sets test_pci_run
+ * on success so the multi-process test can use it as a prerequisite.
+ * Returns 0 on success, -1 on failure.
+ */
+int
+test_pci(void)
+{
+
+       printf("Dump all devices\n");
+       rte_eal_pci_dump();
+       /* register only on the first call; the EAL keeps the driver
+        * list across invocations */
+       if (driver_registered == 0) {
+               rte_eal_pci_register(&my_driver);
+               rte_eal_pci_register(&my_driver2);
+               driver_registered = 1;
+       }
+
+       /* pci_dev_count is incremented by my_driver_init() per match */
+       pci_dev_count = 0;
+       printf("Scan bus\n");
+       rte_eal_pci_probe();
+
+       if (pci_dev_count == 0) {
+               printf("no device detected\n");
+               return -1;
+       }
+
+       blacklist_all_devices();
+
+       pci_dev_count = 0;
+       printf("Scan bus with all devices blacklisted\n");
+       rte_eal_pci_probe();
+
+       /* restore state so later tests see an empty blacklist */
+       blacklist_clear();
+
+       if (pci_dev_count != 0) {
+               printf("not all devices are blacklisted\n");
+               return -1;
+       }
+
+       test_pci_run = 1;
+       return 0;
+}
diff --git a/app/test/test_per_lcore.c b/app/test/test_per_lcore.c
new file mode 100644 (file)
index 0000000..001ae03
--- /dev/null
@@ -0,0 +1,142 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <sys/queue.h>
+
+#include <cmdline_parse.h>
+
+#include <rte_common.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_per_lcore.h>
+#include <rte_launch.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_cycles.h>
+
+#include "test.h"
+
+/*
+ * Per-lcore variables and lcore launch
+ * ====================================
+ *
+ * - Use ``rte_eal_mp_remote_launch()`` to call ``assign_vars()`` on
+ *   every available lcore. In this function, a per-lcore variable is
+ *   assigned to the lcore_id.
+ *
+ * - Use ``rte_eal_mp_remote_launch()`` to call ``display_vars()`` on
+ *   every available lcore. The function checks that the variable is
+ *   correctly set, or returns -1.
+ *
+ * - If at least one per-core variable was not correct, the test function
+ *   returns -1.
+ */
+
+static RTE_DEFINE_PER_LCORE(unsigned, test) = 0x12345678;
+
+/* Launched on every worker lcore: verify the per-lcore variable still
+ * holds its initial magic value, then overwrite it with the lcore id. */
+static int
+assign_vars(__attribute__((unused)) void *arg)
+{
+       if (RTE_PER_LCORE(test) == 0x12345678) {
+               RTE_PER_LCORE(test) = rte_lcore_id();
+               return 0;
+       }
+       return -1;
+}
+
+/* Launched on every worker lcore: print and check that the per-lcore
+ * variable equals the lcore id set by assign_vars(), then restore the
+ * initial magic value so the test can be re-run. */
+static int
+display_vars(__attribute__((unused)) void *arg)
+{
+       const unsigned core_id = rte_lcore_id();
+       const unsigned value = RTE_PER_LCORE(test);
+       const unsigned sock = rte_lcore_to_socket_id(core_id);
+
+       printf("on socket %u, on core %u, variable is %u\n", sock, core_id, value);
+       if (core_id != value)
+               return -1;
+
+       RTE_PER_LCORE(test) = 0x12345678;
+       return 0;
+}
+
+/* Busy-wait 5 seconds on the executing lcore; used to keep workers
+ * occupied so the double-remote-launch check below can be exercised.
+ * The message is printed after the delay completes. */
+static int
+test_per_lcore_delay(__attribute__((unused)) void *arg)
+{
+       rte_delay_ms(5000);
+       printf("wait 5000ms on lcore %u\n", rte_lcore_id());
+
+       return 0;
+}
+
+/*
+ * Per-lcore test driver: run assign_vars then display_vars on every
+ * worker lcore (waiting for completion between phases), then check that
+ * a second remote launch is refused while the lcores are still busy.
+ * Returns 0 on success, -1 on failure.
+ */
+int
+test_per_lcore(void)
+{
+       unsigned lcore_id;
+       int ret;
+
+       rte_eal_mp_remote_launch(assign_vars, NULL, SKIP_MASTER);
+       RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+               if (rte_eal_wait_lcore(lcore_id) < 0)
+                       return -1;
+       }
+
+       rte_eal_mp_remote_launch(display_vars, NULL, SKIP_MASTER);
+       RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+               if (rte_eal_wait_lcore(lcore_id) < 0)
+                       return -1;
+       }
+
+       /* test if it could do remote launch twice at the same time or not */
+       ret = rte_eal_mp_remote_launch(test_per_lcore_delay, NULL, SKIP_MASTER);
+       if (ret < 0) {
+               printf("It fails to do remote launch but it should able to do\n");
+               return -1;
+       }
+       /* it should not be able to launch a lcore which is running */
+       ret = rte_eal_mp_remote_launch(test_per_lcore_delay, NULL, SKIP_MASTER);
+       if (ret == 0) {
+               printf("It does remote launch successfully but it should not at this time\n");
+               return -1;
+       }
+       /* reap the delay workers before returning */
+       RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+               if (rte_eal_wait_lcore(lcore_id) < 0)
+                       return -1;
+       }
+
+       return 0;
+}
diff --git a/app/test/test_prefetch.c b/app/test/test_prefetch.c
new file mode 100644 (file)
index 0000000..8a9b439
--- /dev/null
@@ -0,0 +1,63 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+
+#include <cmdline_parse.h>
+
+#include <rte_prefetch.h>
+
+#include "test.h"
+
+/*
+ * Prefetch test
+ * =============
+ *
+ * - Just test that the macro can be called and validate the compilation.
+ *   The test always return success.
+ */
+
+/* Compile-time smoke test: invoke each prefetch macro once on a local
+ * address. Prefetching has no observable effect, so this always
+ * returns 0 — the point is that the macros expand and compile. */
+int
+test_prefetch(void)
+{
+       int target;
+
+       rte_prefetch0(&target);
+       rte_prefetch1(&target);
+       rte_prefetch2(&target);
+
+       return 0;
+}
diff --git a/app/test/test_ring.c b/app/test/test_ring.c
new file mode 100644 (file)
index 0000000..d6bb44b
--- /dev/null
@@ -0,0 +1,987 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <string.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <errno.h>
+#include <sys/queue.h>
+
+#include <rte_common.h>
+#include <rte_log.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_launch.h>
+#include <rte_cycles.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_malloc.h>
+#include <rte_ring.h>
+#include <rte_random.h>
+#include <rte_common.h>
+#include <rte_errno.h>
+
+#include <cmdline_parse.h>
+
+#include "test.h"
+
+/*
+ * Ring
+ * ====
+ *
+ * #. Basic tests: done on one core:
+ *
+ *    - Using single producer/single consumer functions:
+ *
+ *      - Enqueue one object, two objects, MAX_BULK objects
+ *      - Dequeue one object, two objects, MAX_BULK objects
+ *      - Check that dequeued pointers are correct
+ *
+ *    - Using multi producers/multi consumers functions:
+ *
+ *      - Enqueue one object, two objects, MAX_BULK objects
+ *      - Dequeue one object, two objects, MAX_BULK objects
+ *      - Check that dequeued pointers are correct
+ *
+ *    - Test watermark and default bulk enqueue/dequeue:
+ *
+ *      - Set watermark
+ *      - Set default bulk value
+ *      - Enqueue objects, check that -EDQUOT is returned when
+ *        watermark is exceeded
+ *      - Check that dequeued pointers are correct
+ *
+ * #. Check quota and watermark
+ *
+ *    - Start a loop on another lcore that will enqueue and dequeue
+ *      objects in a ring. It will monitor the value of quota (default
+ *      bulk count) and watermark.
+ *    - At the same time, change the quota and the watermark on the
+ *      master lcore.
+ *    - The slave lcore will check that bulk count changes from 4 to
+ *      8, and watermark changes from 16 to 32.
+ *
+ * #. Performance tests.
+ *
+ *    This test is done on the following configurations:
+ *
+ *    - One core enqueuing, one core dequeuing
+ *    - One core enqueuing, other cores dequeuing
+ *    - One core dequeuing, other cores enqueuing
+ *    - Half of the cores enqueuing, the other half dequeuing
+ *
+ *    When only one core enqueues/dequeues, the test is done with the
+ *    SP/SC functions in addition to the MP/MC functions.
+ *
+ *    The test is done with different bulk size.
+ *
+ *    On each core, the test enqueues or dequeues objects during
+ *    TIME_S seconds. The number of successes and failures are stored on
+ *    each core, then summed and displayed.
+ *
+ *    The test checks that the number of enqueues is equal to the
+ *    number of dequeues.
+ */
+
+/* Tunables for the functional and performance tests below. */
+#define RING_SIZE 4096 /* slots in the shared test ring (power of two) */
+#define MAX_BULK 32    /* largest bulk enqueue/dequeue size exercised */
+#define N 65536        /* enqueue/dequeue attempts per timing window */
+#define TIME_S 5       /* duration of each performance run, in seconds */
+
+/* released (set to 1) by the master lcore to start all workers at once */
+static rte_atomic32_t synchro;
+
+/* bulk sizes read by the generated per-lcore worker functions */
+static unsigned bulk_enqueue;
+static unsigned bulk_dequeue;
+/* ring under test, shared by every lcore */
+static struct rte_ring *r;
+
+/* per-lcore result counters; cache-aligned since each lcore writes its
+ * own entry concurrently */
+struct test_stats {
+       unsigned enq_success ;
+       unsigned enq_quota;
+       unsigned enq_fail;
+
+       unsigned deq_success;
+       unsigned deq_fail;
+} __rte_cache_aligned;
+
+static struct test_stats test_stats[RTE_MAX_LCORE];
+
+/*
+ * DEFINE_ENQUEUE_FUNCTION(name, enq_code) expands to a per-lcore worker
+ * "name" that evaluates "enq_code" (an rte_ring enqueue expression
+ * returning 0 on success, -EDQUOT when the watermark is exceeded, or
+ * another error) in a tight loop for TIME_S seconds, then stores the
+ * success/quota/fail counters into test_stats[lcore_id].
+ * Slave lcores spin on "synchro" until the master releases them.
+ */
+#define DEFINE_ENQUEUE_FUNCTION(name, enq_code)                        \
+static int                                                     \
+name(__attribute__((unused)) void *arg)                                \
+{                                                              \
+       unsigned success = 0;                                   \
+       unsigned quota = 0;                                     \
+       unsigned fail = 0;                                      \
+       unsigned i;                                             \
+       unsigned long dummy_obj;                                \
+       void *obj_table[MAX_BULK];                              \
+       int ret;                                                \
+       unsigned lcore_id = rte_lcore_id();                     \
+       uint64_t start_cycles, end_cycles;                      \
+       uint64_t time_diff = 0, hz = rte_get_hpet_hz();         \
+                                                               \
+       /* init dummy object table */                           \
+       for (i = 0; i< MAX_BULK; i++) {                         \
+               dummy_obj = lcore_id + 0x1000 + i;              \
+               obj_table[i] = (void *)dummy_obj;               \
+       }                                                       \
+                                                               \
+       /* wait synchro for slaves */                           \
+       if (lcore_id != rte_get_master_lcore())                 \
+               while (rte_atomic32_read(&synchro) == 0);       \
+                                                               \
+       start_cycles = rte_get_hpet_cycles();                   \
+                                                               \
+       /* enqueue as many object as possible */                \
+       while (time_diff/hz < TIME_S) {                         \
+               for (i = 0; likely(i < N); i++) {               \
+                       ret = enq_code;                         \
+                       if (ret == 0)                           \
+                               success++;                      \
+                       else if (ret == -EDQUOT)                \
+                               quota++;                        \
+                       else                                    \
+                               fail++;                         \
+               }                                               \
+               end_cycles = rte_get_hpet_cycles();             \
+               time_diff = end_cycles - start_cycles;          \
+       }                                                       \
+                                                               \
+       /* write statistics in a shared structure */            \
+       test_stats[lcore_id].enq_success = success;             \
+       test_stats[lcore_id].enq_quota = quota;                 \
+       test_stats[lcore_id].enq_fail = fail;                   \
+                                                               \
+       return 0;                                               \
+}
+
+/*
+ * DEFINE_DEQUEUE_FUNCTION(name, deq_code) is the dequeue counterpart of
+ * DEFINE_ENQUEUE_FUNCTION: the generated worker evaluates "deq_code"
+ * for TIME_S seconds and records success/fail counts in
+ * test_stats[lcore_id] (there is no quota outcome on dequeue).
+ */
+#define DEFINE_DEQUEUE_FUNCTION(name, deq_code)                        \
+static int                                                     \
+name(__attribute__((unused)) void *arg)                                \
+{                                                              \
+       unsigned success = 0;                                   \
+       unsigned fail = 0;                                      \
+       unsigned i;                                             \
+       void *obj_table[MAX_BULK];                              \
+       int ret;                                                \
+       unsigned lcore_id = rte_lcore_id();                     \
+       uint64_t start_cycles, end_cycles;                      \
+       uint64_t time_diff = 0, hz = rte_get_hpet_hz();         \
+                                                               \
+       /* wait synchro for slaves */                           \
+       if (lcore_id != rte_get_master_lcore())                 \
+               while (rte_atomic32_read(&synchro) == 0);       \
+                                                               \
+       start_cycles = rte_get_hpet_cycles();                   \
+                                                               \
+       /* dequeue as many object as possible */                \
+       while (time_diff/hz < TIME_S) {                         \
+               for (i = 0; likely(i < N); i++) {               \
+                       ret = deq_code;                         \
+                       if (ret == 0)                           \
+                               success++;                      \
+                       else                                    \
+                               fail++;                         \
+               }                                               \
+               end_cycles = rte_get_hpet_cycles();             \
+               time_diff = end_cycles - start_cycles;          \
+       }                                                       \
+                                                               \
+       /* write statistics in a shared structure */            \
+       test_stats[lcore_id].deq_success = success;             \
+       test_stats[lcore_id].deq_fail = fail;                   \
+                                                               \
+       return 0;                                               \
+}
+
+/* Concrete per-lcore workers: single-producer/consumer and
+ * multi-producer/consumer bulk variants, using the global bulk sizes. */
+DEFINE_ENQUEUE_FUNCTION(test_ring_per_core_sp_enqueue,
+                       rte_ring_sp_enqueue_bulk(r, obj_table, bulk_enqueue))
+
+DEFINE_DEQUEUE_FUNCTION(test_ring_per_core_sc_dequeue,
+                       rte_ring_sc_dequeue_bulk(r, obj_table, bulk_dequeue))
+
+DEFINE_ENQUEUE_FUNCTION(test_ring_per_core_mp_enqueue,
+                       rte_ring_mp_enqueue_bulk(r, obj_table, bulk_enqueue))
+
+DEFINE_DEQUEUE_FUNCTION(test_ring_per_core_mc_dequeue,
+                       rte_ring_mc_dequeue_bulk(r, obj_table, bulk_dequeue))
+
+/*
+ * TEST_RING_VERIFY(exp): if "exp" is false, print the failing condition
+ * with function/line, dump the ring state and make the *enclosing*
+ * function return -1. Only usable inside functions returning int.
+ */
+#define        TEST_RING_VERIFY(exp)                                           \
+       if (!(exp)) {                                                   \
+               printf("error at %s:%d\tcondition " #exp " failed\n",   \
+                   __func__, __LINE__);                                \
+               rte_ring_dump(r);                                       \
+               return (-1);                                            \
+       }
+
+/* iterations of the full/empty cycle (name keeps historical typo) */
+#define        TEST_RING_FULL_EMTPY_ITER       8
+
+
+/*
+ * Run one timed enqueue/dequeue workload.
+ *
+ * enq_core_count lcores run an enqueue worker and deq_core_count lcores
+ * run a dequeue worker; the master lcore itself acts as one of the
+ * dequeuers (it calls deq_f directly below). sp/sc select the
+ * single-producer/single-consumer ring functions instead of MP/MC.
+ *
+ * Returns 0 when the total number of objects enqueued equals the total
+ * dequeued (counting what is left in the ring), -1 otherwise.
+ */
+static int
+launch_cores(unsigned enq_core_count, unsigned deq_core_count, int sp, int sc)
+{
+       void *obj;
+       unsigned lcore_id;
+       unsigned rate, deq_remain = 0;
+       unsigned enq_total, deq_total;
+       struct test_stats sum;
+       int (*enq_f)(void *);
+       int (*deq_f)(void *);
+       unsigned cores = enq_core_count + deq_core_count;
+       int ret;
+
+       /* hold the workers until everything is launched */
+       rte_atomic32_set(&synchro, 0);
+
+       printf("ring_autotest e/d_core=%u,%u e/d_bulk=%u,%u ",
+              enq_core_count, deq_core_count, bulk_enqueue, bulk_dequeue);
+       printf("sp=%d sc=%d ", sp, sc);
+
+       /* set enqueue function to be used */
+       if (sp)
+               enq_f = test_ring_per_core_sp_enqueue;
+       else
+               enq_f = test_ring_per_core_mp_enqueue;
+
+       /* set dequeue function to be used */
+       if (sc)
+               deq_f = test_ring_per_core_sc_dequeue;
+       else
+               deq_f = test_ring_per_core_mc_dequeue;
+
+       /* Hand out roles to the slave lcores. The dequeue count is
+        * compared against 1, not 0, because the last dequeue slot is
+        * taken by the master lcore, which runs deq_f itself below.
+        * NOTE(review): a slave can qualify for both branches here;
+        * presumably the second rte_eal_remote_launch then fails on the
+        * busy lcore so enqueue roles are filled first — confirm against
+        * the EAL launch API semantics. */
+       RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+               if (enq_core_count != 0) {
+                       enq_core_count--;
+                       rte_eal_remote_launch(enq_f, NULL, lcore_id);
+               }
+               if (deq_core_count != 1) {
+                       deq_core_count--;
+                       rte_eal_remote_launch(deq_f, NULL, lcore_id);
+               }
+       }
+
+       memset(test_stats, 0, sizeof(test_stats));
+
+       /* start synchro and launch test on master */
+       rte_atomic32_set(&synchro, 1);
+       ret = deq_f(NULL);
+
+       /* wait all cores; "cores" includes the master, so stop when it
+        * is the only one left */
+       RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+               if (cores == 1)
+                       break;
+               cores--;
+               if (rte_eal_wait_lcore(lcore_id) < 0)
+                       ret = -1;
+       }
+
+       /* sum the per-lcore counters */
+       memset(&sum, 0, sizeof(sum));
+       for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+               sum.enq_success += test_stats[lcore_id].enq_success;
+               sum.enq_quota += test_stats[lcore_id].enq_quota;
+               sum.enq_fail += test_stats[lcore_id].enq_fail;
+               sum.deq_success += test_stats[lcore_id].deq_success;
+               sum.deq_fail += test_stats[lcore_id].deq_fail;
+       }
+
+       /* empty the ring, counting leftovers so enq/deq totals balance */
+       while (rte_ring_sc_dequeue(r, &obj) == 0)
+               deq_remain += 1;
+
+       if (ret < 0) {
+               printf("per-lcore test returned -1\n");
+               return -1;
+       }
+
+       /* -EDQUOT enqueues still stored the objects, so count them too */
+       enq_total = (sum.enq_success * bulk_enqueue) +
+               (sum.enq_quota * bulk_enqueue);
+       deq_total = (sum.deq_success * bulk_dequeue) + deq_remain;
+
+       rate = deq_total/TIME_S;
+
+       printf("rate_persec=%u\n", rate);
+
+       if (enq_total != deq_total) {
+               printf("invalid enq/deq_success counter: %u %u\n",
+                      enq_total, deq_total);
+               return -1;
+       }
+
+       return 0;
+}
+
+/*
+ * Run launch_cores() for one core/bulk configuration, with every legal
+ * SP/SC combination: the single-producer (resp. single-consumer) path
+ * is only exercised when exactly one core enqueues (resp. dequeues).
+ * Sets the global bulk sizes read by the worker functions.
+ * Returns 0 on success, -1 on the first failing run.
+ */
+static int
+do_one_ring_test2(unsigned enq_core_count, unsigned deq_core_count,
+                 unsigned n_enq_bulk, unsigned n_deq_bulk)
+{
+       int sp, sc;
+       int do_sp, do_sc;
+       int ret;
+
+       bulk_enqueue = n_enq_bulk;
+       bulk_dequeue = n_deq_bulk;
+
+       do_sp = (enq_core_count == 1) ? 1 : 0;
+       do_sc = (deq_core_count  == 1) ? 1 : 0;
+
+       for (sp = 0; sp <= do_sp; sp ++) {
+               for (sc = 0; sc <= do_sc; sc ++) {
+                       ret = launch_cores(enq_core_count,
+                                          deq_core_count,
+                                          sp, sc);
+                       if (ret < 0)
+                               return -1;
+               }
+       }
+       return 0;
+}
+
+/*
+ * Run do_one_ring_test2() for every combination of enqueue/dequeue
+ * bulk sizes in {1, 2, 4, 32} (the tables are 0-terminated).
+ * Returns 0 on success, -1 on the first failing combination.
+ */
+static int
+do_one_ring_test(unsigned enq_core_count, unsigned deq_core_count)
+{
+       unsigned bulk_enqueue_tab[] = { 1, 2, 4, 32, 0 };
+       unsigned bulk_dequeue_tab[] = { 1, 2, 4, 32, 0 };
+       unsigned *bulk_enqueue_ptr;
+       unsigned *bulk_dequeue_ptr;
+       int ret;
+
+       for (bulk_enqueue_ptr = bulk_enqueue_tab;
+            *bulk_enqueue_ptr;
+            bulk_enqueue_ptr++) {
+
+               for (bulk_dequeue_ptr = bulk_dequeue_tab;
+                    *bulk_dequeue_ptr;
+                    bulk_dequeue_ptr++) {
+
+                       ret = do_one_ring_test2(enq_core_count, deq_core_count,
+                                               *bulk_enqueue_ptr,
+                                               *bulk_dequeue_ptr);
+                       if (ret < 0)
+                               return -1;
+               }
+       }
+       return 0;
+}
+
+/*
+ * Worker launched on a second lcore by test_quota_and_watermark().
+ *
+ * For about 2 seconds (hz * 2 HPET cycles) it repeatedly:
+ *  - reads the quota (default bulk count), accepting only the expected
+ *    live change from 4 to 8;
+ *  - enqueues bulk-by-bulk until -EDQUOT, inferring the watermark as
+ *    i * quota (number of bulks times bulk size at the stop point),
+ *    accepting only the expected change from 16 to 32;
+ *  - drains everything it enqueued.
+ * Finally verifies that both changes were actually observed.
+ * Returns 0 on success, -1 on any unexpected value.
+ */
+static int
+check_quota_and_watermark(__attribute__((unused)) void *dummy)
+{
+       uint64_t hz = rte_get_hpet_hz();
+       void *obj_table[MAX_BULK];
+       unsigned watermark, watermark_old = 16;
+       uint64_t cur_time, end_time;
+       int64_t diff = 0;
+       int i, ret;
+       unsigned quota, quota_old = 4;
+
+       /* init the object table */
+       memset(obj_table, 0, sizeof(obj_table));
+       end_time = rte_get_hpet_cycles() + (hz * 2);
+
+       /* check that bulk and watermark are 4 and 32 (respectively) */
+       while (diff >= 0) {
+
+               /* read quota, the only change allowed is from 4 to 8 */
+               quota = rte_ring_get_bulk_count(r);
+               if (quota != quota_old && (quota_old != 4 || quota != 8)) {
+                       printf("Bad quota change %u -> %u\n", quota_old,
+                              quota);
+                       return -1;
+               }
+               quota_old = quota;
+
+               /* add in ring until we reach watermark */
+               ret = 0;
+               for (i = 0; i < 16; i ++) {
+                       if (ret != 0)
+                               break;
+                       ret = rte_ring_enqueue_bulk(r, obj_table, quota);
+               }
+
+               if (ret != -EDQUOT) {
+                       printf("Cannot enqueue objects, or watermark not "
+                              "reached (ret=%d)\n", ret);
+                       return -1;
+               }
+
+               /* read watermark, the only change allowed is from 16 to 32 */
+               watermark = i * quota;
+               if (watermark != watermark_old &&
+                   (watermark_old != 16 || watermark != 32)) {
+                       printf("Bad watermark change %u -> %u\n", watermark_old,
+                              watermark);
+                       return -1;
+               }
+               watermark_old = watermark;
+
+               /* dequeue objects from ring (i bulks were enqueued,
+                * -EDQUOT included) */
+               while (i--) {
+                       ret = rte_ring_dequeue_bulk(r, obj_table, quota);
+                       if (ret != 0) {
+                               printf("Cannot dequeue (ret=%d)\n", ret);
+                               return -1;
+                       }
+               }
+
+               cur_time = rte_get_hpet_cycles();
+               diff = end_time - cur_time;
+       }
+
+       /* both live updates must have been observed before the deadline */
+       if (watermark_old != 32 || quota_old != 8) {
+               printf("quota or watermark was not updated (q=%u wm=%u)\n",
+                      quota_old, watermark_old);
+               return -1;
+       }
+
+       return 0;
+}
+
+/*
+ * Verify that quota (default bulk count) and watermark can be changed
+ * while another lcore is actively using the ring: start
+ * check_quota_and_watermark() on the next lcore with quota=4 and
+ * watermark=16, then after 1s switch to quota=8 / watermark=32.
+ * Returns 0 on success, -1 if the checker lcore reports a failure.
+ */
+static int
+test_quota_and_watermark(void)
+{
+       unsigned lcore_id = rte_lcore_id();
+       unsigned lcore_id2 = rte_get_next_lcore(lcore_id, 0, 1);
+
+       printf("Test quota and watermark live modification\n");
+
+       rte_ring_set_bulk_count(r, 4);
+       rte_ring_set_water_mark(r, 16);
+
+       /* launch a thread that will enqueue and dequeue, checking
+        * watermark and quota */
+       rte_eal_remote_launch(check_quota_and_watermark, NULL, lcore_id2);
+
+       rte_delay_ms(1000);
+       rte_ring_set_bulk_count(r, 8);
+       rte_ring_set_water_mark(r, 32);
+       rte_delay_ms(1000);
+
+       if (rte_eal_wait_lcore(lcore_id2) < 0)
+               return -1;
+
+       return 0;
+}
+/*
+ * Check that rte_ring_set_water_mark() catches invalid values:
+ * a count larger than the ring size must return -EINVAL, and a count
+ * of 0 must succeed and reset the watermark to the ring size
+ * (i.e. disabled). Looks up the "test_ring_basic_ex" ring, so it must
+ * run after test_ring_basic_ex(). Returns 0 on success, -1 on failure.
+ */
+static int
+test_set_watermark(void)
+{
+       unsigned count;
+       int setwm;
+
+       struct rte_ring *r = rte_ring_lookup("test_ring_basic_ex");
+       if (r == NULL) {
+               printf( " ring lookup failed\n" );
+               goto error;
+       }
+
+       /* a watermark above the ring capacity must be refused */
+       count = r->prod.size * 2;
+       setwm = rte_ring_set_water_mark(r, count);
+       if (setwm != -EINVAL) {
+               printf("Test failed to detect invalid watermark count value\n");
+               goto error;
+       }
+
+       /* count 0 must succeed and reset the watermark to the ring size;
+        * also check the (previously ignored) return value */
+       count = 0;
+       setwm = rte_ring_set_water_mark(r, count);
+       if (setwm != 0 || r->prod.watermark != r->prod.size) {
+               printf("Test failed to reset watermark to default value\n");
+               goto error;
+       }
+       return 0;
+
+error:
+       return -1;
+}
+
+/*
+ * Helper for test_ring_basic(): repeatedly fill the ring completely,
+ * verify the full/empty accessors, then drain it and verify again.
+ * Each iteration first enqueues and dequeues a random number of objects
+ * so the ring head/tail start from a different offset every time.
+ * src and dst must each hold at least RING_SIZE-1 pointers.
+ * Returns 0 on success, -1 (via TEST_RING_VERIFY) on failure.
+ */
+static int
+test_ring_basic_full_empty(void * const src[], void *dst[])
+{
+       unsigned i, rand;
+       const unsigned rsz = RING_SIZE - 1;
+
+       printf("Basic full/empty test\n");
+
+       for (i = 0; TEST_RING_FULL_EMTPY_ITER != i; i++) {
+
+               /* random shift in the ring */
+               rand = RTE_MAX(rte_rand() % RING_SIZE, 1UL);
+               printf("%s: iteration %u, random shift: %u;\n",
+                   __func__, i, rand);
+               TEST_RING_VERIFY(-ENOBUFS != rte_ring_enqueue_bulk(r, src,
+                   rand));
+               TEST_RING_VERIFY(0 == rte_ring_dequeue_bulk(r, dst, rand));
+
+               /* fill the ring */
+               TEST_RING_VERIFY(-ENOBUFS != rte_ring_enqueue_bulk(r, src,
+                   rsz));
+               TEST_RING_VERIFY(0 == rte_ring_free_count(r));
+               TEST_RING_VERIFY(rsz == rte_ring_count(r));
+               TEST_RING_VERIFY(rte_ring_full(r));
+               TEST_RING_VERIFY(0 == rte_ring_empty(r));
+
+               /* empty the ring */
+               TEST_RING_VERIFY(0 == rte_ring_dequeue_bulk(r, dst, rsz));
+               TEST_RING_VERIFY(rsz == rte_ring_free_count(r));
+               TEST_RING_VERIFY(0 == rte_ring_count(r));
+               TEST_RING_VERIFY(0 == rte_ring_full(r));
+               TEST_RING_VERIFY(rte_ring_empty(r));
+
+               /* check data: compare all rsz pointer elements; the
+                * original compared only rsz *bytes*, i.e. a fraction
+                * of the dequeued objects */
+               TEST_RING_VERIFY(0 == memcmp(src, dst,
+                   rsz * sizeof(src[0])));
+               rte_ring_dump(r);
+       }
+       return (0);
+}
+
+/*
+ * Single-threaded functional test of the ring:
+ *  - SP/SC then MP/MC bulk enqueue/dequeue of 1, 2 and MAX_BULK
+ *    objects, checking that dequeued pointers match what was enqueued;
+ *  - repeated fill/empty cycles (see test_ring_basic_full_empty());
+ *  - default bulk count and watermark behaviour (-EDQUOT on excess).
+ * Returns 0 on success, -1 on any failure.
+ *
+ * Fix vs. original: all memcmp() calls compared (cur_dst - dst)
+ * *bytes* instead of that many pointer elements, silently checking
+ * only a fraction of the data; sizes are now scaled by sizeof(dst[0]).
+ */
+static int
+test_ring_basic(void)
+{
+       void **src = NULL, **cur_src = NULL, **dst = NULL, **cur_dst = NULL;
+       int ret;
+       unsigned i, n;
+
+       /* alloc dummy object pointers */
+       src = malloc(RING_SIZE*2*sizeof(void *));
+       if (src == NULL)
+               goto fail;
+
+       for (i = 0; i < RING_SIZE*2 ; i++) {
+               src[i] = (void *)(unsigned long)i;
+       }
+       cur_src = src;
+
+       /* alloc some room for copied objects */
+       dst = malloc(RING_SIZE*2*sizeof(void *));
+       if (dst == NULL)
+               goto fail;
+
+       memset(dst, 0, RING_SIZE*2*sizeof(void *));
+       cur_dst = dst;
+
+       /* single-producer / single-consumer variants */
+       printf("enqueue 1 obj\n");
+       ret = rte_ring_sp_enqueue_bulk(r, cur_src, 1);
+       cur_src += 1;
+       if (ret != 0)
+               goto fail;
+
+       printf("enqueue 2 objs\n");
+       ret = rte_ring_sp_enqueue_bulk(r, cur_src, 2);
+       cur_src += 2;
+       if (ret != 0)
+               goto fail;
+
+       printf("enqueue MAX_BULK objs\n");
+       ret = rte_ring_sp_enqueue_bulk(r, cur_src, MAX_BULK);
+       cur_src += MAX_BULK;
+       if (ret != 0)
+               goto fail;
+
+       printf("dequeue 1 obj\n");
+       ret = rte_ring_sc_dequeue_bulk(r, cur_dst, 1);
+       cur_dst += 1;
+       if (ret != 0)
+               goto fail;
+
+       printf("dequeue 2 objs\n");
+       ret = rte_ring_sc_dequeue_bulk(r, cur_dst, 2);
+       cur_dst += 2;
+       if (ret != 0)
+               goto fail;
+
+       printf("dequeue MAX_BULK objs\n");
+       ret = rte_ring_sc_dequeue_bulk(r, cur_dst, MAX_BULK);
+       cur_dst += MAX_BULK;
+       if (ret != 0)
+               goto fail;
+
+       /* check data (compare pointer elements, not bytes) */
+       if (memcmp(src, dst, (cur_dst - dst) * sizeof(dst[0]))) {
+               test_hexdump("src", src, cur_src - src);
+               test_hexdump("dst", dst, cur_dst - dst);
+               printf("data after dequeue is not the same\n");
+               goto fail;
+       }
+       cur_src = src;
+       cur_dst = dst;
+
+       /* multi-producer / multi-consumer variants */
+       printf("enqueue 1 obj\n");
+       ret = rte_ring_mp_enqueue_bulk(r, cur_src, 1);
+       cur_src += 1;
+       if (ret != 0)
+               goto fail;
+
+       printf("enqueue 2 objs\n");
+       ret = rte_ring_mp_enqueue_bulk(r, cur_src, 2);
+       cur_src += 2;
+       if (ret != 0)
+               goto fail;
+
+       printf("enqueue MAX_BULK objs\n");
+       ret = rte_ring_mp_enqueue_bulk(r, cur_src, MAX_BULK);
+       cur_src += MAX_BULK;
+       if (ret != 0)
+               goto fail;
+
+       printf("dequeue 1 obj\n");
+       ret = rte_ring_mc_dequeue_bulk(r, cur_dst, 1);
+       cur_dst += 1;
+       if (ret != 0)
+               goto fail;
+
+       printf("dequeue 2 objs\n");
+       ret = rte_ring_mc_dequeue_bulk(r, cur_dst, 2);
+       cur_dst += 2;
+       if (ret != 0)
+               goto fail;
+
+       printf("dequeue MAX_BULK objs\n");
+       ret = rte_ring_mc_dequeue_bulk(r, cur_dst, MAX_BULK);
+       cur_dst += MAX_BULK;
+       if (ret != 0)
+               goto fail;
+
+       /* check data (compare pointer elements, not bytes) */
+       if (memcmp(src, dst, (cur_dst - dst) * sizeof(dst[0]))) {
+               test_hexdump("src", src, cur_src - src);
+               test_hexdump("dst", dst, cur_dst - dst);
+               printf("data after dequeue is not the same\n");
+               goto fail;
+       }
+       cur_src = src;
+       cur_dst = dst;
+
+       printf("fill and empty the ring\n");
+       for (i = 0; i<RING_SIZE/MAX_BULK; i++) {
+               ret = rte_ring_mp_enqueue_bulk(r, cur_src, MAX_BULK);
+               cur_src += MAX_BULK;
+               if (ret != 0)
+                       goto fail;
+               ret = rte_ring_mc_dequeue_bulk(r, cur_dst, MAX_BULK);
+               cur_dst += MAX_BULK;
+               if (ret != 0)
+                       goto fail;
+       }
+
+       /* check data (compare pointer elements, not bytes) */
+       if (memcmp(src, dst, (cur_dst - dst) * sizeof(dst[0]))) {
+               test_hexdump("src", src, cur_src - src);
+               test_hexdump("dst", dst, cur_dst - dst);
+               printf("data after dequeue is not the same\n");
+               goto fail;
+       }
+
+       if (test_ring_basic_full_empty(src, dst) != 0)
+               goto fail;
+
+       cur_src = src;
+       cur_dst = dst;
+
+       printf("test watermark and default bulk enqueue / dequeue\n");
+       rte_ring_set_bulk_count(r, 16);
+       rte_ring_set_water_mark(r, 20);
+       n = rte_ring_get_bulk_count(r);
+       if (n != 16) {
+               printf("rte_ring_get_bulk_count() returned %u instead "
+                      "of 16\n", n);
+               goto fail;
+       }
+
+       cur_src = src;
+       cur_dst = dst;
+       ret = rte_ring_enqueue_bulk(r, cur_src, n);
+       cur_src += 16;
+       if (ret != 0) {
+               printf("Cannot enqueue\n");
+               goto fail;
+       }
+       /* second bulk crosses the watermark (32 > 20): -EDQUOT is
+        * expected, but the objects are still stored */
+       ret = rte_ring_enqueue_bulk(r, cur_src, n);
+       cur_src += 16;
+       if (ret != -EDQUOT) {
+               printf("Watermark not exceeded\n");
+               goto fail;
+       }
+       ret = rte_ring_dequeue_bulk(r, cur_dst, n);
+       cur_dst += 16;
+       if (ret != 0) {
+               printf("Cannot dequeue\n");
+               goto fail;
+       }
+       ret = rte_ring_dequeue_bulk(r, cur_dst, n);
+       cur_dst += 16;
+       if (ret != 0) {
+               printf("Cannot dequeue2\n");
+               goto fail;
+       }
+
+       /* check data (compare pointer elements, not bytes) */
+       if (memcmp(src, dst, (cur_dst - dst) * sizeof(dst[0]))) {
+               test_hexdump("src", src, cur_src - src);
+               test_hexdump("dst", dst, cur_dst - dst);
+               printf("data after dequeue is not the same\n");
+               goto fail;
+       }
+
+       /* free(NULL) is a no-op, so no NULL checks are needed */
+       free(src);
+       free(dst);
+       return 0;
+
+ fail:
+       free(src);
+       free(dst);
+       return -1;
+}
+
+/*
+ * Ring creation must fail when the requested size is invalid
+ * (RING_SIZE+1, which is not a power of two).
+ */
+static int
+test_ring_creation_with_wrong_size(void)
+{
+       /* RING_SIZE+1 is not a valid ring size: creation must be refused */
+       struct rte_ring *bad_ring = rte_ring_create("test_bad_ring_size",
+                       RING_SIZE + 1, SOCKET_ID_ANY, 0);
+
+       return (bad_ring == NULL) ? 0 : -1;
+}
+
+/*
+ * Ring creation must fail when the requested name is already in use.
+ */
+static int
+test_ring_creation_with_an_used_name(void)
+{
+       /* the name "test" is already taken by the main test ring, so
+        * a second creation under that name must fail */
+       struct rte_ring *dup_ring = rte_ring_create("test", RING_SIZE,
+                       SOCKET_ID_ANY, 0);
+
+       return (dup_ring == NULL) ? 0 : -1;
+}
+
+/*
+ * Check that a non-power-of-2 count makes the create
+ * function fail correctly.
+ */
+static int
+test_create_count_odd(void)
+{
+       /* 4097 is not a power of two: creation must be refused */
+       struct rte_ring *odd_ring = rte_ring_create("test_ring_count",
+                       4097, SOCKET_ID_ANY, 0);
+
+       return (odd_ring == NULL) ? 0 : -1;
+}
+
+/*
+ * Looking up a ring name that was never created must yield NULL with
+ * rte_errno set to ENOENT. Returns 0 on success, -1 on failure.
+ * (Also fixes the "returnn" typo in the original failure message.)
+ */
+static int
+test_lookup_null(void)
+{
+       struct rte_ring *rlp = rte_ring_lookup("ring_not_found");
+
+       if (rlp == NULL && rte_errno != ENOENT) {
+               printf("test failed to return error on null pointer\n");
+               return -1;
+       }
+       return 0;
+}
+
+/*
+ * Extra basic checks on a freshly created ring: name lookup, empty and
+ * full predicates across a complete fill and drain cycle.
+ *
+ * Notes:
+ *  - the object table comes from rte_zmalloc(), so every enqueued
+ *    pointer is NULL; only the fill level is being tested here, and
+ *    the enqueue/dequeue return values are deliberately ignored;
+ *  - the "test_ring_basic_ex" ring is intentionally left allocated
+ *    (no free API is used); test_set_watermark() looks it up later.
+ * Returns 0 on success, -1 on failure.
+ */
+static int
+test_ring_basic_ex(void)
+{
+       int ret = -1;
+       unsigned i;
+       struct rte_ring * rp;
+       void **obj = NULL;
+
+       obj = (void **)rte_zmalloc("test_ring_basic_ex_malloc", (RING_SIZE * sizeof(void *)), 0);
+       if (obj == NULL) {
+               printf("test_ring_basic_ex fail to rte_malloc\n");
+               goto fail_test;
+       }
+
+       rp = rte_ring_create("test_ring_basic_ex", RING_SIZE, SOCKET_ID_ANY, 0);
+       if (rp == NULL) {
+               printf("test_ring_basic_ex fail to create ring\n");
+               goto fail_test;
+       }
+
+       /* the new ring must be reachable by name */
+       if (rte_ring_lookup("test_ring_basic_ex") != rp) {
+               goto fail_test;
+       }
+
+       if (rte_ring_empty(rp) != 1) {
+               printf("test_ring_basic_ex ring is not empty but it should be\n");
+               goto fail_test;
+       }
+
+       printf("%u ring entries are now free\n", rte_ring_free_count(rp));
+
+       /* fill the ring completely ... */
+       for (i = 0; i < RING_SIZE; i ++) {
+               rte_ring_enqueue(rp, obj[i]);
+       }
+
+       if (rte_ring_full(rp) != 1) {
+               printf("test_ring_basic_ex ring is not full but it should be\n");
+               goto fail_test;
+       }
+
+       /* ... then drain it again */
+       for (i = 0; i < RING_SIZE; i ++) {
+               rte_ring_dequeue(rp, &obj[i]);
+       }
+
+       if (rte_ring_empty(rp) != 1) {
+               printf("test_ring_basic_ex ring is not empty but it should be\n");
+               goto fail_test;
+       }
+
+       ret = 0;
+fail_test:
+       if (obj != NULL)
+               rte_free(obj);
+
+       return ret;
+}
+
+/*
+ * Entry point of the ring autotest: runs the basic, watermark,
+ * error-path and performance sub-tests in sequence.
+ * Returns 0 on success, -1 as soon as one sub-test fails.
+ */
+int
+test_ring(void)
+{
+       unsigned n_enq_cores, n_deq_cores;
+
+       /* some more basic operations */
+       if (test_ring_basic_ex() < 0)
+               return -1;
+
+       rte_atomic32_init(&synchro);
+
+       /* create the shared ring once; reuse it on subsequent runs */
+       if (r == NULL)
+               r = rte_ring_create("test", RING_SIZE, SOCKET_ID_ANY, 0);
+       if (r == NULL)
+               return -1;
+
+       /* retrieve the ring from its name */
+       if (rte_ring_lookup("test") != r) {
+               printf("Cannot lookup ring from its name\n");
+               return -1;
+       }
+
+       /* basic single-threaded operations */
+       if (test_ring_basic() < 0)
+               return -1;
+
+       /* live quota/watermark modification */
+       if (test_quota_and_watermark() < 0)
+               return -1;
+
+       if (test_set_watermark() < 0) {
+               printf("Test failed to detect invalid parameter\n");
+               return -1;
+       }
+       printf("Test detected forced bad watermark values\n");
+
+       if (test_create_count_odd() < 0) {
+               printf("Test failed to detect odd count\n");
+               return -1;
+       }
+       printf("Test detected odd count\n");
+
+       if (test_lookup_null() < 0) {
+               printf("Test failed to detect NULL ring lookup\n");
+               return -1;
+       }
+       printf("Test detected NULL ring lookup \n");
+
+       printf("start performance tests\n");
+
+       /* one lcore for enqueue, one for dequeue */
+       n_enq_cores = 1;
+       n_deq_cores = 1;
+       if (do_one_ring_test(n_enq_cores, n_deq_cores) < 0)
+               return -1;
+
+       /* max cores for enqueue, one for dequeue */
+       n_enq_cores = rte_lcore_count() - 1;
+       n_deq_cores = 1;
+       if (do_one_ring_test(n_enq_cores, n_deq_cores) < 0)
+               return -1;
+
+       /* max cores for dequeue, one for enqueue */
+       n_enq_cores = 1;
+       n_deq_cores = rte_lcore_count() - 1;
+       if (do_one_ring_test(n_enq_cores, n_deq_cores) < 0)
+               return -1;
+
+       /* half for enqueue and half for dequeue */
+       n_enq_cores = rte_lcore_count() / 2;
+       n_deq_cores = rte_lcore_count() / 2;
+       if (do_one_ring_test(n_enq_cores, n_deq_cores) < 0)
+               return -1;
+
+       /* creating a ring with an invalid size must fail */
+       if (test_ring_creation_with_wrong_size() < 0)
+               return -1;
+
+       /* creating a ring with a name already in use must fail */
+       if (test_ring_creation_with_an_used_name() < 0)
+               return -1;
+
+       /* dump the ring status */
+       rte_ring_list_dump();
+
+       return 0;
+}
diff --git a/app/test/test_rwlock.c b/app/test/test_rwlock.c
new file mode 100644 (file)
index 0000000..b8420f7
--- /dev/null
@@ -0,0 +1,135 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <unistd.h>
+#include <sys/queue.h>
+
+#include <cmdline_parse.h>
+
+#include <rte_common.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_per_lcore.h>
+#include <rte_launch.h>
+#include <rte_atomic.h>
+#include <rte_rwlock.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_cycles.h>
+
+#include "test.h"
+
+/*
+ * rwlock test
+ * ===========
+ *
+ * - There is a global rwlock and a table of rwlocks (one per lcore).
+ *
+ * - The test function takes all of these locks and launches the
+ *   ``test_rwlock_per_core()`` function on each core (except the master).
+ *
+ *   - The function takes the global write lock, displays a message,
+ *     then releases the global lock.
+ *   - Then, it takes the per-lcore write lock, displays a message, and
+ *     releases the per-core lock.
+ *   - Finally, a read lock is taken during 100 ms, then released.
+ *
+ * - The main function unlocks the per-lcore locks sequentially and
+ *   waits between each lock. This triggers the display of a message
+ *   for each core, in the correct order.
+ *
+ *   Then, it tries to take the global write lock and display the last
+ *   message. The autotest script checks that the message order is correct.
+ */
+
+static rte_rwlock_t sl;
+static rte_rwlock_t sl_tab[RTE_MAX_LCORE];
+
+/*
+ * Worker launched on each slave lcore. Takes and releases the global
+ * write lock, then its own per-lcore write lock, and finally holds the
+ * global lock in read mode for 100 ms (several readers may overlap).
+ * arg is unused; always returns 0.
+ */
+static int
+test_rwlock_per_core(__attribute__((unused)) void *arg)
+{
+       rte_rwlock_write_lock(&sl);
+       printf("Global write lock taken on core %u\n", rte_lcore_id());
+       rte_rwlock_write_unlock(&sl);
+
+       /* blocks until the master releases this core's lock, which
+        * serialises the "Hello" messages in lcore order */
+       rte_rwlock_write_lock(&sl_tab[rte_lcore_id()]);
+       printf("Hello from core %u !\n", rte_lcore_id());
+       rte_rwlock_write_unlock(&sl_tab[rte_lcore_id()]);
+
+       /* hold the global lock shared for 100 ms so the master's final
+        * write-lock acquisition has to wait for every reader */
+       rte_rwlock_read_lock(&sl);
+       printf("Global read lock taken on core %u\n", rte_lcore_id());
+       rte_delay_ms(100);
+       printf("Release global read lock on core %u\n", rte_lcore_id());
+       rte_rwlock_read_unlock(&sl);
+
+       return 0;
+}
+
+/*
+ * rwlock autotest entry point. The master pre-locks the global and all
+ * per-lcore write locks, launches test_rwlock_per_core() on every
+ * slave, then releases the per-lcore locks one by one with a delay so
+ * the slaves print in lcore order. Always returns 0; the autotest
+ * script checks the message order instead.
+ */
+int
+test_rwlock(void)
+{
+       int i;
+
+       rte_rwlock_init(&sl);
+       for (i=0; i<RTE_MAX_LCORE; i++)
+               rte_rwlock_init(&sl_tab[i]);
+
+       /* hold the global write lock so launched slaves block at once */
+       rte_rwlock_write_lock(&sl);
+
+       RTE_LCORE_FOREACH_SLAVE(i) {
+               rte_rwlock_write_lock(&sl_tab[i]);
+               rte_eal_remote_launch(test_rwlock_per_core, NULL, i);
+       }
+
+       /* let every slave pass the first (global write lock) stage */
+       rte_rwlock_write_unlock(&sl);
+
+       /* release the per-lcore locks sequentially: triggers one
+        * "Hello" message per core, in order */
+       RTE_LCORE_FOREACH_SLAVE(i) {
+               rte_rwlock_write_unlock(&sl_tab[i]);
+               rte_delay_ms(100);
+       }
+
+       /* slaves are holding sl in read mode; acquiring the write lock
+        * waits for all readers, so this prints after their messages */
+       rte_rwlock_write_lock(&sl);
+       /* this message should be the last message of test */
+       printf("Global write lock taken on master core %u\n", rte_lcore_id());
+       rte_rwlock_write_unlock(&sl);
+
+       rte_eal_mp_wait_lcore();
+
+       return 0;
+}
diff --git a/app/test/test_spinlock.c b/app/test/test_spinlock.c
new file mode 100644 (file)
index 0000000..78d8a0f
--- /dev/null
@@ -0,0 +1,318 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <unistd.h>
+#include <sys/queue.h>
+
+#include <cmdline_parse.h>
+
+#include <rte_common.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_per_lcore.h>
+#include <rte_launch.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_cycles.h>
+#include <rte_spinlock.h>
+
+#include "test.h"
+
+/*
+ * Spinlock test
+ * =============
+ *
+ * - There is a global spinlock and a table of spinlocks (one per lcore).
+ *
+ * - The test function takes all of these locks and launches the
+ *   ``test_spinlock_per_core()`` function on each core (except the master).
+ *
+ *   - The function takes the global lock, displays a message, then releases
+ *     the global lock.
+ *   - The function takes the per-lcore lock, displays a message, then
+ *     releases the per-core lock.
+ *
+ * - The main function unlocks the per-lcore locks sequentially and
+ *   waits between each lock. This triggers the display of a message
+ *   for each core, in the correct order. The autotest script checks that
+ *   this order is correct.
+ *
+ * - A load test is carried out, with all cores attempting to lock a single lock
+ *   multiple times
+ */
+
+static rte_spinlock_t sl, sl_try;
+static rte_spinlock_t sl_tab[RTE_MAX_LCORE];
+static rte_spinlock_recursive_t slr;
+static unsigned count;
+
+/*
+ * Worker launched on each slave lcore: takes/releases the global
+ * spinlock, then its own per-lcore spinlock. The per-lcore lock is
+ * pre-held by the master, so the "Hello" message only appears once
+ * the master releases it. arg is unused; always returns 0.
+ */
+static int
+test_spinlock_per_core(__attribute__((unused)) void *arg)
+{
+       rte_spinlock_lock(&sl);
+       printf("Global lock taken on core %u\n", rte_lcore_id());
+       rte_spinlock_unlock(&sl);
+
+       rte_spinlock_lock(&sl_tab[rte_lcore_id()]);
+       printf("Hello from core %u !\n", rte_lcore_id());
+       rte_spinlock_unlock(&sl_tab[rte_lcore_id()]);
+
+       return 0;
+}
+
+/*
+ * Worker for the recursive-spinlock test: takes the shared recursive
+ * lock slr three times, then releases it three times, printing the
+ * lock's nesting depth (slr.count) after each operation so the
+ * autotest output shows the count going 1..3 and back down.
+ * arg is unused; always returns 0.
+ */
+static int
+test_spinlock_recursive_per_core(__attribute__((unused)) void *arg)
+{
+       unsigned id = rte_lcore_id();
+
+       rte_spinlock_recursive_lock(&slr);
+       printf("Global recursive lock taken on core %u - count = %d\n",
+              id, slr.count);
+       rte_spinlock_recursive_lock(&slr);
+       printf("Global recursive lock taken on core %u - count = %d\n",
+              id, slr.count);
+       rte_spinlock_recursive_lock(&slr);
+       printf("Global recursive lock taken on core %u - count = %d\n",
+              id, slr.count);
+
+       printf("Hello from within recursive locks from core %u !\n", id);
+
+       rte_spinlock_recursive_unlock(&slr);
+       printf("Global recursive lock released on core %u - count = %d\n",
+              id, slr.count);
+       rte_spinlock_recursive_unlock(&slr);
+       printf("Global recursive lock released on core %u - count = %d\n",
+              id, slr.count);
+       rte_spinlock_recursive_unlock(&slr);
+       printf("Global recursive lock released on core %u - count = %d\n",
+              id, slr.count);
+
+       return 0;
+}
+
+static volatile int count1, count2;
+static rte_spinlock_t lk = RTE_SPINLOCK_INITIALIZER;
+static unsigned int max = 10000000; /* 10M */
+static volatile uint64_t looptime[RTE_MAX_LCORE];
+
+/*
+ * Contended-lock benchmark body, run on every lcore. Performs "max"
+ * lock/unlock cycles on the shared lock lk, incrementing count1 under
+ * the lock and count2 outside it (count2 is deliberately unprotected,
+ * so lost updates expose the contention). Stores the elapsed HPET
+ * cycle count for this lcore into looptime[]. dummy is unused.
+ */
+static int
+load_loop_fn(__attribute__((unused)) void *dummy)
+{
+       uint64_t end, begin;
+       begin = rte_get_hpet_cycles();
+       unsigned int i = 0;
+       for ( i = 0; i < max; i++) {
+               rte_spinlock_lock(&lk);
+               count1++;
+               rte_spinlock_unlock(&lk);
+               count2++;
+       }
+       end = rte_get_hpet_cycles();
+       looptime[rte_lcore_id()] = end - begin;
+       return 0;
+}
+
+/*
+ * Launch load_loop_fn on every lcore (master included) and check the
+ * counters: count1 (incremented under the lock) must equal
+ * max * lcore_count, while count2 (incremented without the lock) is
+ * expected to have lost updates under contention.
+ * NOTE(review): the "count2 != check" pass condition can spuriously
+ * fail on lightly contended runs where no increment is lost — confirm
+ * this is the intended semantics.
+ * Returns 0 on pass, -1 on failure or when fewer than 2 lcores exist.
+ */
+static int
+test_spinlock_load(void)
+{
+       if (rte_lcore_count()<= 1) {
+               printf("no cores counted\n");
+               return -1;
+       }
+       printf ("Running %u tests.......\n", max);
+       printf ("Number of cores = %u\n", rte_lcore_count());
+
+       rte_eal_mp_remote_launch(load_loop_fn, NULL , CALL_MASTER);
+       rte_eal_mp_wait_lcore();
+
+       unsigned int k = 0;
+       uint64_t avgtime = 0;
+
+       RTE_LCORE_FOREACH(k) {
+               printf("Core [%u] time = %"PRIu64"\n", k, looptime[k]);
+               avgtime += looptime[k];
+       }
+
+       avgtime = avgtime / rte_lcore_count();
+       printf("Average time = %"PRIu64"\n", avgtime);
+
+       /* accumulate the expected total in 64 bits: max * lcore_count
+        * overflows a 32-bit int once more than ~214 lcores each run
+        * 10M iterations (the old "int check" silently wrapped).
+        * NB: count1 itself is an int and can also wrap at extreme core
+        * counts; that would need a wider counter declared above. */
+       uint64_t check = (uint64_t)max * rte_lcore_count();
+       if ((uint64_t)count1 == check && (uint64_t)count2 != check)
+               printf("Passed Load test\n");
+       else {
+               printf("Failed load test\n");
+               return -1;
+       }
+       return 0;
+}
+
+/*
+ * Try-lock worker run on each slave lcore. Attempts to take sl_try
+ * without blocking; since the master already holds sl_try, the trylock
+ * is expected to fail and return immediately, in which case the shared
+ * counter "count" is incremented under the protection of the global
+ * lock sl. The accumulated count is verified by the caller.
+ */
+static int
+test_spinlock_try(__attribute__((unused)) void *arg)
+{
+       if (rte_spinlock_trylock(&sl_try) != 0)
+               return 0;
+
+       /* trylock failed as expected: record one failure */
+       rte_spinlock_lock(&sl);
+       count++;
+       rte_spinlock_unlock(&sl);
+
+       return 0;
+}
+
+
+/*
+ * Test rte_eal_get_lcore_state() in addition to spinlocks
+ * as we have "waiting" then "running" lcores.
+ */
+/*
+ * Spinlock autotest entry point. Runs, in order: the per-core ordered
+ * message test, the contended load test, the recursive lock tests and
+ * the trylock tests. Returns 0 on success, -1 on failure.
+ */
+int
+test_spinlock(void)
+{
+       int ret = 0;
+       int i;
+
+       /* slave cores should be waiting: print it */
+       RTE_LCORE_FOREACH_SLAVE(i) {
+               printf("lcore %d state: %d\n", i,
+                      (int) rte_eal_get_lcore_state(i));
+       }
+
+       /* init the plain, try and recursive locks plus one per lcore */
+       rte_spinlock_init(&sl);
+       rte_spinlock_init(&sl_try);
+       rte_spinlock_recursive_init(&slr);
+       for (i=0; i<RTE_MAX_LCORE; i++)
+               rte_spinlock_init(&sl_tab[i]);
+
+       /* hold the global lock so launched slaves block immediately */
+       rte_spinlock_lock(&sl);
+
+       RTE_LCORE_FOREACH_SLAVE(i) {
+               rte_spinlock_lock(&sl_tab[i]);
+               rte_eal_remote_launch(test_spinlock_per_core, NULL, i);
+       }
+
+       /* slave cores should be busy: print it */
+       RTE_LCORE_FOREACH_SLAVE(i) {
+               printf("lcore %d state: %d\n", i,
+                      (int) rte_eal_get_lcore_state(i));
+       }
+       rte_spinlock_unlock(&sl);
+
+       /* release the per-lcore locks one at a time so the "Hello"
+        * messages come out in lcore order */
+       RTE_LCORE_FOREACH_SLAVE(i) {
+               rte_spinlock_unlock(&sl_tab[i]);
+               rte_delay_ms(100);
+       }
+
+       rte_eal_mp_wait_lcore();
+
+       if (test_spinlock_load()<0)
+               return -1;
+
+       rte_spinlock_recursive_lock(&slr);
+
+       /*
+        * Try to acquire a lock that we already own: a recursive
+        * trylock on the same core must succeed.
+        */
+       if(!rte_spinlock_recursive_trylock(&slr)) {
+               printf("rte_spinlock_recursive_trylock failed on a lock that "
+                      "we already own\n");
+               ret = -1;
+       } else
+               rte_spinlock_recursive_unlock(&slr);
+
+       RTE_LCORE_FOREACH_SLAVE(i) {
+               rte_eal_remote_launch(test_spinlock_recursive_per_core, NULL, i);
+       }
+       rte_spinlock_recursive_unlock(&slr);
+       rte_eal_mp_wait_lcore();
+
+       /*
+        * Check that try-locking an already locked object returns
+        * immediately. The master locks sl_try, then launches every
+        * slave lcore to trylock the same object. Each slave should
+        * fail, return at once, and increment "count" (reset to zero
+        * below). Afterwards "count" must equal the number of slave
+        * lcores if try-locking a locked spinlock behaves correctly.
+        */
+       if (rte_spinlock_trylock(&sl_try) == 0) {
+               return -1;
+       }
+       count = 0;
+       RTE_LCORE_FOREACH_SLAVE(i) {
+               rte_eal_remote_launch(test_spinlock_try, NULL, i);
+       }
+       rte_eal_mp_wait_lcore();
+       rte_spinlock_unlock(&sl_try);
+       /* the slaves released sl after each count update: it must be free */
+       if (rte_spinlock_is_locked(&sl)) {
+               printf("spinlock is locked but it should not be\n");
+               return -1;
+       }
+       rte_spinlock_lock(&sl);
+       if (count != ( rte_lcore_count() - 1)) {
+               ret = -1;
+       }
+       rte_spinlock_unlock(&sl);
+
+       /*
+        * Test if it can trylock recursively.
+        * Use rte_spinlock_recursive_trylock() to check if it can lock a spinlock
+        * object recursively. Here it will try to lock a spinlock object twice.
+        */
+       if (rte_spinlock_recursive_trylock(&slr) == 0) {
+               printf("It failed to do the first spinlock_recursive_trylock but it should able to do\n");
+               return -1;
+       }
+       if (rte_spinlock_recursive_trylock(&slr) == 0) {
+               printf("It failed to do the second spinlock_recursive_trylock but it should able to do\n");
+               return -1;
+       }
+       rte_spinlock_recursive_unlock(&slr);
+       rte_spinlock_recursive_unlock(&slr);
+
+       return ret;
+}
diff --git a/app/test/test_string_fns.c b/app/test/test_string_fns.c
new file mode 100644 (file)
index 0000000..f34e9f1
--- /dev/null
@@ -0,0 +1,305 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <stddef.h>
+#include <errno.h>
+#include <string.h>
+
+#include <rte_string_fns.h>
+
+#include <cmdline_parse.h>
+
+#include "test.h"
+
+#define LOG(...) do {\
+       fprintf(stderr, "%s() ln %d: ", __func__, __LINE__); \
+       fprintf(stderr, __VA_ARGS__); \
+} while(0)
+
+#define DATA_BYTE 'a'
+
+/*
+ * Unit tests for rte_snprintf(): a fitting string, a truncated string,
+ * the zero-size "how big a buffer do I need" probes, and the EINVAL
+ * error paths for NULL buffer / NULL format.
+ * Returns 0 on success, -1 on the first failing check.
+ */
+static int
+test_rte_snprintf(void)
+{
+       /* =================================================
+        * First test with a string that will fit in buffer
+        * =================================================*/
+       do {
+               int retval;
+               const char source[] = "This is a string that will fit in buffer";
+               char buf[sizeof(source)+2]; /* make buffer big enough to fit string */
+
+               /* initialise buffer with characters so it can contain no nulls */
+               memset(buf, DATA_BYTE, sizeof(buf));
+
+               /* run rte_snprintf and check results */
+               retval = rte_snprintf(buf, sizeof(buf), "%s", source);
+               if (retval != sizeof(source) - 1) {
+                       /* report the value actually expected (strlen, i.e.
+                        * sizeof - 1), not the array size */
+                       LOG("Error, retval = %d, expected = %u\n",
+                                       retval, (unsigned)(sizeof(source) - 1));
+                       return -1;
+               }
+               if (buf[retval] != '\0') {
+                       LOG("Error, resultant is not null-terminated\n");
+                       return -1;
+               }
+               if (memcmp(source, buf, sizeof(source)-1) != 0){
+                       LOG("Error, corrupt data in buffer\n");
+                       return -1;
+               }
+       } while (0);
+
+       do {
+               /* =================================================
+                * Test with a string that will get truncated
+                * =================================================*/
+               int retval;
+               const char source[] = "This is a long string that won't fit in buffer";
+               char buf[sizeof(source)/2]; /* make buffer half the size */
+
+               /* initialise buffer with characters so it can contain no nulls */
+               memset(buf, DATA_BYTE, sizeof(buf));
+
+               /* run rte_snprintf and check results: even when truncating,
+                * the return value is the full untruncated length */
+               retval = rte_snprintf(buf, sizeof(buf), "%s", source);
+               if (retval != sizeof(source) - 1) {
+                       LOG("Error, retval = %d, expected = %u\n",
+                                       retval, (unsigned)(sizeof(source) - 1));
+                       return -1;
+               }
+               if (buf[sizeof(buf)-1] != '\0') {
+                       LOG("Error, buffer is not null-terminated\n");
+                       return -1;
+               }
+               if (memcmp(source, buf, sizeof(buf)-1) != 0){
+                       LOG("Error, corrupt data in buffer\n");
+                       return -1;
+               }
+       } while (0);
+
+       do {
+               /* ===========================================================
+                * Test using zero-size buf to check how long a buffer we need
+                * ===========================================================*/
+               int retval;
+               const char source[] = "This is a string";
+               char buf[10];
+
+               /* call with a zero-sized non-NULL buffer, should tell how big a buffer
+                * we need */
+               retval = rte_snprintf(buf, 0, "%s", source);
+               if (retval != sizeof(source) - 1) {
+                       LOG("Call with 0-length buffer does not return correct size."
+                                       "Expected: %zu, got: %d\n", sizeof(source), retval);
+                       return -1;
+               }
+
+               /* call with a zero-sized NULL buffer, should tell how big a buffer
+                * we need */
+               retval = rte_snprintf(NULL, 0, "%s", source);
+               if (retval != sizeof(source) - 1) {
+                       LOG("Call with 0-length buffer does not return correct size."
+                                       "Expected: %zu, got: %d\n", sizeof(source), retval);
+                       return -1;
+               }
+
+       } while (0);
+
+       do {
+               /* =================================================
+                * Test with invalid parameter values
+                * =================================================*/
+               const char source[] = "This is a string";
+               char buf[10];
+
+               /* call with buffer value set to NULL is EINVAL */
+               if (rte_snprintf(NULL, sizeof(buf), "%s\n", source) != -1 ||
+                               errno != EINVAL) {
+                       LOG("Failed to get suitable error when passing NULL buffer\n");
+                       return -1;
+               }
+
+               memset(buf, DATA_BYTE, sizeof(buf));
+               /* call with a NULL format and zero-size should return error
+                * without affecting the buffer */
+               if (rte_snprintf(buf, 0, NULL) != -1 ||
+                               errno != EINVAL) {
+                       LOG("Failed to get suitable error when passing NULL buffer\n");
+                       return -1;
+               }
+               if (buf[0] != DATA_BYTE) {
+                       LOG("Error, zero-length buffer modified after call with NULL"
+                                       " format string\n");
+                       return -1;
+               }
+
+               /* call with a NULL format should return error but also null-terminate
+                *  the buffer */
+               if (rte_snprintf(buf, sizeof(buf), NULL) != -1 ||
+                               errno != EINVAL) {
+                       LOG("Failed to get suitable error when passing NULL buffer\n");
+                       return -1;
+               }
+               if (buf[0] != '\0') {
+                       LOG("Error, buffer not null-terminated after call with NULL"
+                                       " format string\n");
+                       return -1;
+               }
+       } while (0);
+
+       LOG("%s - PASSED\n", __func__);
+       return 0;
+}
+
+/*
+ * Unit tests for rte_strsplit(): exact split count, fewer splits than
+ * separators, more slots than separators, a non-occurring separator,
+ * and the invalid / zero-length parameter cases.
+ * Returns 0 on success, -1 on the first failing check.
+ */
+static int
+test_rte_strsplit(void)
+{
+       int i;
+       do {
+               /* =======================================================
+                * split a mac address correct number of splits requested
+                * =======================================================*/
+               char test_string[] = "54:65:76:87:98:90";
+               char *splits[6];
+
+               LOG("Source string: '%s', to split on ':'\n", test_string);
+               if (rte_strsplit(test_string, sizeof(test_string),
+                               splits, 6, ':') != 6) {
+                       LOG("Error splitting mac address\n");
+                       return -1;
+               }
+               for (i = 0; i < 6; i++)
+                       LOG("Token %d = %s\n", i + 1, splits[i]);
+       } while (0);
+
+
+       do {
+               /* =======================================================
+                * split on spaces smaller number of splits requested
+                * =======================================================*/
+               char test_string[] = "54 65 76 87 98 90";
+               char *splits[6];
+
+               LOG("Source string: '%s', to split on ' '\n", test_string);
+               if (rte_strsplit(test_string, sizeof(test_string),
+                               splits, 3, ' ') != 3) {
+                       /* message used to say "max 2 splits" although 3 are
+                        * requested above */
+                       LOG("Error splitting mac address for max 3 splits\n");
+                       return -1;
+               }
+               for (i = 0; i < 3; i++)
+                       LOG("Token %d = %s\n", i + 1, splits[i]);
+       } while (0);
+
+       do {
+               /* =======================================================
+                * split on commas - more splits than commas requested
+                * =======================================================*/
+               char test_string[] = "a,b,c,d";
+               char *splits[6];
+
+               LOG("Source string: '%s', to split on ','\n", test_string);
+               if (rte_strsplit(test_string, sizeof(test_string),
+                               splits, 6, ',') != 4) {
+                       LOG("Error splitting %s on ','\n", test_string);
+                       return -1;
+               }
+               for (i = 0; i < 4; i++)
+                       LOG("Token %d = %s\n", i + 1, splits[i]);
+       } while(0);
+
+       do {
+               /* =======================================================
+                * Try splitting on non-existent character.
+                * =======================================================*/
+               char test_string[] = "a,b,c,d";
+               char *splits[6];
+
+               LOG("Source string: '%s', to split on ' '\n", test_string);
+               if (rte_strsplit(test_string, sizeof(test_string),
+                               splits, 6, ' ') != 1) {
+                       LOG("Error splitting %s on ' '\n", test_string);
+                       return -1;
+               }
+               LOG("String not split\n");
+       } while(0);
+
+       do {
+               /* =======================================================
+                * Invalid / edge case parameter checks
+                * =======================================================*/
+               char test_string[] = "a,b,c,d";
+               char *splits[6];
+
+               /* clear errno first so a stale EINVAL from earlier calls
+                * cannot mask a missing error indication here */
+               errno = 0;
+               if (rte_strsplit(NULL, 0, splits, 6, ',') >= 0
+                               || errno != EINVAL){
+                       LOG("Error: rte_strsplit accepted NULL string parameter\n");
+                       return -1;
+               }
+
+               errno = 0;
+               if (rte_strsplit(test_string, sizeof(test_string), NULL, 0, ',') >= 0
+                               || errno != EINVAL){
+                       LOG("Error: rte_strsplit accepted NULL array parameter\n");
+                       return -1;
+               }
+
+               errno = 0;
+               if (rte_strsplit(test_string, 0, splits, 6, ',') != 0 || errno != 0) {
+                       LOG("Error: rte_strsplit did not accept 0 length string\n");
+                       return -1;
+               }
+
+               if (rte_strsplit(test_string, sizeof(test_string), splits, 0, ',') != 0
+                               || errno != 0) {
+                       LOG("Error: rte_strsplit did not accept 0 length array\n");
+                       return -1;
+               }
+
+               LOG("Parameter test cases passed\n");
+       } while(0);
+
+       LOG("%s - PASSED\n", __func__);
+       return 0;
+}
+
+/*
+ * String functions autotest entry point: run each sub-test in turn,
+ * stopping at the first failure. Returns 0 on success, -1 otherwise.
+ */
+int
+test_string_fns(void)
+{
+       if (test_rte_snprintf() < 0)
+               return -1;
+       if (test_rte_strsplit() < 0)
+               return -1;
+       return 0;
+}
diff --git a/app/test/test_tailq.c b/app/test/test_tailq.c
new file mode 100644 (file)
index 0000000..b67eabd
--- /dev/null
@@ -0,0 +1,125 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <sys/queue.h>
+
+#include <cmdline_parse.h>
+
+#include <rte_string_fns.h>
+#include <rte_tailq.h>
+
+#include "test.h"
+
+#define do_return(...) do { \
+       printf("Error at %s, line %d: ", __func__, __LINE__); \
+       printf(__VA_ARGS__); \
+       return 1; \
+} while (0)
+
+#define DEFAULT_TAILQ "dummy_q0"
+
+static struct rte_dummy d_elem;
+
<br>
+/*
+ * Tailq reservation tests: reserve DEFAULT_TAILQ and insert an element,
+ * check that re-reserving the same name fails, then reserve names until
+ * the RTE_MAX_TAILQ slots run out and verify a failure occurs before
+ * the table is exhausted. NOTE(review): this deliberately consumes all
+ * remaining tailq slots for the rest of the process.
+ * Returns 0 on success, 1 on failure (via do_return).
+ */
+static int
+test_tailq_create(void)
+{
+       struct rte_dummy_head *d_head;
+       char name[RTE_TAILQ_NAMESIZE];
+       unsigned i;
+
+       /* create a first tailq and check its non-null */
+       d_head = RTE_TAILQ_RESERVE(DEFAULT_TAILQ, rte_dummy_head);
+       if (d_head == NULL)
+               do_return("Error allocating "DEFAULT_TAILQ"\n");
+
+       /* check we can add an item to it
+        */
+       TAILQ_INSERT_TAIL(d_head, &d_elem, next);
+
+       /* try allocating dummy_q0 again, and check for failure */
+       if (RTE_TAILQ_RESERVE(DEFAULT_TAILQ, rte_dummy_head) != NULL)
+               do_return("Error, non-null result returned when attemption to "
+                               "re-allocate a tailq\n");
+
+       /* now fill up the tailq slots available and check we get an error */
+       for (i = 1; i < RTE_MAX_TAILQ; i++){
+               rte_snprintf(name, sizeof(name), "dummy_q%u", i);
+               if ((d_head = RTE_TAILQ_RESERVE(name, rte_dummy_head)) == NULL)
+                       break;
+       }
+
+       /* check that we had an error return before RTE_MAX_TAILQ */
+       if (i == RTE_MAX_TAILQ)
+               do_return("Error, we did not have a reservation failure as expected\n");
+
+       return 0;
+}
+
+/*
+ * Check that a reserved tailq can be found again by name, that it
+ * still holds exactly the element inserted by the create test, and
+ * that looking up an unknown name yields NULL.
+ * Returns 0 on success, 1 on failure (via do_return).
+ */
+static int
+test_tailq_lookup(void)
+{
+       struct rte_dummy_head *head;
+       struct rte_dummy *entry;
+
+       head = RTE_TAILQ_LOOKUP(DEFAULT_TAILQ, rte_dummy_head);
+       if (head == NULL)
+               do_return("Error with tailq lookup\n");
+
+       /* every element on the list must be the one we inserted */
+       TAILQ_FOREACH(entry, head, next) {
+               if (entry != &d_elem)
+                       do_return("Error with tailq returned from lookup - "
+                                       "expected element not found\n");
+       }
+
+       /* a name that was never reserved must not resolve */
+       if (RTE_TAILQ_LOOKUP("does_not_exist_queue", rte_dummy_head) != NULL)
+               do_return("Error, lookup does not return NULL for bad tailq name\n");
+
+       return 0;
+}
+
+/*
+ * Tailq autotest entry point: run the creation checks followed by the
+ * lookup checks and combine their results (non-zero means failure).
+ */
+int
+test_tailq(void)
+{
+       int status = 0;
+
+       status |= test_tailq_create();
+       status |= test_tailq_lookup();
+
+       return status;
+}
diff --git a/app/test/test_timer.c b/app/test/test_timer.c
new file mode 100644 (file)
index 0000000..b3aea8c
--- /dev/null
@@ -0,0 +1,363 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+/*
+ * Timer
+ * =====
+ *
+ * #. Stress tests.
+ *
+ *    The objective of the timer stress tests is to check that there are no
+ *    race conditions in list and status management. This test launches,
+ *    resets and stops the timer very often on many cores at the same
+ *    time.
+ *
+ *    - Only one timer is used for this test.
+ *    - On each core, the rte_timer_manage() function is called from the main
+ *      loop every 3 microseconds.
+ *    - In the main loop, the timer may be reset (randomly, with a
+ *      probability of 0.5 %) 100 microseconds later on a random core, or
+ *      stopped (with a probability of 0.5 % also).
+ *    - In the callback, the timer can be reset (randomly, with a
+ *      probability of 0.5 %) 100 microseconds later on the same core or
+ *      on another core (same probability), or stopped (same
+ *      probability).
+ *
+ *
+ * #. Basic test.
+ *
+ *    This test performs basic functional checks of the timers. The test
+ *    uses four different timers that are loaded and stopped under
+ *    specific conditions in specific contexts.
+ *
+ *    - Four timers are used for this test.
+ *    - On each core, the rte_timer_manage() function is called from main loop
+ *      every 3 microseconds.
+ *
+ *    The autotest python script checks that the behavior is correct:
+ *
+ *    - timer0
+ *
+ *      - At initialization, timer0 is loaded by the master core, on master core
+ *        in "single" mode (time = 1 second).
+ *      - In the first 19 callbacks, timer0 is reloaded on the same core,
+ *        then, it is explicitly stopped at the 20th call.
+ *      - At t=25s, timer0 is reloaded once by timer2.
+ *
+ *    - timer1
+ *
+ *      - At initialization, timer1 is loaded by the master core, on the
+ *        master core in "single" mode (time = 2 seconds).
+ *      - In the first 9 callbacks, timer1 is reloaded on another
+ *        core. After the 10th callback, timer1 is not reloaded anymore.
+ *
+ *    - timer2
+ *
+ *      - At initialization, timer2 is loaded by the master core, on the
+ *        master core in "periodical" mode (time = 1 second).
+ *      - In the callback, when t=25s, it stops timer3 and reloads timer0
+ *        on the current core.
+ *
+ *    - timer3
+ *
+ *      - At initialization, timer3 is loaded by the master core, on
+ *        another core in "periodical" mode (time = 1 second).
+ *      - It is stopped at t=25s by timer2.
+ */
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <sys/queue.h>
+
+#include <cmdline_parse.h>
+
+#include <rte_common.h>
+#include <rte_log.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_launch.h>
+#include <rte_cycles.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_atomic.h>
+#include <rte_timer.h>
+#include <rte_random.h>
+
+#include "test.h"
+
+#define TEST_DURATION_S 30 /* in seconds */
+#define NB_TIMER 4
+
+#define RTE_LOGTYPE_TESTTIMER RTE_LOGTYPE_USER3
+
+static volatile uint64_t end_time;
+
+struct mytimerinfo {
+       struct rte_timer tim;
+       unsigned id;
+       unsigned count;
+};
+
+static struct mytimerinfo mytiminfo[NB_TIMER];
+
+static void timer_basic_cb(struct rte_timer *tim, void *arg);
+
+/* Synchronously (re)arm the rte_timer embedded in a mytimerinfo on the
+ * given lcore, registering 'handler' with the mytimerinfo itself as the
+ * callback argument. */
+static void
+mytimer_reset(struct mytimerinfo *info, unsigned period,
+             enum rte_timer_type mode, unsigned dst_lcore,
+             rte_timer_cb_t handler)
+{
+       rte_timer_reset_sync(&info->tim, period, mode, dst_lcore,
+                            handler, info);
+}
+
+/* Timer callback for the stress test.
+ *
+ * Based on one random draw, with a probability of 1/256 (~0.4%) each,
+ * the shared timer mytiminfo[0] is either re-armed on the current core,
+ * re-armed on the next enabled core, or stopped (asynchronously); in
+ * all other cases nothing is done.
+ *
+ * Fix: 'tim' was tagged __attribute__((unused)) although it is read by
+ * rte_timer_pending() below, so the misleading attribute is removed
+ * (it stays on 'arg', which really is unused). */
+static void
+timer_stress_cb(struct rte_timer *tim,
+               __attribute__((unused)) void *arg)
+{
+       long r;
+       unsigned lcore_id = rte_lcore_id();
+       uint64_t hz = rte_get_hpet_hz();
+
+       /* skip if the timer is already pending again */
+       if (rte_timer_pending(tim))
+               return;
+
+       r = rte_rand();
+       if ((r & 0xff) == 0) {
+               /* re-arm on this core, one HPET-second from now */
+               mytimer_reset(&mytiminfo[0], hz, SINGLE, lcore_id,
+                             timer_stress_cb);
+       }
+       else if ((r & 0xff) == 1) {
+               /* re-arm on the next enabled core (wrapping) */
+               mytimer_reset(&mytiminfo[0], hz, SINGLE,
+                             rte_get_next_lcore(lcore_id, 0, 1),
+                             timer_stress_cb);
+       }
+       else if ((r & 0xff) == 2) {
+               /* asynchronous stop of the timer */
+               rte_timer_stop(&mytiminfo[0].tim);
+       }
+}
+
+/* Per-lcore main loop of the stress test (launched on every core).
+ * Runs until end_time (global deadline in HPET cycles): services
+ * expired timers, simulates packet processing, and randomly (1/256
+ * each) re-arms the shared timer on a rotating core or stops it
+ * synchronously, racing against timer_stress_cb on other cores.
+ * Always returns 0. */
+static int
+timer_stress_main_loop(__attribute__((unused)) void *arg)
+{
+       uint64_t hz = rte_get_hpet_hz();
+       unsigned lcore_id = rte_lcore_id();
+       uint64_t cur_time;
+       int64_t diff = 0;
+       long r;
+
+       while (diff >= 0) {
+
+               /* call the timer handler on each core */
+               rte_timer_manage();
+
+               /* simulate the processing of a packet
+                * (3 us = 6000 cycles at 2 Ghz) */
+               rte_delay_us(3);
+
+               /* randomly stop or reset timer */
+               r = rte_rand();
+               /* rotate the target core on every iteration so resets
+                * land on many different cores */
+               lcore_id = rte_get_next_lcore(lcore_id, 0, 1);
+               if ((r & 0xff) == 0) {
+                       /* 100 us */
+                       mytimer_reset(&mytiminfo[0], hz/10000, SINGLE, lcore_id,
+                                     timer_stress_cb);
+               }
+               else if ((r & 0xff) == 1) {
+                       rte_timer_stop_sync(&mytiminfo[0].tim);
+               }
+               /* diff goes negative once the global deadline passes */
+               cur_time = rte_get_hpet_cycles();
+               diff = end_time - cur_time;
+       }
+
+       /* lcore_id was repurposed as the rotating target above; restore
+        * this core's own id for the log message */
+       lcore_id = rte_lcore_id();
+       RTE_LOG(INFO, TESTTIMER, "core %u finished\n", lcore_id);
+
+       return 0;
+}
+
+/* timer callback for basic tests.
+ *
+ * Shared by all four timers; 'arg' is the mytimerinfo of the timer that
+ * fired. Behavior depends on the timer id and its callback count:
+ *   - id 0: reloads itself on the same core for the first 19 calls,
+ *     then stops itself and scribbles 0xAA over its rte_timer on the
+ *     20th call.
+ *   - id 1: reloads itself on the next core for the first 9 calls,
+ *     then is not reloaded anymore.
+ *   - id 2 (periodic): on its 25th call, stops timer 3 and re-arms
+ *     timer 0 (after re-initializing its clobbered structure).
+ *   - id 3 (periodic): no action here; it is stopped by timer 2. */
+static void
+timer_basic_cb(struct rte_timer *tim, void *arg)
+{
+       struct mytimerinfo *timinfo = arg;
+       uint64_t hz = rte_get_hpet_hz();
+       unsigned lcore_id = rte_lcore_id();
+       uint64_t cur_time = rte_get_hpet_cycles();
+
+       /* skip if the timer has already been re-armed */
+       if (rte_timer_pending(tim))
+               return;
+
+       timinfo->count ++;
+
+       RTE_LOG(INFO, TESTTIMER,
+               "%"PRIu64": callback id=%u count=%u on core %u\n",
+               cur_time, timinfo->id, timinfo->count, lcore_id);
+
+       /* reload timer 0 on same core */
+       if (timinfo->id == 0 && timinfo->count < 20) {
+               mytimer_reset(timinfo, hz, SINGLE, lcore_id, timer_basic_cb);
+               return;
+       }
+
+       /* reload timer 1 on next core */
+       if (timinfo->id == 1 && timinfo->count < 10) {
+               mytimer_reset(timinfo, hz*2, SINGLE,
+                             rte_get_next_lcore(lcore_id, 0, 1),
+                             timer_basic_cb);
+               return;
+       }
+
+       /* Explicitly stop timer 0. Once stop() called, we can even
+        * erase the content of the structure: it is not referenced
+        * anymore by any code (in case of dynamic structure, it can
+        * be freed) */
+       if (timinfo->id == 0 && timinfo->count == 20) {
+
+               /* stop_sync() is not needed, because we know that the
+                * status of timer is only modified by this core */
+               rte_timer_stop(tim);
+               /* deliberately clobber the struct to prove the timer
+                * library no longer touches it after stop() */
+               memset(tim, 0xAA, sizeof(struct rte_timer));
+               return;
+       }
+
+       /* stop timer3, and restart a new timer0 (it was removed 5
+        * seconds ago) for a single shot */
+       if (timinfo->id == 2 && timinfo->count == 25) {
+               rte_timer_stop_sync(&mytiminfo[3].tim);
+
+               /* need to reinit because structure was erased with 0xAA */
+               rte_timer_init(&mytiminfo[0].tim);
+               mytimer_reset(&mytiminfo[0], hz, SINGLE, lcore_id,
+                             timer_basic_cb);
+       }
+}
+
+/* Per-lcore main loop of the basic test (launched on every core).
+ * The master core arms the four test timers (0: single/1s, 1:
+ * single/2s, 2: periodic/1s on master; 3: periodic/1s on the next
+ * core), then every core services rte_timer_manage() until the global
+ * end_time deadline. Always returns 0. */
+static int
+timer_basic_main_loop(__attribute__((unused)) void *arg)
+{
+       uint64_t hz = rte_get_hpet_hz();
+       unsigned lcore_id = rte_lcore_id();
+       uint64_t cur_time;
+       int64_t diff = 0;
+
+       /* launch all timers on core 0 */
+       if (lcore_id == rte_get_master_lcore()) {
+               mytimer_reset(&mytiminfo[0], hz, SINGLE, lcore_id,
+                             timer_basic_cb);
+               mytimer_reset(&mytiminfo[1], hz*2, SINGLE, lcore_id,
+                             timer_basic_cb);
+               mytimer_reset(&mytiminfo[2], hz, PERIODICAL, lcore_id,
+                             timer_basic_cb);
+               /* timer 3 runs on a different core than the one that
+                * armed it */
+               mytimer_reset(&mytiminfo[3], hz, PERIODICAL,
+                             rte_get_next_lcore(lcore_id, 0, 1),
+                             timer_basic_cb);
+       }
+
+       while (diff >= 0) {
+
+               /* call the timer handler on each core */
+               rte_timer_manage();
+
+               /* simulate the processing of a packet
+                * (3 us = 6000 cycles at 2 Ghz) */
+               rte_delay_us(3);
+
+               /* diff goes negative once the global deadline passes */
+               cur_time = rte_get_hpet_cycles();
+               diff = end_time - cur_time;
+       }
+       RTE_LOG(INFO, TESTTIMER, "core %u finished\n", lcore_id);
+
+       return 0;
+}
+
+/* Entry point for the timer autotest: runs the stress test and then
+ * the basic test, each for TEST_DURATION_S seconds on all lcores.
+ * Requires at least two lcores (the tests bounce timers between
+ * cores). Returns 0 on completion, -1 if too few lcores. */
+int
+test_timer(void)
+{
+       unsigned idx;
+       uint64_t now;
+       uint64_t hpet_hz;
+
+       if (rte_lcore_count() < 2) {
+               printf("not enough lcores for this test\n");
+               return -1;
+       }
+
+       /* put every timer descriptor in a known, initialized state */
+       for (idx = 0; idx < NB_TIMER; idx++) {
+               memset(&mytiminfo[idx], 0, sizeof(mytiminfo[idx]));
+               mytiminfo[idx].id = idx;
+               rte_timer_init(&mytiminfo[idx].tim);
+       }
+
+       /* global deadline for the stress test, in HPET cycles */
+       now = rte_get_hpet_cycles();
+       hpet_hz = rte_get_hpet_hz();
+       end_time = now + (hpet_hz * TEST_DURATION_S);
+
+       /* run the stress loop on every core (including the master) */
+       printf("Start timer stress tests (%d seconds)\n", TEST_DURATION_S);
+       rte_eal_mp_remote_launch(timer_stress_main_loop, NULL, CALL_MASTER);
+       rte_eal_mp_wait_lcore();
+
+       /* timer 0 may still be armed when the stress test ends */
+       rte_timer_stop_sync(&mytiminfo[0].tim);
+
+       /* fresh deadline for the basic test */
+       now = rte_get_hpet_cycles();
+       hpet_hz = rte_get_hpet_hz();
+       end_time = now + (hpet_hz * TEST_DURATION_S);
+
+       /* run the basic loop on every core (including the master) */
+       printf("Start timer basic tests (%d seconds)\n", TEST_DURATION_S);
+       rte_eal_mp_remote_launch(timer_basic_main_loop, NULL, CALL_MASTER);
+       rte_eal_mp_wait_lcore();
+
+       /* make sure no timer is left running */
+       for (idx = 0; idx < NB_TIMER; idx++)
+               rte_timer_stop_sync(&mytiminfo[idx].tim);
+
+       rte_timer_dump_stats();
+
+       return 0;
+}
diff --git a/app/test/test_version.c b/app/test/test_version.c
new file mode 100644 (file)
index 0000000..99b6f1c
--- /dev/null
@@ -0,0 +1,59 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <stdint.h>
+
+#include <cmdline_parse.h>
+
+#include <rte_common.h>
+#include <rte_version.h>
+
+#include "test.h"
+
+
+/* Check that rte_version() returns a non-NULL, non-empty string that
+ * starts with RTE_VER_PREFIX. Returns 0 on success, -1 on failure. */
+int
+test_version(void)
+{
+       const char *ver = rte_version();
+
+       if (ver == NULL)
+               return -1;
+       printf("Version string: '%s'\n", ver);
+       /* must be non-empty... */
+       if (ver[0] == '\0')
+               return -1;
+       /* ...and begin with the expected prefix (prefix length excludes
+        * the terminating NUL) */
+       if (strncmp(ver, RTE_VER_PREFIX, sizeof(RTE_VER_PREFIX) - 1) != 0)
+               return -1;
+       return 0;
+}
diff --git a/config/defconfig_i686-default-linuxapp-gcc b/config/defconfig_i686-default-linuxapp-gcc
new file mode 100644 (file)
index 0000000..ed54434
--- /dev/null
@@ -0,0 +1,240 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+#
+
+#
+# define executive environment
+#
+# CONFIG_RTE_EXEC_ENV can be linuxapp, baremetal
+#
+CONFIG_RTE_EXEC_ENV="linuxapp"
+CONFIG_RTE_EXEC_ENV_LINUXAPP=y
+
+#
+# machine can define specific variables or action for a specific board
+# RTE_MACHINE can be:
+# default  nothing specific
+# native   current machine
+# atm      Intel® Atom™ microarchitecture
+# nhm      Intel® microarchitecture code name Nehalem
+# wsm      Intel® microarchitecture code name Westmere
+# snb      Intel® microarchitecture code name Sandy Bridge
+# ivb      Intel® microarchitecture code name Ivy Bridge
+#
+# Warning: if your compiler does not support the relevant -march options,
+# it will be compiled with whatever latest processor the compiler supports!
+#
+CONFIG_RTE_MACHINE="native"
+
+#
+# define the architecture we compile for.
+# CONFIG_RTE_ARCH can be i686, x86_64, x86_64_32
+#
+CONFIG_RTE_ARCH="i686"
+CONFIG_RTE_ARCH_I686=y
+
+#
+# The compiler we use.
+# Can be gcc or icc.
+#
+CONFIG_RTE_TOOLCHAIN="gcc"
+CONFIG_RTE_TOOLCHAIN_GCC=y
+
+#
+# Compile libc directory
+#
+CONFIG_RTE_LIBC=n
+
+#
+# Compile newlib as libc from source
+#
+CONFIG_RTE_LIBC_NEWLIB_SRC=n
+
+#
+# Use binary newlib
+#
+CONFIG_RTE_LIBC_NEWLIB_BIN=n
+
+#
+# Use network headers from the host libc
+#
+CONFIG_RTE_LIBC_NETINCS=n
+
+#
+# Compile libgloss (newlib-stubs)
+#
+CONFIG_RTE_LIBGLOSS=n
+
+#
+# Compile Environment Abstraction Layer
+#
+CONFIG_RTE_LIBRTE_EAL=y
+CONFIG_RTE_MAX_LCORE=32
+CONFIG_RTE_MAX_NUMA_NODES=8
+CONFIG_RTE_MAX_MEMSEG=32
+CONFIG_RTE_MAX_MEMZONE=512
+CONFIG_RTE_MAX_TAILQ=32
+CONFIG_RTE_LOG_LEVEL=8
+CONFIG_RTE_LOG_HISTORY=256
+CONFIG_RTE_LIBEAL_USE_HPET=y
+CONFIG_RTE_EAL_ALLOW_INV_SOCKET_ID=n
+CONFIG_RTE_EAL_ALWAYS_PANIC_ON_ERROR=n
+
+#
+# Compile Environment Abstraction Layer for linux
+#
+CONFIG_RTE_LIBRTE_EAL_LINUXAPP=y
+
+#
+# Compile Environment Abstraction Layer for Bare metal
+#
+CONFIG_RTE_LIBRTE_EAL_BAREMETAL=n
+
+#
+# Compile generic ethernet library
+#
+CONFIG_RTE_LIBRTE_ETHER=y
+CONFIG_RTE_LIBRTE_ETHDEV_DEBUG=n
+CONFIG_RTE_MAX_ETHPORTS=32
+CONFIG_RTE_LIBRTE_IEEE1588=n
+
+#
+# Compile burst-oriented IGB PMD driver
+#
+CONFIG_RTE_LIBRTE_IGB_PMD=y
+CONFIG_RTE_LIBRTE_IGB_DEBUG_INIT=n
+CONFIG_RTE_LIBRTE_IGB_DEBUG_RX=n
+CONFIG_RTE_LIBRTE_IGB_DEBUG_TX=n
+CONFIG_RTE_LIBRTE_IGB_DEBUG_TX_FREE=n
+CONFIG_RTE_LIBRTE_IGB_DEBUG_DRIVER=n
+
+#
+# Compile burst-oriented IXGBE PMD driver
+#
+CONFIG_RTE_LIBRTE_IXGBE_PMD=y
+CONFIG_RTE_LIBRTE_IXGBE_DEBUG_INIT=n
+CONFIG_RTE_LIBRTE_IXGBE_DEBUG_RX=n
+CONFIG_RTE_LIBRTE_IXGBE_DEBUG_TX=n
+CONFIG_RTE_LIBRTE_IXGBE_DEBUG_TX_FREE=n
+CONFIG_RTE_LIBRTE_IXGBE_DEBUG_DRIVER=n
+
+#
+# Do prefetch of packet data within PMD driver receive function
+#
+CONFIG_RTE_PMD_PACKET_PREFETCH=y
+
+#
+# Compile librte_ring
+#
+CONFIG_RTE_LIBRTE_RING=y
+CONFIG_RTE_LIBRTE_RING_DEBUG=n
+
+#
+# Compile librte_mempool
+#
+CONFIG_RTE_LIBRTE_MEMPOOL=y
+CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE=512
+CONFIG_RTE_LIBRTE_MEMPOOL_DEBUG=n
+
+#
+# Compile librte_mbuf
+#
+CONFIG_RTE_LIBRTE_MBUF=y
+CONFIG_RTE_LIBRTE_MBUF_DEBUG=n
+CONFIG_RTE_MBUF_SCATTER_GATHER=y
+CONFIG_RTE_MBUF_REFCNT_ATOMIC=y
+CONFIG_RTE_PKTMBUF_HEADROOM=128
+
+#
+# Compile librte_timer
+#
+CONFIG_RTE_LIBRTE_TIMER=y
+CONFIG_RTE_LIBRTE_TIMER_DEBUG=n
+
+#
+# Compile librte_malloc
+#
+CONFIG_RTE_LIBRTE_MALLOC=y
+CONFIG_RTE_LIBRTE_MALLOC_DEBUG=n
+CONFIG_RTE_MALLOC_MEMZONE_SIZE=11M
+CONFIG_RTE_MALLOC_PER_NUMA_NODE=y
+
+#
+# Compile librte_cmdline
+#
+CONFIG_RTE_LIBRTE_CMDLINE=y
+
+#
+# Compile librte_hash
+#
+CONFIG_RTE_LIBRTE_HASH=y
+CONFIG_RTE_LIBRTE_HASH_DEBUG=n
+CONFIG_RTE_LIBRTE_HASH_USE_MEMZONE=n
+
+#
+# Compile librte_lpm
+#
+CONFIG_RTE_LIBRTE_LPM=y
+CONFIG_RTE_LIBRTE_LPM_DEBUG=n
+
+#
+# Compile librte_net
+#
+CONFIG_RTE_LIBRTE_NET=y
+
+#
+# Compile the test application
+#
+CONFIG_RTE_APP_TEST=y
+
+#
+# Compile the "check includes" application
+#
+CONFIG_RTE_APP_CHKINCS=y
+
+#
+# Compile the PMD test application
+#
+CONFIG_RTE_TEST_PMD=y
+CONFIG_RTE_TEST_PMD_RECORD_CORE_CYCLES=n
+CONFIG_RTE_TEST_PMD_RECORD_BURST_STATS=n
+
+#
+# gcov compilation/link directives
+#
+CONFIG_RTE_LIBRTE_GCOV=n
+
+#
+# warning directives
+#
+CONFIG_RTE_INSECURE_FUNCTION_WARNING=n
diff --git a/config/defconfig_i686-default-linuxapp-icc b/config/defconfig_i686-default-linuxapp-icc
new file mode 100644 (file)
index 0000000..cb0d017
--- /dev/null
@@ -0,0 +1,230 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+#
+
+#
+# define executive environment
+#
+# CONFIG_RTE_EXEC_ENV can be linuxapp, baremetal
+#
+CONFIG_RTE_EXEC_ENV="linuxapp"
+CONFIG_RTE_EXEC_ENV_LINUXAPP=y
+
+#
+# machine can define specific variables or action for a specific board
+# RTE_MACHINE can be:
+# default  nothing specific
+# native   current machine
+# atm      Intel® Atom™ microarchitecture
+# nhm      Intel® microarchitecture code name Nehalem
+# wsm      Intel® microarchitecture code name Westmere
+# snb      Intel® microarchitecture code name Sandy Bridge
+# ivb      Intel® microarchitecture code name Ivy Bridge
+#
+# Warning: if your compiler does not support the relevant -march options,
+# it will be compiled with whatever latest processor the compiler supports!
+#
+CONFIG_RTE_MACHINE="native"
+
+#
+# define the architecture we compile for.
+# CONFIG_RTE_ARCH can be i686, x86_64, x86_64_32
+#
+CONFIG_RTE_ARCH="i686"
+CONFIG_RTE_ARCH_I686=y
+
+#
+# The compiler we use.
+# Can be gcc or icc.
+#
+CONFIG_RTE_TOOLCHAIN="icc"
+CONFIG_RTE_TOOLCHAIN_ICC=y
+
+#
+# Compile libc directory
+#
+CONFIG_RTE_LIBC=n
+
+#
+# Compile newlib as libc from source
+#
+CONFIG_RTE_LIBC_NEWLIB_SRC=n
+
+#
+# Use binary newlib
+#
+CONFIG_RTE_LIBC_NEWLIB_BIN=n
+
+#
+# Use network headers from the host libc
+#
+CONFIG_RTE_LIBC_NETINCS=n
+
+#
+# Compile libgloss (newlib-stubs)
+#
+CONFIG_RTE_LIBGLOSS=n
+
+#
+# Compile Environment Abstraction Layer
+#
+CONFIG_RTE_LIBRTE_EAL=y
+CONFIG_RTE_MAX_LCORE=32
+CONFIG_RTE_MAX_NUMA_NODES=8
+CONFIG_RTE_MAX_MEMSEG=32
+CONFIG_RTE_MAX_MEMZONE=512
+CONFIG_RTE_MAX_TAILQ=32
+CONFIG_RTE_LOG_LEVEL=8
+CONFIG_RTE_LOG_HISTORY=256
+CONFIG_RTE_LIBEAL_USE_HPET=y
+CONFIG_RTE_EAL_ALLOW_INV_SOCKET_ID=n
+CONFIG_RTE_EAL_ALWAYS_PANIC_ON_ERROR=n
+
+#
+# Compile Environment Abstraction Layer for linux
+#
+CONFIG_RTE_LIBRTE_EAL_LINUXAPP=y
+
+#
+# Compile Environment Abstraction Layer for Bare metal
+#
+CONFIG_RTE_LIBRTE_EAL_BAREMETAL=n
+
+#
+# Compile generic ethernet library
+#
+CONFIG_RTE_LIBRTE_ETHER=y
+CONFIG_RTE_LIBRTE_ETHDEV_DEBUG=n
+CONFIG_RTE_MAX_ETHPORTS=32
+CONFIG_RTE_LIBRTE_IEEE1588=n
+
+#
+# Compile burst-oriented IGB PMD driver
+#
+CONFIG_RTE_LIBRTE_IGB_PMD=y
+CONFIG_RTE_LIBRTE_IGB_DEBUG_INIT=n
+CONFIG_RTE_LIBRTE_IGB_DEBUG_RX=n
+CONFIG_RTE_LIBRTE_IGB_DEBUG_TX=n
+CONFIG_RTE_LIBRTE_IGB_DEBUG_TX_FREE=n
+CONFIG_RTE_LIBRTE_IGB_DEBUG_DRIVER=n
+
+#
+# Compile burst-oriented IXGBE PMD driver
+#
+CONFIG_RTE_LIBRTE_IXGBE_PMD=y
+CONFIG_RTE_LIBRTE_IXGBE_DEBUG_INIT=n
+CONFIG_RTE_LIBRTE_IXGBE_DEBUG_RX=n
+CONFIG_RTE_LIBRTE_IXGBE_DEBUG_TX=n
+CONFIG_RTE_LIBRTE_IXGBE_DEBUG_TX_FREE=n
+CONFIG_RTE_LIBRTE_IXGBE_DEBUG_DRIVER=n
+
+#
+# Do prefetch of packet data within PMD driver receive function
+#
+CONFIG_RTE_PMD_PACKET_PREFETCH=y
+
+#
+# Compile librte_ring
+#
+CONFIG_RTE_LIBRTE_RING=y
+CONFIG_RTE_LIBRTE_RING_DEBUG=n
+
+#
+# Compile librte_mempool
+#
+CONFIG_RTE_LIBRTE_MEMPOOL=y
+CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE=512
+CONFIG_RTE_LIBRTE_MEMPOOL_DEBUG=n
+
+#
+# Compile librte_mbuf
+#
+CONFIG_RTE_LIBRTE_MBUF=y
+CONFIG_RTE_LIBRTE_MBUF_DEBUG=n
+CONFIG_RTE_MBUF_SCATTER_GATHER=y
+CONFIG_RTE_MBUF_REFCNT_ATOMIC=y
+CONFIG_RTE_PKTMBUF_HEADROOM=128
+
+#
+# Compile librte_timer
+#
+CONFIG_RTE_LIBRTE_TIMER=y
+CONFIG_RTE_LIBRTE_TIMER_DEBUG=n
+
+#
+# Compile librte_malloc
+#
+CONFIG_RTE_LIBRTE_MALLOC=y
+CONFIG_RTE_LIBRTE_MALLOC_DEBUG=n
+CONFIG_RTE_MALLOC_MEMZONE_SIZE=11M
+CONFIG_RTE_MALLOC_PER_NUMA_NODE=y
+
+#
+# Compile librte_cmdline
+#
+CONFIG_RTE_LIBRTE_CMDLINE=y
+
+#
+# Compile librte_hash
+#
+CONFIG_RTE_LIBRTE_HASH=y
+CONFIG_RTE_LIBRTE_HASH_DEBUG=n
+CONFIG_RTE_LIBRTE_HASH_USE_MEMZONE=n
+
+#
+# Compile librte_lpm
+#
+CONFIG_RTE_LIBRTE_LPM=y
+CONFIG_RTE_LIBRTE_LPM_DEBUG=n
+
+#
+# Compile librte_net
+#
+CONFIG_RTE_LIBRTE_NET=y
+
+#
+# Compile the test application
+#
+CONFIG_RTE_APP_TEST=y
+
+#
+# Compile the "check includes" application
+#
+CONFIG_RTE_APP_CHKINCS=y
+
+#
+# Compile the PMD test application
+#
+CONFIG_RTE_TEST_PMD=y
+CONFIG_RTE_TEST_PMD_RECORD_CORE_CYCLES=n
+CONFIG_RTE_TEST_PMD_RECORD_BURST_STATS=n
diff --git a/config/defconfig_x86_64-default-linuxapp-gcc b/config/defconfig_x86_64-default-linuxapp-gcc
new file mode 100644 (file)
index 0000000..3555187
--- /dev/null
@@ -0,0 +1,240 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+#
+
+#
+# define executive environment
+#
+# CONFIG_RTE_EXEC_ENV can be linuxapp, baremetal
+#
+CONFIG_RTE_EXEC_ENV="linuxapp"
+CONFIG_RTE_EXEC_ENV_LINUXAPP=y
+
+#
+# machine can define specific variables or action for a specific board
+# RTE_MACHINE can be:
+# default  nothing specific
+# native   current machine
+# atm      Intel® Atom™ microarchitecture
+# nhm      Intel® microarchitecture code name Nehalem
+# wsm      Intel® microarchitecture code name Westmere
+# snb      Intel® microarchitecture code name Sandy Bridge
+# ivb      Intel® microarchitecture code name Ivy Bridge
+#
+# Warning: if your compiler does not support the relevant -march options,
+# it will be compiled with whatever latest processor the compiler supports!
+#
+CONFIG_RTE_MACHINE="native"
+
+#
+# define the architecture we compile for.
+# CONFIG_RTE_ARCH can be i686, x86_64, x86_64_32
+#
+CONFIG_RTE_ARCH="x86_64"
+CONFIG_RTE_ARCH_X86_64=y
+
+#
+# The compiler we use.
+# Can be gcc or icc.
+#
+CONFIG_RTE_TOOLCHAIN="gcc"
+CONFIG_RTE_TOOLCHAIN_GCC=y
+
+#
+# Compile libc directory
+#
+CONFIG_RTE_LIBC=n
+
+#
+# Compile newlib as libc from source
+#
+CONFIG_RTE_LIBC_NEWLIB_SRC=n
+
+#
+# Use binary newlib
+#
+CONFIG_RTE_LIBC_NEWLIB_BIN=n
+
+#
+# Use network headers from the host libc
+#
+CONFIG_RTE_LIBC_NETINCS=n
+
+#
+# Compile libgloss (newlib-stubs)
+#
+CONFIG_RTE_LIBGLOSS=n
+
+#
+# Compile Environment Abstraction Layer
+#
+CONFIG_RTE_LIBRTE_EAL=y
+CONFIG_RTE_MAX_LCORE=32
+CONFIG_RTE_MAX_NUMA_NODES=8
+CONFIG_RTE_MAX_MEMSEG=32
+CONFIG_RTE_MAX_MEMZONE=512
+CONFIG_RTE_MAX_TAILQ=32
+CONFIG_RTE_LOG_LEVEL=8
+CONFIG_RTE_LOG_HISTORY=256
+CONFIG_RTE_LIBEAL_USE_HPET=y
+CONFIG_RTE_EAL_ALLOW_INV_SOCKET_ID=n
+CONFIG_RTE_EAL_ALWAYS_PANIC_ON_ERROR=n
+
+#
+# Compile Environment Abstraction Layer for linux
+#
+CONFIG_RTE_LIBRTE_EAL_LINUXAPP=y
+
+#
+# Compile Environment Abstraction Layer for Bare metal
+#
+CONFIG_RTE_LIBRTE_EAL_BAREMETAL=n
+
+#
+# Compile generic ethernet library
+#
+CONFIG_RTE_LIBRTE_ETHER=y
+CONFIG_RTE_LIBRTE_ETHDEV_DEBUG=n
+CONFIG_RTE_MAX_ETHPORTS=32
+CONFIG_RTE_LIBRTE_IEEE1588=n
+
+#
+# Compile burst-oriented IGB PMD driver
+#
+CONFIG_RTE_LIBRTE_IGB_PMD=y
+CONFIG_RTE_LIBRTE_IGB_DEBUG_INIT=n
+CONFIG_RTE_LIBRTE_IGB_DEBUG_RX=n
+CONFIG_RTE_LIBRTE_IGB_DEBUG_TX=n
+CONFIG_RTE_LIBRTE_IGB_DEBUG_TX_FREE=n
+CONFIG_RTE_LIBRTE_IGB_DEBUG_DRIVER=n
+
+#
+# Compile burst-oriented IXGBE PMD driver
+#
+CONFIG_RTE_LIBRTE_IXGBE_PMD=y
+CONFIG_RTE_LIBRTE_IXGBE_DEBUG_INIT=n
+CONFIG_RTE_LIBRTE_IXGBE_DEBUG_RX=n
+CONFIG_RTE_LIBRTE_IXGBE_DEBUG_TX=n
+CONFIG_RTE_LIBRTE_IXGBE_DEBUG_TX_FREE=n
+CONFIG_RTE_LIBRTE_IXGBE_DEBUG_DRIVER=n
+
+#
+# Do prefetch of packet data within PMD driver receive function
+#
+CONFIG_RTE_PMD_PACKET_PREFETCH=y
+
+#
+# Compile librte_ring
+#
+CONFIG_RTE_LIBRTE_RING=y
+CONFIG_RTE_LIBRTE_RING_DEBUG=n
+
+#
+# Compile librte_mempool
+#
+CONFIG_RTE_LIBRTE_MEMPOOL=y
+CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE=512
+CONFIG_RTE_LIBRTE_MEMPOOL_DEBUG=n
+
+#
+# Compile librte_mbuf
+#
+CONFIG_RTE_LIBRTE_MBUF=y
+CONFIG_RTE_LIBRTE_MBUF_DEBUG=n
+CONFIG_RTE_MBUF_SCATTER_GATHER=y
+CONFIG_RTE_MBUF_REFCNT_ATOMIC=y
+CONFIG_RTE_PKTMBUF_HEADROOM=128
+
+#
+# Compile librte_timer
+#
+CONFIG_RTE_LIBRTE_TIMER=y
+CONFIG_RTE_LIBRTE_TIMER_DEBUG=n
+
+#
+# Compile librte_malloc
+#
+CONFIG_RTE_LIBRTE_MALLOC=y
+CONFIG_RTE_LIBRTE_MALLOC_DEBUG=n
+CONFIG_RTE_MALLOC_MEMZONE_SIZE=11M
+CONFIG_RTE_MALLOC_PER_NUMA_NODE=y
+
+#
+# Compile librte_cmdline
+#
+CONFIG_RTE_LIBRTE_CMDLINE=y
+
+#
+# Compile librte_hash
+#
+CONFIG_RTE_LIBRTE_HASH=y
+CONFIG_RTE_LIBRTE_HASH_DEBUG=n
+CONFIG_RTE_LIBRTE_HASH_USE_MEMZONE=n
+
+#
+# Compile librte_lpm
+#
+CONFIG_RTE_LIBRTE_LPM=y
+CONFIG_RTE_LIBRTE_LPM_DEBUG=n
+
+#
+# Compile librte_net
+#
+CONFIG_RTE_LIBRTE_NET=y
+
+#
+# Compile the test application
+#
+CONFIG_RTE_APP_TEST=y
+
+#
+# Compile the "check includes" application
+#
+CONFIG_RTE_APP_CHKINCS=y
+
+#
+# Compile the PMD test application
+#
+CONFIG_RTE_TEST_PMD=y
+CONFIG_RTE_TEST_PMD_RECORD_CORE_CYCLES=n
+CONFIG_RTE_TEST_PMD_RECORD_BURST_STATS=n
+
+#
+# gcov compilation/link directives
+#
+CONFIG_RTE_LIBRTE_GCOV=n
+
+#
+# warning directives
+#
+CONFIG_RTE_INSECURE_FUNCTION_WARNING=n
diff --git a/config/defconfig_x86_64-default-linuxapp-icc b/config/defconfig_x86_64-default-linuxapp-icc
new file mode 100644 (file)
index 0000000..f527f53
--- /dev/null
@@ -0,0 +1,230 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+#
+
+#
+# define executive environment
+#
+# CONFIG_RTE_EXEC_ENV can be linuxapp, baremetal
+#
+CONFIG_RTE_EXEC_ENV="linuxapp"
+CONFIG_RTE_EXEC_ENV_LINUXAPP=y
+
+#
+# machine can define specific variables or action for a specific board
+# RTE_MACHINE can be:
+# default  nothing specific
+# native   current machine
+# atm      Intel® Atom™ microarchitecture
+# nhm      Intel® microarchitecture code name Nehalem
+# wsm      Intel® microarchitecture code name Westmere
+# snb      Intel® microarchitecture code name Sandy Bridge
+# ivb      Intel® microarchitecture code name Ivy Bridge
+#
+# Warning: if your compiler does not support the relevant -march options,
+# it will be compiled with whatever latest processor the compiler supports!
+#
+CONFIG_RTE_MACHINE="native"
+
+#
+# define the architecture we compile for.
+# CONFIG_RTE_ARCH can be i686, x86_64, x86_64_32
+#
+CONFIG_RTE_ARCH="x86_64"
+CONFIG_RTE_ARCH_X86_64=y
+
+#
+# The compiler we use.
+# Can be gcc or icc.
+#
+CONFIG_RTE_TOOLCHAIN="icc"
+CONFIG_RTE_TOOLCHAIN_ICC=y
+
+#
+# Compile libc directory
+#
+CONFIG_RTE_LIBC=n
+
+#
+# Compile newlib as libc from source
+#
+CONFIG_RTE_LIBC_NEWLIB_SRC=n
+
+#
+# Use binary newlib
+#
+CONFIG_RTE_LIBC_NEWLIB_BIN=n
+
+#
+# Use libc network include files (netincs)
+#
+CONFIG_RTE_LIBC_NETINCS=n
+
+#
+# Compile libgloss (newlib-stubs)
+#
+CONFIG_RTE_LIBGLOSS=n
+
+#
+# Compile Environment Abstraction Layer
+#
+CONFIG_RTE_LIBRTE_EAL=y
+CONFIG_RTE_MAX_LCORE=32
+CONFIG_RTE_MAX_NUMA_NODES=8
+CONFIG_RTE_MAX_MEMSEG=32
+CONFIG_RTE_MAX_MEMZONE=512
+CONFIG_RTE_MAX_TAILQ=32
+CONFIG_RTE_LOG_LEVEL=8
+CONFIG_RTE_LOG_HISTORY=256
+CONFIG_RTE_LIBEAL_USE_HPET=y
+CONFIG_RTE_EAL_ALLOW_INV_SOCKET_ID=n
+CONFIG_RTE_EAL_ALWAYS_PANIC_ON_ERROR=n
+
+#
+# Compile Environment Abstraction Layer for linux
+#
+CONFIG_RTE_LIBRTE_EAL_LINUXAPP=y
+
+#
+# Compile Environment Abstraction Layer for Bare metal
+#
+CONFIG_RTE_LIBRTE_EAL_BAREMETAL=n
+
+#
+# Compile generic ethernet library
+#
+CONFIG_RTE_LIBRTE_ETHER=y
+CONFIG_RTE_LIBRTE_ETHDEV_DEBUG=n
+CONFIG_RTE_MAX_ETHPORTS=32
+CONFIG_RTE_LIBRTE_IEEE1588=n
+
+#
+# Compile burst-oriented IGB PMD driver
+#
+CONFIG_RTE_LIBRTE_IGB_PMD=y
+CONFIG_RTE_LIBRTE_IGB_DEBUG_INIT=n
+CONFIG_RTE_LIBRTE_IGB_DEBUG_RX=n
+CONFIG_RTE_LIBRTE_IGB_DEBUG_TX=n
+CONFIG_RTE_LIBRTE_IGB_DEBUG_TX_FREE=n
+CONFIG_RTE_LIBRTE_IGB_DEBUG_DRIVER=n
+
+#
+# Compile burst-oriented IXGBE PMD driver
+#
+CONFIG_RTE_LIBRTE_IXGBE_PMD=y
+CONFIG_RTE_LIBRTE_IXGBE_DEBUG_INIT=n
+CONFIG_RTE_LIBRTE_IXGBE_DEBUG_RX=n
+CONFIG_RTE_LIBRTE_IXGBE_DEBUG_TX=n
+CONFIG_RTE_LIBRTE_IXGBE_DEBUG_TX_FREE=n
+CONFIG_RTE_LIBRTE_IXGBE_DEBUG_DRIVER=n
+
+#
+# Do prefetch of packet data within PMD driver receive function
+#
+CONFIG_RTE_PMD_PACKET_PREFETCH=y
+
+#
+# Compile librte_ring
+#
+CONFIG_RTE_LIBRTE_RING=y
+CONFIG_RTE_LIBRTE_RING_DEBUG=n
+
+#
+# Compile librte_mempool
+#
+CONFIG_RTE_LIBRTE_MEMPOOL=y
+CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE=512
+CONFIG_RTE_LIBRTE_MEMPOOL_DEBUG=n
+
+#
+# Compile librte_mbuf
+#
+CONFIG_RTE_LIBRTE_MBUF=y
+CONFIG_RTE_LIBRTE_MBUF_DEBUG=n
+CONFIG_RTE_MBUF_SCATTER_GATHER=y
+CONFIG_RTE_MBUF_REFCNT_ATOMIC=y
+CONFIG_RTE_PKTMBUF_HEADROOM=128
+
+#
+# Compile librte_timer
+#
+CONFIG_RTE_LIBRTE_TIMER=y
+CONFIG_RTE_LIBRTE_TIMER_DEBUG=n
+
+#
+# Compile librte_malloc
+#
+CONFIG_RTE_LIBRTE_MALLOC=y
+CONFIG_RTE_LIBRTE_MALLOC_DEBUG=n
+CONFIG_RTE_MALLOC_MEMZONE_SIZE=11M
+CONFIG_RTE_MALLOC_PER_NUMA_NODE=y
+
+#
+# Compile librte_cmdline
+#
+CONFIG_RTE_LIBRTE_CMDLINE=y
+
+#
+# Compile librte_hash
+#
+CONFIG_RTE_LIBRTE_HASH=y
+CONFIG_RTE_LIBRTE_HASH_DEBUG=n
+CONFIG_RTE_LIBRTE_HASH_USE_MEMZONE=n
+
+#
+# Compile librte_lpm
+#
+CONFIG_RTE_LIBRTE_LPM=y
+CONFIG_RTE_LIBRTE_LPM_DEBUG=n
+
+#
+# Compile librte_net
+#
+CONFIG_RTE_LIBRTE_NET=y
+
+#
+# Compile the test application
+#
+CONFIG_RTE_APP_TEST=y
+
+#
+# Compile the "check includes" application
+#
+CONFIG_RTE_APP_CHKINCS=y
+
+#
+# Compile the PMD test application
+#
+CONFIG_RTE_TEST_PMD=y
+CONFIG_RTE_TEST_PMD_RECORD_CORE_CYCLES=n
+CONFIG_RTE_TEST_PMD_RECORD_BURST_STATS=n
diff --git a/examples/cmdline/482246_CmdLine_Sample_App_Guide_Rev1.1.pdf b/examples/cmdline/482246_CmdLine_Sample_App_Guide_Rev1.1.pdf
new file mode 100644 (file)
index 0000000..796741f
Binary files /dev/null and b/examples/cmdline/482246_CmdLine_Sample_App_Guide_Rev1.1.pdf differ
diff --git a/examples/cmdline/Makefile b/examples/cmdline/Makefile
new file mode 100644 (file)
index 0000000..0d63d19
--- /dev/null
@@ -0,0 +1,52 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+ifeq ($(RTE_SDK),)
+$(error "Please define RTE_SDK environment variable")
+endif
+
+# Default target, can be overridden by command line or environment
+RTE_TARGET ?= x86_64-default-linuxapp-gcc
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# binary name
+APP = cmdline
+
+# all source are stored in SRCS-y
+SRCS-y := main.c commands.c parse_obj_list.c
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+include $(RTE_SDK)/mk/rte.extapp.mk
diff --git a/examples/cmdline/commands.c b/examples/cmdline/commands.c
new file mode 100644 (file)
index 0000000..5ce239c
--- /dev/null
@@ -0,0 +1,282 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+/*
+ * Copyright (c) 2009, Olivier MATZ <zer0@droids-corp.org>
+ * All rights reserved.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of the University of California, Berkeley nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <netinet/in.h>
+#include <termios.h>
+#ifndef __linux__
+#include <net/socket.h>
+#endif
+
+#include <cmdline_rdline.h>
+#include <cmdline_parse.h>
+#include <cmdline_parse_ipaddr.h>
+#include <cmdline_parse_num.h>
+#include <cmdline_parse_string.h>
+#include <cmdline.h>
+
+#include <rte_string_fns.h>
+
+#include "parse_obj_list.h"
+
+struct object_list global_obj_list;
+
+/* not defined under linux */
+#ifndef NIPQUAD
+#define NIPQUAD_FMT "%u.%u.%u.%u"
+#define NIPQUAD(addr)                          \
+       (unsigned)((unsigned char *)&addr)[0],  \
+       (unsigned)((unsigned char *)&addr)[1],  \
+       (unsigned)((unsigned char *)&addr)[2],  \
+       (unsigned)((unsigned char *)&addr)[3]
+#endif
+
+#ifndef NIP6
+#define NIP6_FMT "%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x"
+#define NIP6(addr)                                     \
+       (unsigned)((addr).s6_addr[0]),                  \
+       (unsigned)((addr).s6_addr[1]),                  \
+       (unsigned)((addr).s6_addr[2]),                  \
+       (unsigned)((addr).s6_addr[3]),                  \
+       (unsigned)((addr).s6_addr[4]),                  \
+       (unsigned)((addr).s6_addr[5]),                  \
+       (unsigned)((addr).s6_addr[6]),                  \
+       (unsigned)((addr).s6_addr[7]),                  \
+       (unsigned)((addr).s6_addr[8]),                  \
+       (unsigned)((addr).s6_addr[9]),                  \
+       (unsigned)((addr).s6_addr[10]),                 \
+       (unsigned)((addr).s6_addr[11]),                 \
+       (unsigned)((addr).s6_addr[12]),                 \
+       (unsigned)((addr).s6_addr[13]),                 \
+       (unsigned)((addr).s6_addr[14]),                 \
+       (unsigned)((addr).s6_addr[15])
+#endif
+
+
+/**********************************************************/
+
+struct cmd_obj_del_show_result {
+       cmdline_fixed_string_t action;
+       struct object *obj;
+};
+
+static void cmd_obj_del_show_parsed(void *parsed_result,
+                                   struct cmdline *cl,
+                                   __attribute__((unused)) void *data)
+{
+       struct cmd_obj_del_show_result *res = parsed_result;
+       char ip_str[INET6_ADDRSTRLEN];
+
+       if (res->obj->ip.family == AF_INET)
+               rte_snprintf(ip_str, sizeof(ip_str), NIPQUAD_FMT,
+                        NIPQUAD(res->obj->ip.addr.ipv4));
+       else
+               rte_snprintf(ip_str, sizeof(ip_str), NIP6_FMT,
+                        NIP6(res->obj->ip.addr.ipv6));
+
+       if (strcmp(res->action, "del") == 0) {
+               SLIST_REMOVE(&global_obj_list, res->obj, object, next);
+               cmdline_printf(cl, "Object %s removed, ip=%s\n",
+                              res->obj->name, ip_str);
+               free(res->obj);
+       }
+       else if (strcmp(res->action, "show") == 0) {
+               cmdline_printf(cl, "Object %s, ip=%s\n",
+                              res->obj->name, ip_str);
+       }
+}
+
+cmdline_parse_token_string_t cmd_obj_action =
+       TOKEN_STRING_INITIALIZER(struct cmd_obj_del_show_result,
+                                action, "show#del");
+parse_token_obj_list_t cmd_obj_obj =
+       TOKEN_OBJ_LIST_INITIALIZER(struct cmd_obj_del_show_result, obj,
+                                  &global_obj_list);
+
+cmdline_parse_inst_t cmd_obj_del_show = {
+       .f = cmd_obj_del_show_parsed,  /* function to call */
+       .data = NULL,      /* 2nd arg of func */
+       .help_str = "Show/del an object",
+       .tokens = {        /* token list, NULL terminated */
+               (void *)&cmd_obj_action,
+               (void *)&cmd_obj_obj,
+               NULL,
+       },
+};
+
+/**********************************************************/
+
+struct cmd_obj_add_result {
+       cmdline_fixed_string_t action;
+       cmdline_fixed_string_t name;
+       cmdline_ipaddr_t ip;
+};
+
+static void cmd_obj_add_parsed(void *parsed_result,
+                              struct cmdline *cl,
+                              __attribute__((unused)) void *data)
+{
+       struct cmd_obj_add_result *res = parsed_result;
+       struct object *o;
+       char ip_str[INET6_ADDRSTRLEN];
+
+       SLIST_FOREACH(o, &global_obj_list, next) {
+               if (!strcmp(res->name, o->name)) {
+                       cmdline_printf(cl, "Object %s already exist\n", res->name);
+                       return;
+               }
+               break;
+       }
+
+       o = malloc(sizeof(*o));
+       if (!o) {
+               cmdline_printf(cl, "mem error\n");
+               return;
+       }
+       rte_snprintf(o->name, sizeof(o->name), "%s", res->name);
+       o->ip = res->ip;
+       SLIST_INSERT_HEAD(&global_obj_list, o, next);
+
+       if (o->ip.family == AF_INET)
+               rte_snprintf(ip_str, sizeof(ip_str), NIPQUAD_FMT,
+                        NIPQUAD(o->ip.addr.ipv4));
+       else
+               rte_snprintf(ip_str, sizeof(ip_str), NIP6_FMT,
+                        NIP6(o->ip.addr.ipv6));
+
+       cmdline_printf(cl, "Object %s added, ip=%s\n",
+                      o->name, ip_str);
+}
+
+cmdline_parse_token_string_t cmd_obj_action_add =
+       TOKEN_STRING_INITIALIZER(struct cmd_obj_add_result, action, "add");
+cmdline_parse_token_string_t cmd_obj_name =
+       TOKEN_STRING_INITIALIZER(struct cmd_obj_add_result, name, NULL);
+cmdline_parse_token_ipaddr_t cmd_obj_ip =
+       TOKEN_IPADDR_INITIALIZER(struct cmd_obj_add_result, ip);
+
+cmdline_parse_inst_t cmd_obj_add = {
+       .f = cmd_obj_add_parsed,  /* function to call */
+       .data = NULL,      /* 2nd arg of func */
+       .help_str = "Add an object (name, val)",
+       .tokens = {        /* token list, NULL terminated */
+               (void *)&cmd_obj_action_add,
+               (void *)&cmd_obj_name,
+               (void *)&cmd_obj_ip,
+               NULL,
+       },
+};
+
+/**********************************************************/
+
+struct cmd_help_result {
+       cmdline_fixed_string_t help;
+};
+
+static void cmd_help_parsed(__attribute__((unused)) void *parsed_result,
+                           struct cmdline *cl,
+                           __attribute__((unused)) void *data)
+{
+       cmdline_printf(cl,
+                      "Demo example of command line interface in RTE\n\n"
+                      "This is a readline-like interface that can be used to\n"
+                      "debug your RTE application. It supports some features\n"
+                      "of GNU readline like completion, cut/paste, and some\n"
+                      "other special bindings.\n\n"
+                      "This demo shows how rte_cmdline library can be\n"
+                      "extended to handle a list of objects. There are\n"
+                      "3 commands:\n"
+                      "- add obj_name IP\n"
+                      "- del obj_name\n"
+                      "- show obj_name\n\n");
+}
+
+cmdline_parse_token_string_t cmd_help_help =
+       TOKEN_STRING_INITIALIZER(struct cmd_help_result, help, "help");
+
+cmdline_parse_inst_t cmd_help = {
+       .f = cmd_help_parsed,  /* function to call */
+       .data = NULL,      /* 2nd arg of func */
+       .help_str = "show help",
+       .tokens = {        /* token list, NULL terminated */
+               (void *)&cmd_help_help,
+               NULL,
+       },
+};
+
+
+/**********************************************************/
+/**********************************************************/
+/****** CONTEXT (list of instructions) */
+
+cmdline_parse_ctx_t main_ctx[] = {
+       (cmdline_parse_inst_t *)&cmd_obj_del_show,
+       (cmdline_parse_inst_t *)&cmd_obj_add,
+       (cmdline_parse_inst_t *)&cmd_help,
+       NULL,
+};
+
diff --git a/examples/cmdline/commands.h b/examples/cmdline/commands.h
new file mode 100644 (file)
index 0000000..b13a25b
--- /dev/null
@@ -0,0 +1,41 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#ifndef _COMMANDS_H_
+#define _COMMANDS_H_
+
+extern cmdline_parse_ctx_t main_ctx[];
+
+#endif /* _COMMANDS_H_ */
diff --git a/examples/cmdline/main.c b/examples/cmdline/main.c
new file mode 100644 (file)
index 0000000..c78c1cc
--- /dev/null
@@ -0,0 +1,100 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+/*
+ * Copyright (c) 2009, Olivier MATZ <zer0@droids-corp.org>
+ * All rights reserved.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of the University of California, Berkeley nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <stdint.h>
+#include <errno.h>
+#include <termios.h>
+#include <sys/queue.h>
+
+#include <cmdline_rdline.h>
+#include <cmdline_parse.h>
+#include <cmdline_socket.h>
+#include <cmdline.h>
+
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_debug.h>
+
+#include "commands.h"
+#include "main.h"
+
+int MAIN(int argc, char **argv)
+{
+       int ret;
+       struct cmdline *cl;
+
+       ret = rte_eal_init(argc, argv);
+       if (ret < 0)
+               rte_panic("Cannot init EAL\n");
+
+       cl = cmdline_stdin_new(main_ctx, "example> ");
+       if (cl == NULL)
+               rte_panic("Cannot create cmdline instance\n");
+       cmdline_interact(cl);
+       cmdline_stdin_exit(cl);
+
+       return 0;
+}
diff --git a/examples/cmdline/main.h b/examples/cmdline/main.h
new file mode 100644 (file)
index 0000000..6027cb5
--- /dev/null
@@ -0,0 +1,47 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#ifndef _MAIN_H_
+#define _MAIN_H_
+
+#ifdef RTE_EXEC_ENV_BAREMETAL
+#define MAIN _main
+#else
+#define MAIN main
+#endif
+
+int MAIN(int argc, char **argv);
+
+#endif /* _MAIN_H_ */
diff --git a/examples/cmdline/parse_obj_list.c b/examples/cmdline/parse_obj_list.c
new file mode 100644 (file)
index 0000000..7aa9f9e
--- /dev/null
@@ -0,0 +1,164 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+/*
+ * Copyright (c) 2009, Olivier MATZ <zer0@droids-corp.org>
+ * All rights reserved.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of the University of California, Berkeley nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <inttypes.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <ctype.h>
+#include <string.h>
+#include <netinet/in.h>
+
+#include <cmdline_parse.h>
+#include <cmdline_parse_ipaddr.h>
+
+#include <rte_string_fns.h>
+
+#include "parse_obj_list.h"
+
+/* This file is an example of extension of libcmdline. It provides an
+ * example of objects stored in a list. */
+
+struct cmdline_token_ops token_obj_list_ops = {
+       .parse = parse_obj_list,
+       .complete_get_nb = complete_get_nb_obj_list,
+       .complete_get_elt = complete_get_elt_obj_list,
+       .get_help = get_help_obj_list,
+};
+
+int
+parse_obj_list(cmdline_parse_token_hdr_t *tk, const char *buf, void *res)
+{
+       struct token_obj_list *tk2 = (struct token_obj_list *)tk;
+       struct token_obj_list_data *tkd = &tk2->obj_list_data;
+       struct object *o;
+       unsigned int token_len = 0;
+
+       if (*buf == 0)
+               return -1;
+
+       while(!cmdline_isendoftoken(buf[token_len]))
+               token_len++;
+
+       SLIST_FOREACH(o, tkd->list, next) {
+               if (token_len != strnlen(o->name, OBJ_NAME_LEN_MAX))
+                       continue;
+               if (strncmp(buf, o->name, token_len))
+                       continue;
+               break;
+       }
+       if (!o) /* not found */
+               return -1;
+
+       /* store the address of object in structure */
+       if (res)
+               *(struct object **)res = o;
+
+       return token_len;
+}
+
+int complete_get_nb_obj_list(cmdline_parse_token_hdr_t *tk)
+{
+       struct token_obj_list *tk2 = (struct token_obj_list *)tk;
+       struct token_obj_list_data *tkd = &tk2->obj_list_data;
+       struct object *o;
+       int ret = 0;
+
+       SLIST_FOREACH(o, tkd->list, next) {
+               ret ++;
+       }
+       return ret;
+}
+
+int complete_get_elt_obj_list(cmdline_parse_token_hdr_t *tk,
+                             int idx, char *dstbuf, unsigned int size)
+{
+       struct token_obj_list *tk2 = (struct token_obj_list *)tk;
+       struct token_obj_list_data *tkd = &tk2->obj_list_data;
+       struct object *o;
+       int i = 0;
+       unsigned len;
+
+       SLIST_FOREACH(o, tkd->list, next) {
+               if (i++ == idx)
+                       break;
+       }
+       if (!o)
+               return -1;
+
+       len = strnlen(o->name, OBJ_NAME_LEN_MAX);
+       if ((len + 1) > size)
+               return -1;
+
+       if (dstbuf)
+               rte_snprintf(dstbuf, size, "%s", o->name);
+
+       return 0;
+}
+
+
+int get_help_obj_list(__attribute__((unused)) cmdline_parse_token_hdr_t *tk,
+                     char *dstbuf, unsigned int size)
+{
+       rte_snprintf(dstbuf, size, "Obj-List");
+       return 0;
+}
diff --git a/examples/cmdline/parse_obj_list.h b/examples/cmdline/parse_obj_list.h
new file mode 100644 (file)
index 0000000..eb25fd7
--- /dev/null
@@ -0,0 +1,113 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+/*
+ * Copyright (c) 2009, Olivier MATZ <zer0@droids-corp.org>
+ * All rights reserved.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of the University of California, Berkeley nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _PARSE_OBJ_LIST_H_
+#define _PARSE_OBJ_LIST_H_
+
+/* This file is an example of extension of libcmdline. It provides an
+ * example of objects stored in a list. */
+
+#include <sys/queue.h>
+#include <cmdline_parse.h>
+
+#define OBJ_NAME_LEN_MAX 64
+
+struct object {
+       SLIST_ENTRY(object) next;
+       char name[OBJ_NAME_LEN_MAX];
+       cmdline_ipaddr_t ip;
+};
+
+/* define struct object_list */
+SLIST_HEAD(object_list, object);
+
+/* data is a pointer to a list */
+struct token_obj_list_data {
+       struct object_list *list;
+};
+
+struct token_obj_list {
+       struct cmdline_token_hdr hdr;
+       struct token_obj_list_data obj_list_data;
+};
+typedef struct token_obj_list parse_token_obj_list_t;
+
+extern struct cmdline_token_ops token_obj_list_ops;
+
+int parse_obj_list(cmdline_parse_token_hdr_t *tk, const char *srcbuf, void *res);
+int complete_get_nb_obj_list(cmdline_parse_token_hdr_t *tk);
+int complete_get_elt_obj_list(cmdline_parse_token_hdr_t *tk, int idx,
+                             char *dstbuf, unsigned int size);
+int get_help_obj_list(cmdline_parse_token_hdr_t *tk, char *dstbuf, unsigned int size);
+
+#define TOKEN_OBJ_LIST_INITIALIZER(structure, field, obj_list_ptr)  \
+{                                                                  \
+       .hdr = {                                                    \
+               .ops = &token_obj_list_ops,                         \
+               .offset = offsetof(structure, field),               \
+       },                                                          \
+               .obj_list_data = {                                  \
+               .list = obj_list_ptr,                               \
+       },                                                          \
+}
+
+#endif /* _PARSE_OBJ_LIST_H_ */
diff --git a/examples/dpdk_qat/497691_QuickAssist_in_DPDK_Env_Sample_App_Guide_Rev1.0.pdf b/examples/dpdk_qat/497691_QuickAssist_in_DPDK_Env_Sample_App_Guide_Rev1.0.pdf
new file mode 100644 (file)
index 0000000..3e84153
Binary files /dev/null and b/examples/dpdk_qat/497691_QuickAssist_in_DPDK_Env_Sample_App_Guide_Rev1.0.pdf differ
diff --git a/examples/dpdk_qat/Makefile b/examples/dpdk_qat/Makefile
new file mode 100644 (file)
index 0000000..f1b0cbb
--- /dev/null
@@ -0,0 +1,81 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+ifeq ($(RTE_SDK),)
+$(error "Please define RTE_SDK environment variable")
+endif
+
+ifeq ($(ICP_ROOT),)
+$(error "Please define ICP_ROOT environment variable")
+endif
+
+# Default target, can be overridden by command line or environment
+RTE_TARGET ?= x86_64-default-linuxapp-gcc
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+ifneq ($(CONFIG_RTE_EXEC_ENV),"linuxapp")
+$(error This application can only operate in a linuxapp environment, \
+please change the definition of the RTE_TARGET environment variable)
+endif
+
+ifneq ($(CONFIG_RTE_ARCH),"x86_64")
+$(error This application can only operate in a x86_64 environment, \
+please change the definition of the RTE_TARGET environment variable)
+endif
+
+# binary name
+APP = dpdk_qat
+
+# all source are stored in SRCS-y
+SRCS-y := main.c crypto.c
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -I$(ICP_ROOT)/quickassist/include \
+               -I$(ICP_ROOT)/quickassist/include/lac \
+               -I$(ICP_ROOT)/quickassist/lookaside/access_layer/include
+
+LDLIBS += -L$(ICP_ROOT)/build
+LDLIBS += $(ICP_ROOT)/build/icp_qa_al.a \
+               -losal \
+               -ladf_proxy \
+               -lcrypto
+
+# workaround for a gcc bug with noreturn attribute
+# http://gcc.gnu.org/bugzilla/show_bug.cgi?id=12603
+ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
+CFLAGS_main.o += -Wno-return-type
+endif
+
+include $(RTE_SDK)/mk/rte.extapp.mk
diff --git a/examples/dpdk_qat/config_files/shumway_B0/dh89xxcc_qa_dev0.conf b/examples/dpdk_qat/config_files/shumway_B0/dh89xxcc_qa_dev0.conf
new file mode 100644 (file)
index 0000000..6949b43
--- /dev/null
@@ -0,0 +1,537 @@
+#########################################################################
+#
+# @par
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+#########################################################################
+########################################################
+#
+# This file is the configuration for a single dh89xxcc_qa
+# device.
+#
+# Each device has up to two accelerators.
+# - The client may load balance between these
+#   accelerators.
+# Each accelerator has 8 independent ring banks.
+# - The interrupt for each can be directed to a
+#   specific core.
+# Each ring bank has 16 rings (hardware assisted queues).
+#
+########################################################
+
+##############################################
+# General Section
+##############################################
+
+[GENERAL]
+ServicesEnabled = cy0;cy1
+
+# Look Aside Cryptographic Configuration
+cyHmacAuthMode = 1
+
+# Look Aside Compression Configuration
+dcTotalSRAMAvailable = 0
+dcSRAMPerInstance = 0
+
+# Firmware Location Configuration
+Firmware_UofPath = uof_firmware.bin
+Firmware_MmpPath = mmp_firmware.bin
+
+# QAT Parameters
+Accel0AdminBankNumber = 0
+Accel0AcceleratorNumber = 0
+Accel0AdminTx = 0
+Accel0AdminRx = 1
+
+Accel1AcceleratorNumber = 1
+Accel1AdminBankNumber = 0
+Accel1AdminTx = 0
+Accel1AdminRx = 1
+
+#Statistics, valid values: 1,0
+statsGeneral = 1
+statsDc = 1
+statsDh = 1
+statsDrbg = 1
+statsDsa = 1
+statsEcc = 1
+statsKeyGen = 1
+statsLn = 1
+statsPrime = 1
+statsRsa = 1
+statsSym = 1
+
+#Debug feature, if set to 1 it enables additional entries in /proc filesystem
+ProcDebug = 1
+
+
+################################################
+#
+# Hardware Access Ring Bank Configuration
+# Each Accelerator has 8 ring banks (0-7)
+# If the OS supports MSI-X, each ring bank has an
+# steerable MSI-x interrupt which may be
+# affinitized to a particular node/core.
+#
+################################################
+
+
+[Accelerator0]
+Bank0InterruptCoalescingEnabled = 1
+Bank0InterruptCoalescingTimerNs = 10000
+Bank0CoreIDAffinity = 0
+Bank0InterruptCoalescingNumResponses = 0
+
+Bank1InterruptCoalescingEnabled = 1
+Bank1InterruptCoalescingTimerNs = 10000
+Bank1CoreIDAffinity = 2
+Bank1InterruptCoalescingNumResponses = 0
+
+Bank2InterruptCoalescingEnabled = 1
+Bank2InterruptCoalescingTimerNs = 10000
+Bank2CoreIDAffinity = 4
+Bank2InterruptCoalescingNumResponses = 0
+
+Bank3InterruptCoalescingEnabled = 1
+Bank3InterruptCoalescingTimerNs = 10000
+Bank3CoreIDAffinity = 6
+Bank3InterruptCoalescingNumResponses = 0
+
+Bank4InterruptCoalescingEnabled = 1
+Bank4InterruptCoalescingTimerNs = 10000
+Bank4CoreIDAffinity = 16
+Bank4InterruptCoalescingNumResponses = 0
+
+Bank5InterruptCoalescingEnabled = 1
+Bank5InterruptCoalescingTimerNs = 10000
+Bank5CoreIDAffinity = 18
+Bank5InterruptCoalescingNumResponses = 0
+
+Bank6InterruptCoalescingEnabled = 1
+Bank6InterruptCoalescingTimerNs = 10000
+Bank6CoreIDAffinity = 20
+Bank6InterruptCoalescingNumResponses = 0
+
+Bank7InterruptCoalescingEnabled = 1
+Bank7InterruptCoalescingTimerNs = 10000
+Bank7CoreIDAffinity = 22
+Bank7InterruptCoalescingNumResponses = 0
+
+[Accelerator1]
+Bank0InterruptCoalescingEnabled = 1
+Bank0InterruptCoalescingTimerNs = 10000
+Bank0CoreIDAffinity = 1
+Bank0InterruptCoalescingNumResponses = 0
+
+Bank1InterruptCoalescingEnabled = 1
+Bank1InterruptCoalescingTimerNs = 10000
+Bank1CoreIDAffinity = 3
+Bank1InterruptCoalescingNumResponses = 0
+
+Bank2InterruptCoalescingEnabled = 1
+Bank2InterruptCoalescingTimerNs = 10000
+Bank2CoreIDAffinity = 5
+Bank2InterruptCoalescingNumResponses = 0
+
+Bank3InterruptCoalescingEnabled = 1
+Bank3InterruptCoalescingTimerNs = 10000
+Bank3CoreIDAffinity = 7
+Bank3InterruptCoalescingNumResponses = 0
+
+Bank4InterruptCoalescingEnabled = 1
+Bank4InterruptCoalescingTimerNs = 10000
+Bank4CoreIDAffinity = 17
+Bank4InterruptCoalescingNumResponses = 0
+
+Bank5InterruptCoalescingEnabled = 1
+Bank5InterruptCoalescingTimerNs = 10000
+Bank5CoreIDAffinity = 19
+Bank5InterruptCoalescingNumResponses = 0
+
+Bank6InterruptCoalescingEnabled = 1
+Bank6InterruptCoalescingTimerNs = 10000
+Bank6CoreIDAffinity = 21
+Bank6InterruptCoalescingNumResponses = 0
+
+Bank7InterruptCoalescingEnabled = 1
+Bank7InterruptCoalescingTimerNs = 10000
+Bank7CoreIDAffinity = 23
+Bank7InterruptCoalescingNumResponses = 0
+
+#######################################################
+#
+# Logical Instances Section
+# A logical instance allows each address domain
+# (kernel space and individual user space processes)
+# to configure rings (i.e. hardware assisted queues)
+# to be used by that address domain and to define the
+# behavior of that ring.
+#
+# The address domains are in the following format
+# - For kernel address domains
+#       [KERNEL]
+# - For user process address domains
+#   [xxxxx]
+#   Where xxxxx may be any ascii value which uniquely identifies
+#   the user mode process.
+#   To allow the driver to correctly configure the
+#   logical instances associated with this user process,
+#   the process must call the icp_sal_userStart(...)
+#   passing the xxxxx string during process initialisation.
+#   When the user space process has finished it must call
+#   icp_sal_userStop(...) to free resources.
+#   If there are multiple devices present in the system all conf
+#   files that describe the devices must have the same address domain
+#   sections even if the address domain does not configure any instances
+#   on that particular device. So if icp_sal_userStart("xxxxx") is called
+#   then user process address domain [xxxxx] needs to be present in all
+#   conf files for all devices in the system.
+#
+# Items configurable by a logical instance are:
+# - Name of the logical instance
+# - The accelerator associated with this logical
+#   instance
+# - The ring bank associated with this logical
+#   instance.
+# - The response mode associated with this logical instance (0
+#   for IRQ or 1 for polled).
+# - The ring for receiving and the ring for transmitting.
+# - The number of concurrent requests supported by a pair of
+#   rings on this instance (tx + rx). Note this number affects
+#   the amount of memory allocated by the driver. Also
+#   Bank<n>InterruptCoalescingNumResponses is only supported for
+#   number of concurrent requests equal to 512.
+#
+# Note: Logical instances may not share the same ring, but
+#           may share a ring bank.
+#
+# The format of the logical instances are:
+# - For crypto:
+#               Cy<n>Name = "xxxx"
+#               Cy<n>AcceleratorNumber = 0|1
+#               Cy<n>BankNumber = 0-7
+#               Cy<n>IsPolled = 0|1
+#               Cy<n>NumConcurrentSymRequests = 64|128|256|512|1024|2048|4096
+#               Cy<n>NumConcurrentAsymRequests = 64|128|256|512|1024|2048|4096
+#               Cy<n>RingAsymTx = 0-15
+#               Cy<n>RingAsymRx = 0-15
+#               Cy<n>RingSymTxHi = 0-15
+#               Cy<n>RingSymRxHi = 0-15
+#               Cy<n>RingSymRx = 0-15
+#
+# - For Data Compression
+#               Dc<n>Name = "xxxx"
+#               Dc<n>AcceleratorNumber = 0|1
+#               Dc<n>BankNumber = 0-7
+#               Dc<n>IsPolled = 0|1
+#               Dc<n>NumConcurrentRequests = 64|128|256|512|1024|2048|4096
+#               Dc<n>RingTx = 0-15
+#               Dc<n>RingRx = 0-15
+#
+# Where:
+#       - n is the number of this logical instance starting at 0.
+#       - xxxx may be any ascii value which identifies the logical instance.
+#
+########################################################
+
+##############################################
+# Kernel Instances Section
+##############################################
+[KERNEL]
+NumberCyInstances = 0
+NumberDcInstances = 0
+
+
+##############################################
+# User Process Instance Section
+##############################################
+[SSL]
+NumberCyInstances = 16
+NumberDcInstances = 0
+
+# Crypto - User instance #0
+Cy0Name = "SSL0"
+Cy0IsPolled = 1
+Cy0AcceleratorNumber = 0
+Cy0ExecutionEngine = 0
+Cy0BankNumber = 0
+Cy0NumConcurrentSymRequests = 512
+Cy0NumConcurrentAsymRequests = 64
+
+Cy0RingAsymTx =  2
+Cy0RingAsymRx =  3
+Cy0RingSymTxHi = 4
+Cy0RingSymRxHi = 5
+Cy0RingSymTxLo = 6
+Cy0RingSymRxLo = 7
+
+# Crypto - User instance #1
+Cy1Name = "SSL1"
+Cy1AcceleratorNumber = 1
+Cy1ExecutionEngine = 0
+Cy1BankNumber = 0
+Cy1IsPolled = 1
+Cy1NumConcurrentSymRequests = 512
+Cy1NumConcurrentAsymRequests = 64
+
+Cy1RingAsymTx =  2
+Cy1RingAsymRx =  3
+Cy1RingSymTxHi = 4
+Cy1RingSymRxHi = 5
+Cy1RingSymTxLo = 6
+Cy1RingSymRxLo = 7
+
+# Crypto - User instance #2
+Cy2Name = "SSL2"
+Cy2IsPolled= 1
+Cy2AcceleratorNumber = 0
+Cy2ExecutionEngine = 1
+Cy2BankNumber = 1
+Cy2NumConcurrentSymRequests = 512
+Cy2NumConcurrentAsymRequests = 64
+
+Cy2RingAsymTx =  0
+Cy2RingAsymRx =  1
+Cy2RingSymTxHi = 2
+Cy2RingSymRxHi = 3
+Cy2RingSymTxLo = 4
+Cy2RingSymRxLo = 5
+
+# Crypto - User instance #3
+Cy3Name = "SSL3"
+Cy3AcceleratorNumber = 1
+Cy3ExecutionEngine = 1
+Cy3BankNumber = 1
+Cy3IsPolled = 1
+Cy3NumConcurrentSymRequests = 512
+Cy3NumConcurrentAsymRequests = 64
+
+Cy3RingAsymTx =  0
+Cy3RingAsymRx =  1
+Cy3RingSymTxHi = 2
+Cy3RingSymRxHi = 3
+Cy3RingSymTxLo = 4
+Cy3RingSymRxLo = 5
+
+
+# Crypto - User instance #4
+Cy4Name = "SSL4"
+Cy4IsPolled= 1
+Cy4AcceleratorNumber = 0
+Cy4ExecutionEngine = 0
+Cy4BankNumber = 2
+Cy4NumConcurrentSymRequests = 512
+Cy4NumConcurrentAsymRequests = 64
+
+Cy4RingAsymTx =  0
+Cy4RingAsymRx =  1
+Cy4RingSymTxHi = 2
+Cy4RingSymRxHi = 3
+Cy4RingSymTxLo = 4
+Cy4RingSymRxLo = 5
+
+# Crypto - User instance #5
+Cy5Name = "SSL5"
+Cy5AcceleratorNumber = 1
+Cy5ExecutionEngine = 0
+Cy5BankNumber = 2
+Cy5IsPolled = 1
+Cy5NumConcurrentSymRequests = 512
+Cy5NumConcurrentAsymRequests = 64
+
+Cy5RingAsymTx =  0
+Cy5RingAsymRx =  1
+Cy5RingSymTxHi = 2
+Cy5RingSymRxHi = 3
+Cy5RingSymTxLo = 4
+Cy5RingSymRxLo = 5
+
+# Crypto - User instance #6
+Cy6Name = "SSL6"
+Cy6IsPolled = 1
+Cy6AcceleratorNumber = 0
+Cy6ExecutionEngine = 1
+Cy6BankNumber = 3
+Cy6NumConcurrentSymRequests = 512
+Cy6NumConcurrentAsymRequests = 64
+
+Cy6RingAsymTx =  0
+Cy6RingAsymRx =  1
+Cy6RingSymTxHi = 2
+Cy6RingSymRxHi = 3
+Cy6RingSymTxLo = 4
+Cy6RingSymRxLo = 5
+
+# Crypto - User instance #7
+Cy7Name = "SSL7"
+Cy7AcceleratorNumber = 1
+Cy7ExecutionEngine = 1
+Cy7BankNumber = 3
+Cy7IsPolled = 1
+Cy7NumConcurrentSymRequests = 512
+Cy7NumConcurrentAsymRequests = 64
+
+Cy7RingAsymTx =  0
+Cy7RingAsymRx =  1
+Cy7RingSymTxHi = 2
+Cy7RingSymRxHi = 3
+Cy7RingSymTxLo = 4
+Cy7RingSymRxLo = 5
+
+# Crypto - User instance #8
+Cy8Name = "SSL8"
+Cy8IsPolled = 1
+Cy8AcceleratorNumber = 0
+Cy8ExecutionEngine = 0
+Cy8BankNumber = 4
+Cy8NumConcurrentSymRequests = 512
+Cy8NumConcurrentAsymRequests = 64
+
+Cy8RingAsymTx =  0
+Cy8RingAsymRx =  1
+Cy8RingSymTxHi = 2
+Cy8RingSymRxHi = 3
+Cy8RingSymTxLo = 4
+Cy8RingSymRxLo = 5
+
+# Crypto - User instance #9
+Cy9Name = "SSL9"
+Cy9IsPolled = 1
+Cy9AcceleratorNumber = 1
+Cy9ExecutionEngine = 0
+Cy9BankNumber = 4
+Cy9NumConcurrentSymRequests = 512
+Cy9NumConcurrentAsymRequests = 64
+
+Cy9RingAsymTx =  0
+Cy9RingAsymRx =  1
+Cy9RingSymTxHi = 2
+Cy9RingSymRxHi = 3
+Cy9RingSymTxLo = 4
+Cy9RingSymRxLo = 5
+
+# Crypto - User instance #10
+Cy10Name = "SSL10"
+Cy10IsPolled = 1
+Cy10AcceleratorNumber = 0
+Cy10ExecutionEngine = 1
+Cy10BankNumber = 5
+Cy10NumConcurrentSymRequests = 512
+Cy10NumConcurrentAsymRequests = 64
+
+Cy10RingAsymTx =  0
+Cy10RingAsymRx =  1
+Cy10RingSymTxHi = 2
+Cy10RingSymRxHi = 3
+Cy10RingSymTxLo = 4
+Cy10RingSymRxLo = 5
+
+# Crypto - User instance #11
+Cy11Name = "SSL11"
+Cy11IsPolled = 1
+Cy11AcceleratorNumber = 1
+Cy11ExecutionEngine = 1
+Cy11BankNumber = 5
+Cy11NumConcurrentSymRequests = 512
+Cy11NumConcurrentAsymRequests = 64
+
+Cy11RingAsymTx =  0
+Cy11RingAsymRx =  1
+Cy11RingSymTxHi = 2
+Cy11RingSymRxHi = 3
+Cy11RingSymTxLo = 4
+Cy11RingSymRxLo = 5
+
+# Crypto - User instance #12
+Cy12Name = "SSL12"
+Cy12IsPolled = 1
+Cy12AcceleratorNumber = 0
+Cy12ExecutionEngine = 0
+Cy12BankNumber = 6
+Cy12NumConcurrentSymRequests = 512
+Cy12NumConcurrentAsymRequests = 64
+
+Cy12RingAsymTx =  0
+Cy12RingAsymRx =  1
+Cy12RingSymTxHi = 2
+Cy12RingSymRxHi = 3
+Cy12RingSymTxLo = 4
+Cy12RingSymRxLo = 5
+
+# Crypto - User instance #13
+Cy13Name = "SSL13"
+Cy13IsPolled = 1
+Cy13AcceleratorNumber = 1
+Cy13ExecutionEngine = 0
+Cy13BankNumber = 6
+Cy13NumConcurrentSymRequests = 512
+Cy13NumConcurrentAsymRequests = 64
+
+Cy13RingAsymTx =  0
+Cy13RingAsymRx =  1
+Cy13RingSymTxHi = 2
+Cy13RingSymRxHi = 3
+Cy13RingSymTxLo = 4
+Cy13RingSymRxLo = 5
+
+# Crypto - User instance #14
+Cy14Name = "SSL14"
+Cy14IsPolled = 1
+Cy14AcceleratorNumber = 0
+Cy14ExecutionEngine = 1
+Cy14BankNumber = 7
+Cy14NumConcurrentSymRequests = 512
+Cy14NumConcurrentAsymRequests = 64
+
+Cy14RingAsymTx =  0
+Cy14RingAsymRx =  1
+Cy14RingSymTxHi = 2
+Cy14RingSymRxHi = 3
+Cy14RingSymTxLo = 4
+Cy14RingSymRxLo = 5
+
+# Crypto - User instance #15
+Cy15Name = "SSL15"
+Cy15IsPolled = 1
+Cy15AcceleratorNumber = 1
+Cy15ExecutionEngine = 1
+Cy15BankNumber = 7
+Cy15NumConcurrentSymRequests = 512
+Cy15NumConcurrentAsymRequests = 64
+
+Cy15RingAsymTx =  0
+Cy15RingAsymRx =  1
+Cy15RingSymTxHi = 2
+Cy15RingSymRxHi = 3
+Cy15RingSymTxLo = 4
+Cy15RingSymRxLo = 5
diff --git a/examples/dpdk_qat/config_files/shumway_B0/dh89xxcc_qa_dev1.conf b/examples/dpdk_qat/config_files/shumway_B0/dh89xxcc_qa_dev1.conf
new file mode 100644 (file)
index 0000000..836de07
--- /dev/null
@@ -0,0 +1,537 @@
+#########################################################################
+#
+# @par
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+#########################################################################
+########################################################
+#
+# This file is the configuration for a single dh89xxcc_qa
+# device.
+#
+# Each device has up to two accelerators.
+# - The client may load balance between these
+#   accelerators.
+# Each accelerator has 8 independent ring banks.
+# - The interrupt for each can be directed to a
+#   specific core.
+# Each ring bank has 16 rings (hardware assisted queues).
+#
+########################################################
+
+##############################################
+# General Section
+##############################################
+
+[GENERAL]
+ServicesEnabled = cy0;cy1
+
+# Look Aside Cryptographic Configuration
+cyHmacAuthMode = 1
+
+# Look Aside Compression Configuration
+dcTotalSRAMAvailable = 0
+dcSRAMPerInstance = 0
+
+# Firmware Location Configuration
+Firmware_UofPath = uof_firmware.bin
+Firmware_MmpPath = mmp_firmware.bin
+
+# QAT Parameters
+Accel0AdminBankNumber = 0
+Accel0AcceleratorNumber = 0
+Accel0AdminTx = 0
+Accel0AdminRx = 1
+
+Accel1AcceleratorNumber = 1
+Accel1AdminBankNumber = 0
+Accel1AdminTx = 0
+Accel1AdminRx = 1
+
+#Statistics, valid values: 1,0
+statsGeneral = 1
+statsDc = 1
+statsDh = 1
+statsDrbg = 1
+statsDsa = 1
+statsEcc = 1
+statsKeyGen = 1
+statsLn = 1
+statsPrime = 1
+statsRsa = 1
+statsSym = 1
+
+#Debug feature, if set to 1 it enables additional entries in /proc filesystem
+ProcDebug = 1
+
+
+################################################
+#
+# Hardware Access Ring Bank Configuration
+# Each Accelerator has 8 ring banks (0-7)
+# If the OS supports MSI-X, each ring bank has a
+# steerable MSI-X interrupt which may be
+# affinitized to a particular node/core.
+#
+################################################
+
+
+[Accelerator0]
+Bank0InterruptCoalescingEnabled = 1
+Bank0InterruptCoalescingTimerNs = 10000
+Bank0CoreIDAffinity = 8
+Bank0InterruptCoalescingNumResponses = 0
+
+Bank1InterruptCoalescingEnabled = 1
+Bank1InterruptCoalescingTimerNs = 10000
+Bank1CoreIDAffinity = 10
+Bank1InterruptCoalescingNumResponses = 0
+
+Bank2InterruptCoalescingEnabled = 1
+Bank2InterruptCoalescingTimerNs = 10000
+Bank2CoreIDAffinity = 12
+Bank2InterruptCoalescingNumResponses = 0
+
+Bank3InterruptCoalescingEnabled = 1
+Bank3InterruptCoalescingTimerNs = 10000
+Bank3CoreIDAffinity = 14
+Bank3InterruptCoalescingNumResponses = 0
+
+Bank4InterruptCoalescingEnabled = 1
+Bank4InterruptCoalescingTimerNs = 10000
+Bank4CoreIDAffinity = 24
+Bank4InterruptCoalescingNumResponses = 0
+
+Bank5InterruptCoalescingEnabled = 1
+Bank5InterruptCoalescingTimerNs = 10000
+Bank5CoreIDAffinity = 26
+Bank5InterruptCoalescingNumResponses = 0
+
+Bank6InterruptCoalescingEnabled = 1
+Bank6InterruptCoalescingTimerNs = 10000
+Bank6CoreIDAffinity = 28
+Bank6InterruptCoalescingNumResponses = 0
+
+Bank7InterruptCoalescingEnabled = 1
+Bank7InterruptCoalescingTimerNs = 10000
+Bank7CoreIDAffinity = 30
+Bank7InterruptCoalescingNumResponses = 0
+
+[Accelerator1]
+Bank0InterruptCoalescingEnabled = 1
+Bank0InterruptCoalescingTimerNs = 10000
+Bank0CoreIDAffinity = 9
+Bank0InterruptCoalescingNumResponses = 0
+
+Bank1InterruptCoalescingEnabled = 1
+Bank1InterruptCoalescingTimerNs = 10000
+Bank1CoreIDAffinity = 11
+Bank1InterruptCoalescingNumResponses = 0
+
+Bank2InterruptCoalescingEnabled = 1
+Bank2InterruptCoalescingTimerNs = 10000
+Bank2CoreIDAffinity = 13
+Bank2InterruptCoalescingNumResponses = 0
+
+Bank3InterruptCoalescingEnabled = 1
+Bank3InterruptCoalescingTimerNs = 10000
+Bank3CoreIDAffinity = 15
+Bank3InterruptCoalescingNumResponses = 0
+
+Bank4InterruptCoalescingEnabled = 1
+Bank4InterruptCoalescingTimerNs = 10000
+Bank4CoreIDAffinity = 25
+Bank4InterruptCoalescingNumResponses = 0
+
+Bank5InterruptCoalescingEnabled = 1
+Bank5InterruptCoalescingTimerNs = 10000
+Bank5CoreIDAffinity = 27
+Bank5InterruptCoalescingNumResponses = 0
+
+Bank6InterruptCoalescingEnabled = 1
+Bank6InterruptCoalescingTimerNs = 10000
+Bank6CoreIDAffinity = 29
+Bank6InterruptCoalescingNumResponses = 0
+
+Bank7InterruptCoalescingEnabled = 1
+Bank7InterruptCoalescingTimerNs = 10000
+Bank7CoreIDAffinity = 31
+Bank7InterruptCoalescingNumResponses = 0
+
+#######################################################
+#
+# Logical Instances Section
+# A logical instance allows each address domain
+# (kernel space and individual user space processes)
+# to configure rings (i.e. hardware assisted queues)
+# to be used by that address domain and to define the
+# behavior of that ring.
+#
+# The address domains are in the following format
+# - For kernel address domains
+#       [KERNEL]
+# - For user process address domains
+#   [xxxxx]
+#   Where xxxxx may be any ascii value which uniquely identifies
+#   the user mode process.
+#   To allow the driver to correctly configure the
+#   logical instances associated with this user process,
+#   the process must call the icp_sal_userStart(...)
+#   passing the xxxxx string during process initialisation.
+#   When the user space process is finished it must call
+#   icp_sal_userStop(...) to free resources.
+#   If there are multiple devices present in the system all conf
+#   files that describe the devices must have the same address domain
+#   sections even if the address domain does not configure any instances
+#   on that particular device. So if icp_sal_userStart("xxxxx") is called
+#   then user process address domain [xxxxx] needs to be present in all
+#   conf files for all devices in the system.
+#
+# Items configurable by a logical instance are:
+# - Name of the logical instance
+# - The accelerator associated with this logical
+#   instance
+# - The ring bank associated with this logical
+#   instance.
+# - The response mode associated with this logical instance (0
+#   for IRQ or 1 for polled).
+# - The ring for receiving and the ring for transmitting.
+# - The number of concurrent requests supported by a pair of
+#   rings on this instance (tx + rx). Note this number affects
+#   the amount of memory allocated by the driver. Also
+#   Bank<n>InterruptCoalescingNumResponses is only supported for
+#   number of concurrent requests equal to 512.
+#
+# Note: Logical instances may not share the same ring, but
+#           may share a ring bank.
+#
+# The format of the logical instances are:
+# - For crypto:
+#               Cy<n>Name = "xxxx"
+#               Cy<n>AcceleratorNumber = 0|1
+#               Cy<n>BankNumber = 0-7
+#               Cy<n>IsPolled = 0|1
+#               Cy<n>NumConcurrentSymRequests = 64|128|256|512|1024|2048|4096
+#               Cy<n>NumConcurrentAsymRequests = 64|128|256|512|1024|2048|4096
+#               Cy<n>RingAsymTx = 0-15
+#               Cy<n>RingAsymRx = 0-15
+#               Cy<n>RingSymTxHi = 0-15
+#               Cy<n>RingSymRxHi = 0-15
+#               Cy<n>RingSymTxLo = 0-15, Cy<n>RingSymRxLo = 0-15
+#
+# - For Data Compression
+#               Dc<n>Name = "xxxx"
+#               Dc<n>AcceleratorNumber = 0|1
+#               Dc<n>BankNumber = 0-7
+#               Dc<n>IsPolled = 0|1
+#               Dc<n>NumConcurrentRequests = 64|128|256|512|1024|2048|4096
+#               Dc<n>RingTx = 0-15
+#               Dc<n>RingRx = 0-15
+#
+# Where:
+#       - n is the number of this logical instance starting at 0.
+#       - xxxx may be any ascii value which identifies the logical instance.
+#
+########################################################
+
+##############################################
+# Kernel Instances Section
+##############################################
+[KERNEL]
+NumberCyInstances = 0
+NumberDcInstances = 0
+
+
+##############################################
+# User Process Instance Section
+##############################################
+[SSL]
+NumberCyInstances = 16
+NumberDcInstances = 0
+
+# Crypto - User instance #0
+Cy0Name = "SSL0"
+Cy0IsPolled = 1
+Cy0AcceleratorNumber = 0
+Cy0ExecutionEngine = 0
+Cy0BankNumber = 0
+Cy0NumConcurrentSymRequests = 512
+Cy0NumConcurrentAsymRequests = 64
+
+Cy0RingAsymTx =  2
+Cy0RingAsymRx =  3
+Cy0RingSymTxHi = 4
+Cy0RingSymRxHi = 5
+Cy0RingSymTxLo = 6
+Cy0RingSymRxLo = 7
+
+# Crypto - User instance #1
+Cy1Name = "SSL1"
+Cy1AcceleratorNumber = 1
+Cy1ExecutionEngine = 0
+Cy1BankNumber = 0
+Cy1IsPolled = 1
+Cy1NumConcurrentSymRequests = 512
+Cy1NumConcurrentAsymRequests = 64
+
+Cy1RingAsymTx =  2
+Cy1RingAsymRx =  3
+Cy1RingSymTxHi = 4
+Cy1RingSymRxHi = 5
+Cy1RingSymTxLo = 6
+Cy1RingSymRxLo = 7
+
+# Crypto - User instance #2
+Cy2Name = "SSL2"
+Cy2IsPolled = 1
+Cy2AcceleratorNumber = 0
+Cy2ExecutionEngine = 1
+Cy2BankNumber = 1
+Cy2NumConcurrentSymRequests = 512
+Cy2NumConcurrentAsymRequests = 64
+
+Cy2RingAsymTx =  0
+Cy2RingAsymRx =  1
+Cy2RingSymTxHi = 2
+Cy2RingSymRxHi = 3
+Cy2RingSymTxLo = 4
+Cy2RingSymRxLo = 5
+
+# Crypto - User instance #3
+Cy3Name = "SSL3"
+Cy3AcceleratorNumber = 1
+Cy3ExecutionEngine = 1
+Cy3BankNumber = 1
+Cy3IsPolled = 1
+Cy3NumConcurrentSymRequests = 512
+Cy3NumConcurrentAsymRequests = 64
+
+Cy3RingAsymTx =  0
+Cy3RingAsymRx =  1
+Cy3RingSymTxHi = 2
+Cy3RingSymRxHi = 3
+Cy3RingSymTxLo = 4
+Cy3RingSymRxLo = 5
+
+
+# Crypto - User instance #4
+Cy4Name = "SSL4"
+Cy4IsPolled = 1
+Cy4AcceleratorNumber = 0
+Cy4ExecutionEngine = 0
+Cy4BankNumber = 2
+Cy4NumConcurrentSymRequests = 512
+Cy4NumConcurrentAsymRequests = 64
+
+Cy4RingAsymTx =  0
+Cy4RingAsymRx =  1
+Cy4RingSymTxHi = 2
+Cy4RingSymRxHi = 3
+Cy4RingSymTxLo = 4
+Cy4RingSymRxLo = 5
+
+# Crypto - User instance #5
+Cy5Name = "SSL5"
+Cy5AcceleratorNumber = 1
+Cy5ExecutionEngine = 0
+Cy5BankNumber = 2
+Cy5IsPolled = 1
+Cy5NumConcurrentSymRequests = 512
+Cy5NumConcurrentAsymRequests = 64
+
+Cy5RingAsymTx =  0
+Cy5RingAsymRx =  1
+Cy5RingSymTxHi = 2
+Cy5RingSymRxHi = 3
+Cy5RingSymTxLo = 4
+Cy5RingSymRxLo = 5
+
+# Crypto - User instance #6
+Cy6Name = "SSL6"
+Cy6IsPolled = 1
+Cy6AcceleratorNumber = 0
+Cy6ExecutionEngine = 1
+Cy6BankNumber = 3
+Cy6NumConcurrentSymRequests = 512
+Cy6NumConcurrentAsymRequests = 64
+
+Cy6RingAsymTx =  0
+Cy6RingAsymRx =  1
+Cy6RingSymTxHi = 2
+Cy6RingSymRxHi = 3
+Cy6RingSymTxLo = 4
+Cy6RingSymRxLo = 5
+
+# Crypto - User instance #7
+Cy7Name = "SSL7"
+Cy7AcceleratorNumber = 1
+Cy7ExecutionEngine = 1
+Cy7BankNumber = 3
+Cy7IsPolled = 1
+Cy7NumConcurrentSymRequests = 512
+Cy7NumConcurrentAsymRequests = 64
+
+Cy7RingAsymTx =  0
+Cy7RingAsymRx =  1
+Cy7RingSymTxHi = 2
+Cy7RingSymRxHi = 3
+Cy7RingSymTxLo = 4
+Cy7RingSymRxLo = 5
+
+# Crypto - User instance #8
+Cy8Name = "SSL8"
+Cy8IsPolled = 1
+Cy8AcceleratorNumber = 0
+Cy8ExecutionEngine = 0
+Cy8BankNumber = 4
+Cy8NumConcurrentSymRequests = 512
+Cy8NumConcurrentAsymRequests = 64
+
+Cy8RingAsymTx =  0
+Cy8RingAsymRx =  1
+Cy8RingSymTxHi = 2
+Cy8RingSymRxHi = 3
+Cy8RingSymTxLo = 4
+Cy8RingSymRxLo = 5
+
+# Crypto - User instance #9
+Cy9Name = "SSL9"
+Cy9IsPolled = 1
+Cy9AcceleratorNumber = 1
+Cy9ExecutionEngine = 0
+Cy9BankNumber = 4
+Cy9NumConcurrentSymRequests = 512
+Cy9NumConcurrentAsymRequests = 64
+
+Cy9RingAsymTx =  0
+Cy9RingAsymRx =  1
+Cy9RingSymTxHi = 2
+Cy9RingSymRxHi = 3
+Cy9RingSymTxLo = 4
+Cy9RingSymRxLo = 5
+
+# Crypto - User instance #10
+Cy10Name = "SSL10"
+Cy10IsPolled = 1
+Cy10AcceleratorNumber = 0
+Cy10ExecutionEngine = 1
+Cy10BankNumber = 5
+Cy10NumConcurrentSymRequests = 512
+Cy10NumConcurrentAsymRequests = 64
+
+Cy10RingAsymTx =  0
+Cy10RingAsymRx =  1
+Cy10RingSymTxHi = 2
+Cy10RingSymRxHi = 3
+Cy10RingSymTxLo = 4
+Cy10RingSymRxLo = 5
+
+# Crypto - User instance #11
+Cy11Name = "SSL11"
+Cy11IsPolled = 1
+Cy11AcceleratorNumber = 1
+Cy11ExecutionEngine = 1
+Cy11BankNumber = 5
+Cy11NumConcurrentSymRequests = 512
+Cy11NumConcurrentAsymRequests = 64
+
+Cy11RingAsymTx =  0
+Cy11RingAsymRx =  1
+Cy11RingSymTxHi = 2
+Cy11RingSymRxHi = 3
+Cy11RingSymTxLo = 4
+Cy11RingSymRxLo = 5
+
+# Crypto - User instance #12
+Cy12Name = "SSL12"
+Cy12IsPolled = 1
+Cy12AcceleratorNumber = 0
+Cy12ExecutionEngine = 0
+Cy12BankNumber = 6
+Cy12NumConcurrentSymRequests = 512
+Cy12NumConcurrentAsymRequests = 64
+
+Cy12RingAsymTx =  0
+Cy12RingAsymRx =  1
+Cy12RingSymTxHi = 2
+Cy12RingSymRxHi = 3
+Cy12RingSymTxLo = 4
+Cy12RingSymRxLo = 5
+
+# Crypto - User instance #13
+Cy13Name = "SSL13"
+Cy13IsPolled = 1
+Cy13AcceleratorNumber = 1
+Cy13ExecutionEngine = 0
+Cy13BankNumber = 6
+Cy13NumConcurrentSymRequests = 512
+Cy13NumConcurrentAsymRequests = 64
+
+Cy13RingAsymTx =  0
+Cy13RingAsymRx =  1
+Cy13RingSymTxHi = 2
+Cy13RingSymRxHi = 3
+Cy13RingSymTxLo = 4
+Cy13RingSymRxLo = 5
+
+# Crypto - User instance #14
+Cy14Name = "SSL14"
+Cy14IsPolled = 1
+Cy14AcceleratorNumber = 0
+Cy14ExecutionEngine = 1
+Cy14BankNumber = 7
+Cy14NumConcurrentSymRequests = 512
+Cy14NumConcurrentAsymRequests = 64
+
+Cy14RingAsymTx =  0
+Cy14RingAsymRx =  1
+Cy14RingSymTxHi = 2
+Cy14RingSymRxHi = 3
+Cy14RingSymTxLo = 4
+Cy14RingSymRxLo = 5
+
+# Crypto - User instance #15
+Cy15Name = "SSL15"
+Cy15IsPolled = 1
+Cy15AcceleratorNumber = 1
+Cy15ExecutionEngine = 1
+Cy15BankNumber = 7
+Cy15NumConcurrentSymRequests = 512
+Cy15NumConcurrentAsymRequests = 64
+
+Cy15RingAsymTx =  0
+Cy15RingAsymRx =  1
+Cy15RingSymTxHi = 2
+Cy15RingSymRxHi = 3
+Cy15RingSymTxLo = 4
+Cy15RingSymRxLo = 5
diff --git a/examples/dpdk_qat/config_files/stargo_B0/dh89xxcc_qa_dev0.conf b/examples/dpdk_qat/config_files/stargo_B0/dh89xxcc_qa_dev0.conf
new file mode 100644 (file)
index 0000000..4639e4b
--- /dev/null
@@ -0,0 +1,409 @@
+#########################################################################
+#
+# @par
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+#########################################################################
+########################################################
+#
+# This file is the configuration for a single dh89xxcc_qa
+# device.
+#
+# Each device has up to two accelerators.
+# - The client may load balance between these
+#   accelerators.
+# Each accelerator has 8 independent ring banks.
+# - The interrupt for each can be directed to a
+#   specific core.
+# Each ring bank has 16 rings (hardware assisted queues).
+#
+########################################################
+
+##############################################
+# General Section
+##############################################
+
+[GENERAL]
+ServicesEnabled = cy0;cy1
+
+# Look Aside Cryptographic Configuration
+cyHmacAuthMode = 1
+
+# Look Aside Compression Configuration
+dcTotalSRAMAvailable = 0
+dcSRAMPerInstance = 0
+
+# Firmware Location Configuration
+Firmware_UofPath = uof_firmware.bin
+Firmware_MmpPath = mmp_firmware.bin
+
+# QAT Parameters
+Accel0AdminBankNumber = 0
+Accel0AcceleratorNumber = 0
+Accel0AdminTx = 0
+Accel0AdminRx = 1
+
+Accel1AcceleratorNumber = 1
+Accel1AdminBankNumber = 0
+Accel1AdminTx = 0
+Accel1AdminRx = 1
+
+#Statistics, valid values: 1,0
+statsGeneral = 1
+statsDc = 1
+statsDh = 1
+statsDrbg = 1
+statsDsa = 1
+statsEcc = 1
+statsKeyGen = 1
+statsLn = 1
+statsPrime = 1
+statsRsa = 1
+statsSym = 1
+
+#Debug feature, if set to 1 it enables additional entries in /proc filesystem
+ProcDebug = 1
+
+
+################################################
+#
+# Hardware Access Ring Bank Configuration
+# Each Accelerator has 8 ring banks (0-7)
+# If the OS supports MSI-X, each ring bank has a
+# steerable MSI-X interrupt which may be
+# affinitized to a particular node/core.
+#
+################################################
+
+
+[Accelerator0]
+Bank0InterruptCoalescingEnabled = 1
+Bank0InterruptCoalescingTimerNs = 10000
+Bank0CoreIDAffinity = 0
+Bank0InterruptCoalescingNumResponses = 0
+
+Bank1InterruptCoalescingEnabled = 1
+Bank1InterruptCoalescingTimerNs = 10000
+Bank1CoreIDAffinity = 2
+Bank1InterruptCoalescingNumResponses = 0
+
+Bank2InterruptCoalescingEnabled = 1
+Bank2InterruptCoalescingTimerNs = 10000
+Bank2CoreIDAffinity = 4
+Bank2InterruptCoalescingNumResponses = 0
+
+Bank3InterruptCoalescingEnabled = 1
+Bank3InterruptCoalescingTimerNs = 10000
+Bank3CoreIDAffinity = 6
+Bank3InterruptCoalescingNumResponses = 0
+
+Bank4InterruptCoalescingEnabled = 1
+Bank4InterruptCoalescingTimerNs = 10000
+Bank4CoreIDAffinity = 7
+Bank4InterruptCoalescingNumResponses = 0
+
+Bank5InterruptCoalescingEnabled = 1
+Bank5InterruptCoalescingTimerNs = 10000
+Bank5CoreIDAffinity = 7
+Bank5InterruptCoalescingNumResponses = 0
+
+Bank6InterruptCoalescingEnabled = 1
+Bank6InterruptCoalescingTimerNs = 10000
+Bank6CoreIDAffinity = 7
+Bank6InterruptCoalescingNumResponses = 0
+
+Bank7InterruptCoalescingEnabled = 1
+Bank7InterruptCoalescingTimerNs = 10000
+Bank7CoreIDAffinity = 7
+Bank7InterruptCoalescingNumResponses = 0
+
+[Accelerator1]
+Bank0InterruptCoalescingEnabled = 1
+Bank0InterruptCoalescingTimerNs = 10000
+Bank0CoreIDAffinity = 1
+Bank0InterruptCoalescingNumResponses = 0
+
+Bank1InterruptCoalescingEnabled = 1
+Bank1InterruptCoalescingTimerNs = 10000
+Bank1CoreIDAffinity = 3
+Bank1InterruptCoalescingNumResponses = 0
+
+Bank2InterruptCoalescingEnabled = 1
+Bank2InterruptCoalescingTimerNs = 10000
+Bank2CoreIDAffinity = 5
+Bank2InterruptCoalescingNumResponses = 0
+
+Bank3InterruptCoalescingEnabled = 1
+Bank3InterruptCoalescingTimerNs = 10000
+Bank3CoreIDAffinity = 7
+Bank3InterruptCoalescingNumResponses = 0
+
+Bank4InterruptCoalescingEnabled = 1
+Bank4InterruptCoalescingTimerNs = 10000
+Bank4CoreIDAffinity = 7
+Bank4InterruptCoalescingNumResponses = 0
+
+Bank5InterruptCoalescingEnabled = 1
+Bank5InterruptCoalescingTimerNs = 10000
+Bank5CoreIDAffinity = 7
+Bank5InterruptCoalescingNumResponses = 0
+
+Bank6InterruptCoalescingEnabled = 1
+Bank6InterruptCoalescingTimerNs = 10000
+Bank6CoreIDAffinity = 7
+Bank6InterruptCoalescingNumResponses = 0
+
+Bank7InterruptCoalescingEnabled = 1
+Bank7InterruptCoalescingTimerNs = 10000
+Bank7CoreIDAffinity = 7
+Bank7InterruptCoalescingNumResponses = 0
+
+#######################################################
+#
+# Logical Instances Section
+# A logical instance allows each address domain
+# (kernel space and individual user space processes)
+# to configure rings (i.e. hardware assisted queues)
+# to be used by that address domain and to define the
+# behavior of that ring.
+#
+# The address domains are in the following format
+# - For kernel address domains
+#       [KERNEL]
+# - For user process address domains
+#   [xxxxx]
+#   Where xxxxx may be any ascii value which uniquely identifies
+#   the user mode process.
+#   To allow the driver to correctly configure the
+#   logical instances associated with this user process,
+#   the process must call the icp_sal_userStart(...)
+#   passing the xxxxx string during process initialisation.
+#   When the user space process is finished it must call
+#   icp_sal_userStop(...) to free resources.
+#   If there are multiple devices present in the system all conf
+#   files that describe the devices must have the same address domain
+#   sections even if the address domain does not configure any instances
+#   on that particular device. So if icp_sal_userStart("xxxxx") is called
+#   then user process address domain [xxxxx] needs to be present in all
+#   conf files for all devices in the system.
+#
+# Items configurable by a logical instance are:
+# - Name of the logical instance
+# - The accelerator associated with this logical
+#   instance
+# - The ring bank associated with this logical
+#   instance.
+# - The response mode associated with this logical instance (0
+#   for IRQ or 1 for polled).
+# - The ring for receiving and the ring for transmitting.
+# - The number of concurrent requests supported by a pair of
+#   rings on this instance (tx + rx). Note this number affects
+#   the amount of memory allocated by the driver. Also
+#   Bank<n>InterruptCoalescingNumResponses is only supported for
+#   number of concurrent requests equal to 512.
+#
+# Note: Logical instances may not share the same ring, but
+#           may share a ring bank.
+#
+# The format of the logical instances are:
+# - For crypto:
+#               Cy<n>Name = "xxxx"
+#               Cy<n>AcceleratorNumber = 0|1
+#               Cy<n>BankNumber = 0-7
+#               Cy<n>IsPolled = 0|1
+#               Cy<n>NumConcurrentSymRequests = 64|128|256|512|1024|2048|4096
+#               Cy<n>NumConcurrentAsymRequests = 64|128|256|512|1024|2048|4096
+#               Cy<n>RingAsymTx = 0-15
+#               Cy<n>RingAsymRx = 0-15
+#               Cy<n>RingSymTxHi = 0-15
+#               Cy<n>RingSymRxHi = 0-15
+#               Cy<n>RingSymTxLo = 0-15, Cy<n>RingSymRxLo = 0-15
+#
+# - For Data Compression
+#               Dc<n>Name = "xxxx"
+#               Dc<n>AcceleratorNumber = 0|1
+#               Dc<n>BankNumber = 0-7
+#               Dc<n>IsPolled = 0|1
+#               Dc<n>NumConcurrentRequests = 64|128|256|512|1024|2048|4096
+#               Dc<n>RingTx = 0-15
+#               Dc<n>RingRx = 0-15
+#
+# Where:
+#       - n is the number of this logical instance starting at 0.
+#       - xxxx may be any ascii value which identifies the logical instance.
+#
+########################################################
+
+##############################################
+# Kernel Instances Section
+##############################################
+[KERNEL]
+NumberCyInstances = 0
+NumberDcInstances = 0
+
+
+##############################################
+# User Process Instance Section
+##############################################
+[SSL]
+NumberCyInstances = 8
+NumberDcInstances = 0
+
+# Crypto - User instance #0
+Cy0Name = "SSL0"
+Cy0IsPolled = 1
+Cy0AcceleratorNumber = 0
+Cy0ExecutionEngine = 0
+Cy0BankNumber = 0
+Cy0NumConcurrentSymRequests = 512
+Cy0NumConcurrentAsymRequests = 64
+
+Cy0RingAsymTx =  2
+Cy0RingAsymRx =  3
+Cy0RingSymTxHi = 4
+Cy0RingSymRxHi = 5
+Cy0RingSymTxLo = 6
+Cy0RingSymRxLo = 7
+
+# Crypto - User instance #1
+Cy1Name = "SSL1"
+Cy1AcceleratorNumber = 1
+Cy1ExecutionEngine = 0
+Cy1BankNumber = 0
+Cy1IsPolled = 1
+Cy1NumConcurrentSymRequests = 512
+Cy1NumConcurrentAsymRequests = 64
+
+Cy1RingAsymTx =  2
+Cy1RingAsymRx =  3
+Cy1RingSymTxHi = 4
+Cy1RingSymRxHi = 5
+Cy1RingSymTxLo = 6
+Cy1RingSymRxLo = 7
+
+# Crypto - User instance #2
+Cy2Name = "SSL2"
+Cy2IsPolled = 1
+Cy2AcceleratorNumber = 0
+Cy2ExecutionEngine = 1
+Cy2BankNumber = 1
+Cy2NumConcurrentSymRequests = 512
+Cy2NumConcurrentAsymRequests = 64
+
+Cy2RingAsymTx =  0
+Cy2RingAsymRx =  1
+Cy2RingSymTxHi = 2
+Cy2RingSymRxHi = 3
+Cy2RingSymTxLo = 4
+Cy2RingSymRxLo = 5
+
+# Crypto - User instance #3
+Cy3Name = "SSL3"
+Cy3AcceleratorNumber = 1
+Cy3ExecutionEngine = 1
+Cy3BankNumber = 1
+Cy3IsPolled = 1
+Cy3NumConcurrentSymRequests = 512
+Cy3NumConcurrentAsymRequests = 64
+
+Cy3RingAsymTx =  0
+Cy3RingAsymRx =  1
+Cy3RingSymTxHi = 2
+Cy3RingSymRxHi = 3
+Cy3RingSymTxLo = 4
+Cy3RingSymRxLo = 5
+
+
+# Crypto - User instance #4
+Cy4Name = "SSL4"
+Cy4IsPolled = 1
+Cy4AcceleratorNumber = 0
+Cy4ExecutionEngine = 0
+Cy4BankNumber = 2
+Cy4NumConcurrentSymRequests = 512
+Cy4NumConcurrentAsymRequests = 64
+
+Cy4RingAsymTx =  0
+Cy4RingAsymRx =  1
+Cy4RingSymTxHi = 2
+Cy4RingSymRxHi = 3
+Cy4RingSymTxLo = 4
+Cy4RingSymRxLo = 5
+
+# Crypto - User instance #5
+Cy5Name = "SSL5"
+Cy5AcceleratorNumber = 1
+Cy5ExecutionEngine = 0
+Cy5BankNumber = 2
+Cy5IsPolled = 1
+Cy5NumConcurrentSymRequests = 512
+Cy5NumConcurrentAsymRequests = 64
+
+Cy5RingAsymTx =  0
+Cy5RingAsymRx =  1
+Cy5RingSymTxHi = 2
+Cy5RingSymRxHi = 3
+Cy5RingSymTxLo = 4
+Cy5RingSymRxLo = 5
+
+# Crypto - User instance #6
+Cy6Name = "SSL6"
+Cy6IsPolled = 1
+Cy6AcceleratorNumber = 0
+Cy6ExecutionEngine = 1
+Cy6BankNumber = 3
+Cy6NumConcurrentSymRequests = 512
+Cy6NumConcurrentAsymRequests = 64
+
+Cy6RingAsymTx =  0
+Cy6RingAsymRx =  1
+Cy6RingSymTxHi = 2
+Cy6RingSymRxHi = 3
+Cy6RingSymTxLo = 4
+Cy6RingSymRxLo = 5
+
+# Crypto - User instance #7
+Cy7Name = "SSL7"
+Cy7AcceleratorNumber = 1
+Cy7ExecutionEngine = 1
+Cy7BankNumber = 3
+Cy7IsPolled = 1
+Cy7NumConcurrentSymRequests = 512
+Cy7NumConcurrentAsymRequests = 64
+
+Cy7RingAsymTx =  0
+Cy7RingAsymRx =  1
+Cy7RingSymTxHi = 2
+Cy7RingSymRxHi = 3
+Cy7RingSymTxLo = 4
+Cy7RingSymRxLo = 5
diff --git a/examples/dpdk_qat/crypto.c b/examples/dpdk_qat/crypto.c
new file mode 100644 (file)
index 0000000..99680be
--- /dev/null
@@ -0,0 +1,921 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <strings.h>
+#include <string.h>
+#include <inttypes.h>
+#include <errno.h>
+#include <sys/queue.h>
+#include <stdarg.h>
+
+#include <rte_common.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_tailq.h>
+#include <rte_ether.h>
+#include <rte_malloc.h>
+#include <rte_launch.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_ring.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_string_fns.h>
+
+#define CPA_CY_SYM_DP_TMP_WORKAROUND 1
+
+#include "cpa.h"
+#include "cpa_types.h"
+#include "cpa_cy_sym_dp.h"
+#include "cpa_cy_common.h"
+#include "cpa_cy_im.h"
+#include "icp_sal_user.h"
+#include "icp_sal_poll.h"
+
+#include "crypto.h"
+
+#define NUM_HMAC       (10)
+#define NUM_CRYPTO     (7)
+
+
+/* CIPHER KEY LENGTHS */
+#define KEY_SIZE_64_IN_BYTES   (64 / 8)
+#define KEY_SIZE_56_IN_BYTES   (56 / 8)
+#define KEY_SIZE_128_IN_BYTES  (128 / 8)
+#define KEY_SIZE_168_IN_BYTES  (168 / 8)
+#define KEY_SIZE_192_IN_BYTES  (192 / 8)
+#define KEY_SIZE_256_IN_BYTES  (256 / 8)
+
+/* HMAC AUTH KEY LENGTHS */
+#define AES_XCBC_AUTH_KEY_LENGTH_IN_BYTES      (128 / 8)
+#define SHA1_AUTH_KEY_LENGTH_IN_BYTES          (160 / 8)
+#define SHA224_AUTH_KEY_LENGTH_IN_BYTES                (224 / 8)
+#define SHA256_AUTH_KEY_LENGTH_IN_BYTES                (256 / 8)
+#define SHA384_AUTH_KEY_LENGTH_IN_BYTES                (384 / 8)
+#define SHA512_AUTH_KEY_LENGTH_IN_BYTES                (512 / 8)
+#define MD5_AUTH_KEY_LENGTH_IN_BYTES           (128 / 8)
+
+/* HASH DIGEST LENGTHS */
+#define AES_XCBC_DIGEST_LENGTH_IN_BYTES                (128 / 8)
+#define AES_XCBC_96_DIGEST_LENGTH_IN_BYTES     (96 / 8)
+#define MD5_DIGEST_LENGTH_IN_BYTES             (128 / 8)
+#define SHA1_DIGEST_LENGTH_IN_BYTES            (160 / 8)
+#define SHA1_96_DIGEST_LENGTH_IN_BYTES         (96 / 8)
+#define SHA224_DIGEST_LENGTH_IN_BYTES          (224 / 8)
+#define SHA256_DIGEST_LENGTH_IN_BYTES          (256 / 8)
+#define SHA384_DIGEST_LENGTH_IN_BYTES          (384 / 8)
+#define SHA512_DIGEST_LENGTH_IN_BYTES          (512 / 8)
+
+#define IV_LENGTH_16_BYTES     (16)
+#define IV_LENGTH_8_BYTES      (8)
+
+
+/*
+ * rte_memzone is used to allocate physically contiguous virtual memory.
+ * In this application we allocate a single block and divide between variables
+ * which require a virtual to physical mapping for use by the QAT driver.
+ * Virt2phys is only performed during initialisation and not on the data-path.
+ */
+
+#define LCORE_MEMZONE_SIZE     (1 << 22)
+
+struct lcore_memzone
+{
+       const struct rte_memzone *memzone;
+       void *next_free_address;
+};
+
+/*
+ * Size the qa software response queue.
+ * Note: Head and Tail are 8 bit, therefore, the queue is
+ * fixed to 256 entries.
+ */
+#define CRYPTO_SOFTWARE_QUEUE_SIZE 256
+
+struct qa_callbackQueue {
+       uint8_t head;
+       uint8_t tail;
+       uint16_t numEntries;
+       struct rte_mbuf *qaCallbackRing[CRYPTO_SOFTWARE_QUEUE_SIZE];
+};
+
+struct qa_core_conf {
+       CpaCySymDpSessionCtx *encryptSessionHandleTbl[NUM_CRYPTO][NUM_HMAC];
+       CpaCySymDpSessionCtx *decryptSessionHandleTbl[NUM_CRYPTO][NUM_HMAC];
+       CpaInstanceHandle instanceHandle;
+       struct qa_callbackQueue callbackQueue;
+       uint64_t qaOutstandingRequests;
+       uint64_t numResponseAttempts;
+       uint8_t kickFreq;
+       void *pPacketIV;
+       CpaPhysicalAddr packetIVPhy;
+       struct lcore_memzone lcoreMemzone;
+} __rte_cache_aligned;
+
+#define MAX_CORES   (RTE_MAX_LCORE)
+
+static struct qa_core_conf qaCoreConf[MAX_CORES];
+
+/*
+ * Create maximum possible key size,
+ * one for cipher and one for hash.
+ */
+struct glob_keys {
+       uint8_t cipher_key[32];
+       uint8_t hash_key[64];
+       uint8_t iv[16];
+};
+
+struct glob_keys g_crypto_hash_keys = {
+       .cipher_key = {0x01,0x02,0x03,0x04,0x05,0x06,0x07,0x08,
+               0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f,0x10,
+               0x11,0x12,0x13,0x14,0x15,0x16,0x17,0x18,
+               0x19,0x1a,0x1b,0x1c,0x1d,0x1e,0x1f,0x20},
+       .hash_key = {0x01,0x02,0x03,0x04,0x05,0x06,0x07,0x08,
+               0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f,0x10,
+               0x11,0x12,0x13,0x14,0x15,0x16,0x17,0x18,
+               0x19,0x1a,0x1b,0x1c,0x1d,0x1e,0x1f,0x20,
+               0x21,0x22,0x23,0x24,0x25,0x26,0x27,0x28,
+               0x29,0x2a,0x2b,0x2c,0x2d,0x2e,0x2f,0x30,
+               0x31,0x32,0x33,0x34,0x35,0x36,0x37,0x38,
+               0x39,0x4a,0x4b,0x4c,0x4d,0x4e,0x4f,0x50},
+       .iv = {0x01,0x02,0x03,0x04,0x05,0x06,0x07,0x08,
+               0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f,0x10}
+};
+
+/*
+ * Offsets from the start of the packet.
+ *
+ */
+#define PACKET_DATA_START_PHYS(p) \
+               ((p)->buf_physaddr + ((char *)p->pkt.data - (char *)p->buf_addr))
+
+/*
+ * A fixed offset to where the crypto is to be performed, which is the first
+ * byte after the Ethernet(14 bytes) and IPv4 headers(20 bytes)
+ */
+#define CRYPTO_START_OFFSET            (14+20)
+#define HASH_START_OFFSET              (14+20)
+#define CIPHER_BLOCK_DEFAULT_SIZE      (16)
+#define HASH_BLOCK_DEFAULT_SIZE                (16)
+
+/*
+ * Offset to the opdata from the start of the data portion of packet.
+ * Assumption: The buffer is physically contiguous.
+ * +18 takes this to the next cache line.
+ */
+
+#define CRYPTO_OFFSET_TO_OPDATA        (ETHER_MAX_LEN+18)
+
+/*
+ * Default number of requests to place on the hardware ring before kicking the
+ * ring pointers.
+ */
+#define CRYPTO_BURST_TX        (16)
+
+/*
+ * Only call the qa poll function when the number responses in the software
+ * queue drops below this number.
+ */
+#define CRYPTO_QUEUED_RESP_POLL_THRESHOLD      (32)
+
+/*
+ * Limit the number of polls per call to get_next_response.
+ */
+#define GET_NEXT_RESPONSE_FREQ (32)
+
+/*
+ * Max number of responses to pull from the qa in one poll.
+ */
+#define CRYPTO_MAX_RESPONSE_QUOTA \
+               (CRYPTO_SOFTWARE_QUEUE_SIZE-CRYPTO_QUEUED_RESP_POLL_THRESHOLD-1)
+
+#if (CRYPTO_QUEUED_RESP_POLL_THRESHOLD + CRYPTO_MAX_RESPONSE_QUOTA >= \
+               CRYPTO_SOFTWARE_QUEUE_SIZE)
+#error Its possible to overflow the qa response Q with current poll and \
+               response quota.
+#endif
+
+static void
+crypto_callback(CpaCySymDpOpData *pOpData,
+               __rte_unused CpaStatus status,
+               __rte_unused CpaBoolean verifyResult)
+{
+       uint32_t lcore_id;
+       lcore_id = rte_lcore_id();
+       struct qa_callbackQueue *callbackQ = &(qaCoreConf[lcore_id].callbackQueue);
+
+       /*
+        * Received a completion from the QA hardware.
+        * Place the response on the return queue.
+        */
+       callbackQ->qaCallbackRing[callbackQ->head] = pOpData->pCallbackTag;
+       callbackQ->head++;
+       callbackQ->numEntries++;
+       qaCoreConf[lcore_id].qaOutstandingRequests--;
+}
+
+static void
+qa_crypto_callback(CpaCySymDpOpData *pOpData, CpaStatus status,
+               CpaBoolean verifyResult)
+{
+       crypto_callback(pOpData, status, verifyResult);
+}
+
+/*
+ * Each allocation from a particular memzone lasts for the life-time of
+ * the application. No freeing of previous allocations will occur.
+ */
+static void *
+alloc_memzone_region(uint32_t length, uint32_t lcore_id)
+{
+       char *current_free_addr_ptr = NULL;
+       struct lcore_memzone *lcore_memzone = &(qaCoreConf[lcore_id].lcoreMemzone);
+
+       current_free_addr_ptr  = lcore_memzone->next_free_address;
+
+       if (current_free_addr_ptr + length >=
+               (char *)lcore_memzone->memzone->addr + lcore_memzone->memzone->len) {
+               printf("Crypto: No memory available in memzone\n");
+               return NULL;
+       }
+       lcore_memzone->next_free_address = current_free_addr_ptr + length;
+
+       return (void *)current_free_addr_ptr;
+}
+
+/*
+ * Virtual to Physical Address translation is only executed during initialization
+ * and not on the data-path.
+ */
+static CpaPhysicalAddr
+qa_v2p(void *ptr)
+{
+       const struct rte_memzone *memzone = NULL;
+       uint32_t lcore_id = 0;
+       RTE_LCORE_FOREACH(lcore_id) {
+               memzone = qaCoreConf[lcore_id].lcoreMemzone.memzone;
+
+               if ((char*) ptr >= (char *) memzone->addr &&
+                               (char*) ptr < ((char*) memzone->addr + memzone->len)) {
+                       return (CpaPhysicalAddr)
+                                       (memzone->phys_addr + ((char *) ptr - (char*) memzone->addr));
+               }
+       }
+       printf("Crypto: Corresponding physical address not found in memzone\n");
+       return (CpaPhysicalAddr) 0;
+}
+
+static CpaStatus
+getCoreAffinity(Cpa32U *coreAffinity, const CpaInstanceHandle instanceHandle)
+{
+       CpaInstanceInfo2 info;
+       Cpa16U i = 0;
+       CpaStatus status = CPA_STATUS_SUCCESS;
+
+       bzero(&info, sizeof(CpaInstanceInfo2));
+
+       status = cpaCyInstanceGetInfo2(instanceHandle, &info);
+       if (CPA_STATUS_SUCCESS != status) {
+               printf("Crypto: Error getting instance info\n");
+               return CPA_STATUS_FAIL;
+       }
+       for (i = 0; i < MAX_CORES; i++) {
+               if (CPA_BITMAP_BIT_TEST(info.coreAffinity, i)) {
+                       *coreAffinity = i;
+                       return CPA_STATUS_SUCCESS;
+               }
+       }
+       return CPA_STATUS_FAIL;
+}
+
+static CpaStatus
+get_crypto_instance_on_core(CpaInstanceHandle *pInstanceHandle,
+               uint32_t lcore_id)
+{
+       Cpa16U numInstances = 0, i = 0;
+       CpaStatus status = CPA_STATUS_FAIL;
+       CpaInstanceHandle *pLocalInstanceHandles = NULL;
+       Cpa32U coreAffinity = 0;
+
+       status = cpaCyGetNumInstances(&numInstances);
+       if (CPA_STATUS_SUCCESS != status || numInstances == 0) {
+               return CPA_STATUS_FAIL;
+       }
+
+       pLocalInstanceHandles = rte_malloc("pLocalInstanceHandles",
+                       sizeof(CpaInstanceHandle) * numInstances, CACHE_LINE_SIZE);
+
+       if (NULL == pLocalInstanceHandles) {
+               return CPA_STATUS_FAIL;
+       }
+       status = cpaCyGetInstances(numInstances, pLocalInstanceHandles);
+       if (CPA_STATUS_SUCCESS != status) {
+               printf("Crypto: cpaCyGetInstances failed with status: %"PRId32"\n", status);
+               rte_free((void *) pLocalInstanceHandles);
+               return CPA_STATUS_FAIL;
+       }
+
+       for (i = 0; i < numInstances; i++) {
+               status = getCoreAffinity(&coreAffinity, pLocalInstanceHandles[i]);
+               if (CPA_STATUS_SUCCESS != status) {
+                       rte_free((void *) pLocalInstanceHandles);
+                       return CPA_STATUS_FAIL;
+               }
+               if (coreAffinity == lcore_id) {
+                       printf("Crypto: instance found on core %d\n", i);
+                       *pInstanceHandle = pLocalInstanceHandles[i];
+                       return CPA_STATUS_SUCCESS;
+               }
+       }
+       /* core affinity not found */
+       rte_free((void *) pLocalInstanceHandles);
+       return CPA_STATUS_FAIL;
+}
+
+static CpaStatus
+initCySymSession(const int pkt_cipher_alg,
+               const int pkt_hash_alg, const CpaCySymHashMode hashMode,
+               const CpaCySymCipherDirection crypto_direction,
+               CpaCySymSessionCtx **ppSessionCtx,
+               const CpaInstanceHandle cyInstanceHandle,
+               const uint32_t lcore_id)
+{
+       Cpa32U sessionCtxSizeInBytes = 0;
+       CpaStatus status = CPA_STATUS_FAIL;
+       CpaBoolean isCrypto = CPA_TRUE, isHmac = CPA_TRUE;
+       CpaCySymSessionSetupData sessionSetupData;
+
+       bzero(&sessionSetupData, sizeof(CpaCySymSessionSetupData));
+
+       /* Assumption: key length is set to each algorithm's max length */
+       switch (pkt_cipher_alg) {
+       case NO_CIPHER:
+               isCrypto = CPA_FALSE;
+               break;
+       case CIPHER_DES:
+               sessionSetupData.cipherSetupData.cipherAlgorithm =
+                               CPA_CY_SYM_CIPHER_DES_ECB;
+               sessionSetupData.cipherSetupData.cipherKeyLenInBytes =
+                               KEY_SIZE_64_IN_BYTES;
+               break;
+       case CIPHER_DES_CBC:
+               sessionSetupData.cipherSetupData.cipherAlgorithm =
+                               CPA_CY_SYM_CIPHER_DES_CBC;
+               sessionSetupData.cipherSetupData.cipherKeyLenInBytes =
+                               KEY_SIZE_64_IN_BYTES;
+               break;
+       case CIPHER_DES3:
+               sessionSetupData.cipherSetupData.cipherAlgorithm =
+                               CPA_CY_SYM_CIPHER_3DES_ECB;
+               sessionSetupData.cipherSetupData.cipherKeyLenInBytes =
+                               KEY_SIZE_192_IN_BYTES;
+               break;
+       case CIPHER_DES3_CBC:
+               sessionSetupData.cipherSetupData.cipherAlgorithm =
+                               CPA_CY_SYM_CIPHER_3DES_CBC;
+               sessionSetupData.cipherSetupData.cipherKeyLenInBytes =
+                               KEY_SIZE_192_IN_BYTES;
+               break;
+       case CIPHER_AES:
+               sessionSetupData.cipherSetupData.cipherAlgorithm =
+                               CPA_CY_SYM_CIPHER_AES_ECB;
+               sessionSetupData.cipherSetupData.cipherKeyLenInBytes =
+                               KEY_SIZE_128_IN_BYTES;
+               break;
+       case CIPHER_AES_CBC_128:
+               sessionSetupData.cipherSetupData.cipherAlgorithm =
+                               CPA_CY_SYM_CIPHER_AES_CBC;
+               sessionSetupData.cipherSetupData.cipherKeyLenInBytes =
+                               KEY_SIZE_128_IN_BYTES;
+               break;
+       default:
+               printf("Crypto: Undefined Cipher specified\n");
+               break;
+       }
+       /* Set the cipher direction */
+       if (isCrypto) {
+               sessionSetupData.cipherSetupData.cipherDirection = crypto_direction;
+               sessionSetupData.cipherSetupData.pCipherKey =
+                               g_crypto_hash_keys.cipher_key;
+               sessionSetupData.symOperation = CPA_CY_SYM_OP_CIPHER;
+       }
+
+       /* Setup Hash common fields */
+       switch (pkt_hash_alg) {
+       case NO_HASH:
+               isHmac = CPA_FALSE;
+               break;
+       case HASH_AES_XCBC:
+               sessionSetupData.hashSetupData.hashAlgorithm = CPA_CY_SYM_HASH_AES_XCBC;
+               sessionSetupData.hashSetupData.digestResultLenInBytes =
+                               AES_XCBC_DIGEST_LENGTH_IN_BYTES;
+               break;
+       case HASH_AES_XCBC_96:
+               sessionSetupData.hashSetupData.hashAlgorithm = CPA_CY_SYM_HASH_AES_XCBC;
+                               sessionSetupData.hashSetupData.digestResultLenInBytes =
+                               AES_XCBC_96_DIGEST_LENGTH_IN_BYTES;
+               break;
+       case HASH_MD5:
+               sessionSetupData.hashSetupData.hashAlgorithm = CPA_CY_SYM_HASH_MD5;
+               sessionSetupData.hashSetupData.digestResultLenInBytes =
+                               MD5_DIGEST_LENGTH_IN_BYTES;
+               break;
+       case HASH_SHA1:
+               sessionSetupData.hashSetupData.hashAlgorithm = CPA_CY_SYM_HASH_SHA1;
+               sessionSetupData.hashSetupData.digestResultLenInBytes =
+                               SHA1_DIGEST_LENGTH_IN_BYTES;
+               break;
+       case HASH_SHA1_96:
+               sessionSetupData.hashSetupData.hashAlgorithm = CPA_CY_SYM_HASH_SHA1;
+               sessionSetupData.hashSetupData.digestResultLenInBytes =
+                               SHA1_96_DIGEST_LENGTH_IN_BYTES;
+           break;
+       case HASH_SHA224:
+               sessionSetupData.hashSetupData.hashAlgorithm = CPA_CY_SYM_HASH_SHA224;
+               sessionSetupData.hashSetupData.digestResultLenInBytes =
+                               SHA224_DIGEST_LENGTH_IN_BYTES;
+               break;
+       case HASH_SHA256:
+               sessionSetupData.hashSetupData.hashAlgorithm = CPA_CY_SYM_HASH_SHA256;
+               sessionSetupData.hashSetupData.digestResultLenInBytes =
+                               SHA256_DIGEST_LENGTH_IN_BYTES;
+               break;
+       case HASH_SHA384:
+               sessionSetupData.hashSetupData.hashAlgorithm = CPA_CY_SYM_HASH_SHA384;
+               sessionSetupData.hashSetupData.digestResultLenInBytes =
+                               SHA384_DIGEST_LENGTH_IN_BYTES;
+               break;
+       case HASH_SHA512:
+               sessionSetupData.hashSetupData.hashAlgorithm = CPA_CY_SYM_HASH_SHA512;
+               sessionSetupData.hashSetupData.digestResultLenInBytes =
+                               SHA512_DIGEST_LENGTH_IN_BYTES;
+               break;
+       default:
+               printf("Crypto: Undefined Hash specified\n");
+               break;
+       }
+       if (isHmac) {
+               sessionSetupData.hashSetupData.hashMode = hashMode;
+               sessionSetupData.symOperation = CPA_CY_SYM_OP_HASH;
+               /* If using authenticated hash setup key lengths */
+               if (CPA_CY_SYM_HASH_MODE_AUTH == hashMode) {
+                       /* Use a common max length key */
+                       sessionSetupData.hashSetupData.authModeSetupData.authKey =
+                                       g_crypto_hash_keys.hash_key;
+                       switch (pkt_hash_alg) {
+                       case HASH_AES_XCBC:
+                       case HASH_AES_XCBC_96:
+                               sessionSetupData.hashSetupData.authModeSetupData.authKeyLenInBytes =
+                                               AES_XCBC_AUTH_KEY_LENGTH_IN_BYTES;
+                               break;
+                       case HASH_MD5:
+                               sessionSetupData.hashSetupData.authModeSetupData.authKeyLenInBytes =
+                                               SHA1_AUTH_KEY_LENGTH_IN_BYTES;
+                               break;
+                       case HASH_SHA1:
+                       case HASH_SHA1_96:
+                               sessionSetupData.hashSetupData.authModeSetupData.authKeyLenInBytes =
+                                               SHA1_AUTH_KEY_LENGTH_IN_BYTES;
+                               break;
+                       case HASH_SHA224:
+                               sessionSetupData.hashSetupData.authModeSetupData.authKeyLenInBytes =
+                                               SHA224_AUTH_KEY_LENGTH_IN_BYTES;
+                               break;
+                       case HASH_SHA256:
+                               sessionSetupData.hashSetupData.authModeSetupData.authKeyLenInBytes =
+                                               SHA256_AUTH_KEY_LENGTH_IN_BYTES;
+                               break;
+                       case HASH_SHA384:
+                               sessionSetupData.hashSetupData.authModeSetupData.authKeyLenInBytes =
+                                               SHA384_AUTH_KEY_LENGTH_IN_BYTES;
+                               break;
+                       case HASH_SHA512:
+                               sessionSetupData.hashSetupData.authModeSetupData.authKeyLenInBytes =
+                                               SHA512_AUTH_KEY_LENGTH_IN_BYTES;
+                               break;
+                       default:
+                               printf("Crypto: Undefined Hash specified\n");
+                               return CPA_STATUS_FAIL;
+                       }
+               }
+       }
+
+       /* Only high priority supported */
+       sessionSetupData.sessionPriority = CPA_CY_PRIORITY_HIGH;
+
+       /* If chaining algorithms */
+       if (isCrypto && isHmac) {
+               sessionSetupData.symOperation = CPA_CY_SYM_OP_ALGORITHM_CHAINING;
+               /* @assumption Alg Chain order is cipher then hash for encrypt
+                * and hash then cipher for decrypt */
+               if (CPA_CY_SYM_CIPHER_DIRECTION_ENCRYPT == crypto_direction) {
+                       sessionSetupData.algChainOrder =
+                                       CPA_CY_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH;
+               } else {
+                       sessionSetupData.algChainOrder =
+                                       CPA_CY_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER;
+               }
+       }
+       if (!isCrypto && !isHmac) {
+               *ppSessionCtx = NULL;
+               return CPA_STATUS_SUCCESS;
+       }
+
+       /* Get the session context size based on the crypto and/or hash operations*/
+       status = cpaCySymDpSessionCtxGetSize(cyInstanceHandle, &sessionSetupData,
+                       &sessionCtxSizeInBytes);
+       if (CPA_STATUS_SUCCESS != status) {
+               printf("Crypto: cpaCySymDpSessionCtxGetSize error, status: %"PRId32"\n",
+                               status);
+               return CPA_STATUS_FAIL;
+       }
+
+       *ppSessionCtx = alloc_memzone_region(sessionCtxSizeInBytes, lcore_id);
+       if (NULL == *ppSessionCtx) {
+               printf("Crypto: Failed to allocate memory for Session Context\n");
+               return CPA_STATUS_FAIL;
+       }
+
+       status = cpaCySymDpInitSession(cyInstanceHandle, &sessionSetupData,
+                       CPA_TRUE,CPA_FALSE, *ppSessionCtx);
+       if (CPA_STATUS_SUCCESS != status) {
+               printf("Crypto: cpaCySymDpInitSession failed with status %"PRId32"\n", status);
+               return CPA_STATUS_FAIL;
+       }
+       return CPA_STATUS_SUCCESS;
+}
+
+static CpaStatus
+initSessionDataTables(struct qa_core_conf *qaCoreConf,uint32_t lcore_id)
+{
+       Cpa32U i = 0, j = 0;
+       CpaStatus status = CPA_STATUS_FAIL;
+       for (i = 0; i < NUM_CRYPTO; i++) {
+               for (j = 0; j < NUM_HMAC; j++) {
+                       status = initCySymSession(i, j, CPA_CY_SYM_HASH_MODE_AUTH,
+                                       CPA_CY_SYM_CIPHER_DIRECTION_ENCRYPT,
+                                       &qaCoreConf->encryptSessionHandleTbl[i][j],
+                                       qaCoreConf->instanceHandle,
+                                       lcore_id);
+                       if (CPA_STATUS_SUCCESS != status) {
+                               printf("Crypto: Failed to initialize Encrypt sessions\n");
+                               return CPA_STATUS_FAIL;
+                       }
+                       status = initCySymSession(i, j, CPA_CY_SYM_HASH_MODE_AUTH,
+                                       CPA_CY_SYM_CIPHER_DIRECTION_DECRYPT,
+                                       &qaCoreConf->decryptSessionHandleTbl[i][j],
+                                       qaCoreConf->instanceHandle,
+                                       lcore_id);
+                       if (CPA_STATUS_SUCCESS != status) {
+                               printf("Crypto: Failed to initialize Decrypt sessions\n");
+                               return CPA_STATUS_FAIL;
+                       }
+               }
+       }
+       return CPA_STATUS_SUCCESS;
+}
+
+int
+crypto_init(void)
+{
+       if (CPA_STATUS_SUCCESS != icp_sal_userStart("SSL")) {
+               printf("Crypto: Could not start sal for user space\n");
+               return CPA_STATUS_FAIL;
+       }
+       printf("Crypto: icp_sal_userStart(\"SSL\")\n");
+       return 0;
+}
+
+/*
+ * Per core initialisation
+ */
+int
+per_core_crypto_init(uint32_t lcore_id)
+{
+       CpaStatus status = CPA_STATUS_FAIL;
+       char memzone_name[RTE_MEMZONE_NAMESIZE];
+
+       int socketID = rte_lcore_to_socket_id(lcore_id);
+
+       /* Allocate software ring for response messages. */
+
+       qaCoreConf[lcore_id].callbackQueue.head = 0;
+       qaCoreConf[lcore_id].callbackQueue.tail = 0;
+       qaCoreConf[lcore_id].callbackQueue.numEntries = 0;
+       qaCoreConf[lcore_id].kickFreq = 0;
+       qaCoreConf[lcore_id].qaOutstandingRequests = 0;
+       qaCoreConf[lcore_id].numResponseAttempts = 0;
+
+       /* Initialise and reserve lcore memzone for virt2phys translation */
+       rte_snprintf(memzone_name,
+                       RTE_MEMZONE_NAMESIZE,
+                       "lcore_%u",
+                       lcore_id);
+
+       qaCoreConf[lcore_id].lcoreMemzone.memzone = rte_memzone_reserve(
+                       memzone_name,
+                       LCORE_MEMZONE_SIZE,
+                       socketID,
+                       0);
+       if (NULL == qaCoreConf[lcore_id].lcoreMemzone.memzone) {
+               printf("Crypto: Error allocating memzone on lcore %u\n",lcore_id);
+               return -1;
+       }
+       qaCoreConf[lcore_id].lcoreMemzone.next_free_address =
+                                                       qaCoreConf[lcore_id].lcoreMemzone.memzone->addr;
+
+       qaCoreConf[lcore_id].pPacketIV = alloc_memzone_region(IV_LENGTH_16_BYTES,
+                                                       lcore_id);
+
+       if (NULL == qaCoreConf[lcore_id].pPacketIV ) {
+               printf("Crypto: Failed to allocate memory for Initialization Vector\n");
+               return -1;
+       }
+
+       memcpy(qaCoreConf[lcore_id].pPacketIV, &g_crypto_hash_keys.iv,
+                       IV_LENGTH_16_BYTES);
+
+       qaCoreConf[lcore_id].packetIVPhy = qa_v2p(qaCoreConf[lcore_id].pPacketIV);
+       if (0 == qaCoreConf[lcore_id].packetIVPhy) {
+               printf("Crypto: Invalid physical address for Initialization Vector\n");
+               return -1;
+       }
+
+       /*
+        * Obtain the instance handle that is mapped to the current lcore.
+        * This can fail if an instance is not mapped to a bank which has been
+        * affinitized to the current lcore.
+        */
+       status = get_crypto_instance_on_core(&(qaCoreConf[lcore_id].instanceHandle),
+                       lcore_id);
+       if (CPA_STATUS_SUCCESS != status) {
+               printf("Crypto: get_crypto_instance_on_core failed with status: %"PRId32"\n",
+                               status);
+               return -1;
+       }
+
+       status = cpaCySymDpRegCbFunc(qaCoreConf[lcore_id].instanceHandle,
+                       (CpaCySymDpCbFunc) qa_crypto_callback);
+       if (CPA_STATUS_SUCCESS != status) {
+               printf("Crypto: cpaCySymDpRegCbFunc failed with status: %"PRId32"\n", status);
+               return -1;
+       }
+
+       /*
+        * Set the address translation callback for virtual to physical address
+        * mapping. This will be called by the QAT driver during initialisation only.
+        */
+       status = cpaCySetAddressTranslation(qaCoreConf[lcore_id].instanceHandle,
+                       (CpaVirtualToPhysical) qa_v2p);
+       if (CPA_STATUS_SUCCESS != status) {
+               printf("Crypto: cpaCySetAddressTranslation failed with status: %"PRId32"\n",
+                               status);
+               return -1;
+       }
+
+       status = initSessionDataTables(&qaCoreConf[lcore_id],lcore_id);
+       if (CPA_STATUS_SUCCESS != status) {
+               printf("Crypto: Failed to allocate all session tables.");
+               return -1;
+       }
+       return 0;
+}
+
+static CpaStatus
+enqueueOp(CpaCySymDpOpData *opData, uint32_t lcore_id)
+{
+
+       CpaStatus status;
+
+       /*
+        * Assumption is there is no requirement to do load balancing between
+        * acceleration units - that is one acceleration unit is tied to a core.
+        */
+       opData->instanceHandle = qaCoreConf[lcore_id].instanceHandle;
+
+       if ((++qaCoreConf[lcore_id].kickFreq) % CRYPTO_BURST_TX == 0) {
+               status = cpaCySymDpEnqueueOp(opData, CPA_TRUE);
+       } else {
+               status = cpaCySymDpEnqueueOp(opData, CPA_FALSE);
+       }
+
+       qaCoreConf[lcore_id].qaOutstandingRequests++;
+
+       return status;
+}
+
+void
+crypto_flush_tx_queue(uint32_t lcore_id)
+{
+
+       cpaCySymDpPerformOpNow(qaCoreConf[lcore_id].instanceHandle);
+}
+
+enum crypto_result
+crypto_encrypt(struct rte_mbuf *rte_buff, enum cipher_alg c, enum hash_alg h)
+{
+       CpaCySymDpOpData *opData =
+                       (CpaCySymDpOpData *) ((char *) (rte_buff->pkt.data)
+                                       + CRYPTO_OFFSET_TO_OPDATA);
+       uint32_t lcore_id;
+
+       lcore_id = rte_lcore_id();
+
+       bzero(opData, sizeof(CpaCySymDpOpData));
+
+       opData->srcBuffer = opData->dstBuffer = PACKET_DATA_START_PHYS(rte_buff);
+       opData->srcBufferLen = opData->dstBufferLen = rte_buff->pkt.data_len;
+       opData->sessionCtx = qaCoreConf[lcore_id].encryptSessionHandleTbl[c][h];
+       opData->thisPhys = PACKET_DATA_START_PHYS(rte_buff)
+                       + CRYPTO_OFFSET_TO_OPDATA;
+       opData->pCallbackTag = rte_buff;
+
+       /* if no crypto or hash operations are specified return fail */
+       if (NO_CIPHER == c && NO_HASH == h)
+               return CRYPTO_RESULT_FAIL;
+
+       if (NO_CIPHER != c) {
+               opData->pIv = qaCoreConf[lcore_id].pPacketIV;
+               opData->iv = qaCoreConf[lcore_id].packetIVPhy;
+
+               if (CIPHER_AES_CBC_128 == c)
+                       opData->ivLenInBytes = IV_LENGTH_16_BYTES;
+               else
+                       opData->ivLenInBytes = IV_LENGTH_8_BYTES;
+
+               opData->cryptoStartSrcOffsetInBytes = CRYPTO_START_OFFSET;
+               opData->messageLenToCipherInBytes = rte_buff->pkt.data_len
+                               - CRYPTO_START_OFFSET;
+               /*
+                * Work around for padding, message length has to be a multiple of
+                * block size.
+                */
+               opData->messageLenToCipherInBytes -= opData->messageLenToCipherInBytes
+                               % CIPHER_BLOCK_DEFAULT_SIZE;
+       }
+
+       if (NO_HASH != h) {
+
+               opData->hashStartSrcOffsetInBytes = HASH_START_OFFSET;
+               opData->messageLenToHashInBytes = rte_buff->pkt.data_len
+                               - HASH_START_OFFSET;
+               /*
+                * Work around for padding, message length has to be a multiple of block
+                * size.
+                */
+               opData->messageLenToHashInBytes -= opData->messageLenToHashInBytes
+                               % HASH_BLOCK_DEFAULT_SIZE;
+
+               /*
+                * Assumption: Ok ignore the passed digest pointer and place HMAC at end
+                * of packet.
+                */
+               opData->digestResult = rte_buff->buf_physaddr + rte_buff->pkt.data_len;
+       }
+
+       if (CPA_STATUS_SUCCESS != enqueueOp(opData, lcore_id)) {
+               /*
+                * Failed to place a packet on the hardware queue.
+                * Most likely because the QA hardware is busy.
+                */
+               return CRYPTO_RESULT_FAIL;
+       }
+       return CRYPTO_RESULT_IN_PROGRESS;
+}
+
+enum crypto_result
+crypto_decrypt(struct rte_mbuf *rte_buff, enum cipher_alg c, enum hash_alg h)
+{
+
+       CpaCySymDpOpData *opData = (void*) (((char *) rte_buff->pkt.data)
+                       + CRYPTO_OFFSET_TO_OPDATA);
+       uint32_t lcore_id;
+
+       lcore_id = rte_lcore_id();
+
+       bzero(opData, sizeof(CpaCySymDpOpData));
+
+       opData->dstBuffer = opData->srcBuffer = PACKET_DATA_START_PHYS(rte_buff);
+       opData->dstBufferLen = opData->srcBufferLen = rte_buff->pkt.data_len;
+       opData->thisPhys = PACKET_DATA_START_PHYS(rte_buff)
+                       + CRYPTO_OFFSET_TO_OPDATA;
+       opData->sessionCtx = qaCoreConf[lcore_id].decryptSessionHandleTbl[c][h];
+       opData->pCallbackTag = rte_buff;
+
+       /* if no crypto or hmac operations are specified return fail */
+       if (NO_CIPHER == c && NO_HASH == h)
+               return CRYPTO_RESULT_FAIL;
+
+       if (NO_CIPHER != c) {
+               opData->pIv = qaCoreConf[lcore_id].pPacketIV;
+               opData->iv = qaCoreConf[lcore_id].packetIVPhy;
+
+               if (CIPHER_AES_CBC_128 == c)
+                       opData->ivLenInBytes = IV_LENGTH_16_BYTES;
+               else
+                       opData->ivLenInBytes = IV_LENGTH_8_BYTES;
+
+               opData->cryptoStartSrcOffsetInBytes = CRYPTO_START_OFFSET;
+               opData->messageLenToCipherInBytes = rte_buff->pkt.data_len
+                               - CRYPTO_START_OFFSET;
+
+               /*
+                * Work around for padding, message length has to be a multiple of block
+                * size.
+                */
+               opData->messageLenToCipherInBytes -= opData->messageLenToCipherInBytes
+                               % CIPHER_BLOCK_DEFAULT_SIZE;
+       }
+       if (NO_HASH != h) {
+               opData->hashStartSrcOffsetInBytes = HASH_START_OFFSET;
+               opData->messageLenToHashInBytes = rte_buff->pkt.data_len
+                               - HASH_START_OFFSET;
+               /*
+                * Work around for padding, message length has to be a multiple of block
+                * size.
+                */
+               opData->messageLenToHashInBytes -= opData->messageLenToHashInBytes
+                               % HASH_BLOCK_DEFAULT_SIZE;
+               opData->digestResult = rte_buff->buf_physaddr + rte_buff->pkt.data_len;
+       }
+
+       if (CPA_STATUS_SUCCESS != enqueueOp(opData, lcore_id)) {
+               /*
+                * Failed to place a packet on the hardware queue.
+                * Most likely because the QA hardware is busy.
+                */
+               return CRYPTO_RESULT_FAIL;
+       }
+       return CRYPTO_RESULT_IN_PROGRESS;
+}
+
/*
 * Fetch the next completed crypto operation for the calling lcore from
 * the per-lcore software callback queue, and opportunistically poll the
 * QA hardware for further completions.
 *
 * Returns the user-data pointer stored with the completed operation
 * (the struct rte_mbuf * set as pCallbackTag by crypto_encrypt/
 * crypto_decrypt), or NULL when nothing has completed yet.
 */
void *
crypto_get_next_response(void)
{
	uint32_t lcore_id;
	lcore_id = rte_lcore_id();
	struct qa_callbackQueue *callbackQ = &(qaCoreConf[lcore_id].callbackQueue);
	void *entry = NULL;

	/* Pop the oldest entry if the software queue is non-empty.
	 * NOTE(review): tail is incremented with no explicit wrap here;
	 * presumably the enqueue side masks/resets it — confirm against
	 * the callback producer. */
	if (callbackQ->numEntries) {
		entry = callbackQ->qaCallbackRing[callbackQ->tail];
		callbackQ->tail++;
		callbackQ->numEntries--;
	}

	/* If there are no outstanding requests no need to poll, return entry */
	if (qaCoreConf[lcore_id].qaOutstandingRequests == 0)
		return entry;

	/* Rate-limit hardware polling: only when the software queue runs
	 * low, and only every GET_NEXT_RESPONSE_FREQ-th attempt. */
	if (callbackQ->numEntries < CRYPTO_QUEUED_RESP_POLL_THRESHOLD
			&& qaCoreConf[lcore_id].numResponseAttempts++
					% GET_NEXT_RESPONSE_FREQ == 0) {
		/*
		 * Only poll the hardware when there is less than
		 * CRYPTO_QUEUED_RESP_POLL_THRESHOLD elements in the software queue
		 */
		icp_sal_CyPollDpInstance(qaCoreConf[lcore_id].instanceHandle,
				CRYPTO_MAX_RESPONSE_QUOTA);
	}
	return entry;
}
diff --git a/examples/dpdk_qat/crypto.h b/examples/dpdk_qat/crypto.h
new file mode 100644 (file)
index 0000000..13a06ab
--- /dev/null
@@ -0,0 +1,88 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#ifndef CRYPTO_H_
+#define CRYPTO_H_
+
/* Pass Labels/Values to crypto units */

/* Cipher algorithm selectors; used to index the per-lcore QA session
 * handle tables in crypto.c. NO_CIPHER skips encryption entirely. */
enum cipher_alg {
	/* Option to not do any cryptography */
	NO_CIPHER,
	CIPHER_DES,
	CIPHER_DES_CBC,
	CIPHER_DES3,
	CIPHER_DES3_CBC,
	CIPHER_AES,
	CIPHER_AES_CBC_128,
};

/* Hash/HMAC algorithm selectors; used to index the per-lcore QA session
 * handle tables in crypto.c. NO_HASH skips hashing entirely. */
enum hash_alg {
	/* Option to not do any hash */
	NO_HASH,
	HASH_MD5,
	HASH_SHA1,
	HASH_SHA1_96,
	HASH_SHA224,
	HASH_SHA256,
	HASH_SHA384,
	HASH_SHA512,
	HASH_AES_XCBC,
	HASH_AES_XCBC_96
};
+
/* Return value from crypto_{encrypt/decrypt} */
enum crypto_result {
	/* Packet was successfully put into crypto queue */
	CRYPTO_RESULT_IN_PROGRESS,
	/* Cryptography has failed in some way */
	CRYPTO_RESULT_FAIL,
};

/* Enqueue a packet for encryption and/or hash on the calling lcore's QA
 * instance; the mbuf is returned later via crypto_get_next_response(). */
extern enum crypto_result crypto_encrypt(struct rte_mbuf *pkt, enum cipher_alg c,
		enum hash_alg h);
/* Enqueue a packet for decryption and/or hash on the calling lcore's QA
 * instance; the mbuf is returned later via crypto_get_next_response(). */
extern enum crypto_result crypto_decrypt(struct rte_mbuf *pkt, enum cipher_alg c,
		enum hash_alg h);

/* Process-wide crypto/QA initialisation; call once before workers start. */
extern int crypto_init(void);

/* Per-lcore crypto initialisation; call once on each worker lcore. */
extern int per_core_crypto_init(uint32_t lcore_id);

/* Release crypto/QA resources on shutdown. */
extern void crypto_exit(void);

/* Next completed crypto operation for this lcore, or NULL if none. */
extern void *crypto_get_next_response(void);

/* Push any queued crypto requests for the given lcore to the hardware. */
extern void crypto_flush_tx_queue(uint32_t lcore_id);
+
+#endif /* CRYPTO_H_ */
diff --git a/examples/dpdk_qat/main.c b/examples/dpdk_qat/main.c
new file mode 100644 (file)
index 0000000..2a4a0ec
--- /dev/null
@@ -0,0 +1,857 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <sys/types.h>
+#include <string.h>
+#include <sys/queue.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <getopt.h>
+
+#include <rte_common.h>
+#include <rte_byteorder.h>
+#include <rte_log.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_launch.h>
+#include <rte_atomic.h>
+#include <rte_cycles.h>
+#include <rte_prefetch.h>
+#include <rte_lcore.h>
+#include <rte_per_lcore.h>
+#include <rte_branch_prediction.h>
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_random.h>
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_ring.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_ip.h>
+#include <rte_string_fns.h>
+
+#include "main.h"
+#include "crypto.h"
+
+#define MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
+#define NB_MBUF   (32 * 1024)
+
+/*
+ * RX and TX Prefetch, Host, and Write-back threshold values should be
+ * carefully set for optimal performance. Consult the network
+ * controller's datasheet and supporting DPDK documentation for guidance
+ * on how these parameters should be set.
+ */
+#define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */
+#define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */
+#define RX_WTHRESH 4 /**< Default values of RX write-back threshold reg. */
+
+/*
+ * These default values are optimized for use with the Intel(R) 82599 10 GbE
+ * Controller and the DPDK ixgbe PMD. Consider using other values for other
+ * network controllers and/or network drivers.
+ */
+#define TX_PTHRESH 36 /**< Default values of TX prefetch threshold reg. */
+#define TX_HTHRESH 0  /**< Default values of TX host threshold reg. */
+#define TX_WTHRESH 0  /**< Default values of TX write-back threshold reg. */
+
+#define MAX_PKT_BURST 32
+#define BURST_TX_DRAIN 200000ULL /* around 100us at 2 Ghz */
+
+#define SOCKET0 0
+
+#define TX_QUEUE_FLUSH_MASK 0xFFFFFFFF
+#define TSC_COUNT_LIMIT 1000
+
+#define ACTION_ENCRYPT 1
+#define ACTION_DECRYPT 2
+
+/*
+ * Configurable number of RX/TX ring descriptors
+ */
+#define RTE_TEST_RX_DESC_DEFAULT 128
+#define RTE_TEST_TX_DESC_DEFAULT 512
+static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
+static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
+
+/* ethernet addresses of ports */
+static struct ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];
+
+/* mask of enabled ports */
+static unsigned enabled_port_mask = 0;
+static int promiscuous_on = 1; /**< Ports set in promiscuous mode on by default. */
+
/* Staging table holding one burst of mbufs (RX refill or pending TX). */
struct mbuf_table {
	uint16_t len; /* number of valid entries in m_table */
	struct rte_mbuf *m_table[MAX_PKT_BURST];
};

/* One (port, queue) pair polled by an lcore. */
struct lcore_rx_queue {
	uint8_t port_id;
	uint8_t queue_id;
};

#define MAX_RX_QUEUE_PER_LCORE 16

#define MAX_LCORE_PARAMS 1024
/* One --config triple: RX queue (port_id, queue_id) serviced by lcore_id. */
struct lcore_params {
	uint8_t port_id;
	uint8_t queue_id;
	uint8_t lcore_id;
};

/* Destination array filled by parse_config() from the --config option. */
static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
/* Default port/queue/lcore mapping used when --config is not given. */
static struct lcore_params lcore_params_array_default[] = {
	{0, 0, 2},
	{0, 1, 2},
	{0, 2, 2},
	{1, 0, 2},
	{1, 1, 2},
	{1, 2, 2},
	{2, 0, 2},
	{3, 0, 3},
	{3, 1, 3},
};

/* Active mapping; parse_config() repoints this at lcore_params_array. */
static struct lcore_params * lcore_params = lcore_params_array_default;
static uint16_t nb_lcore_params = sizeof(lcore_params_array_default) /
				sizeof(lcore_params_array_default[0]);
+
/* Default device configuration: RSS over IPv4 on RX, IP checksum offload
 * enabled, header split / VLAN filtering / jumbo frames disabled. */
static struct rte_eth_conf port_conf = {
	.rxmode = {
		.split_hdr_size = 0,
		.header_split   = 0, /**< Header Split disabled */
		.hw_ip_checksum = 1, /**< IP checksum offload enabled */
		.hw_vlan_filter = 0, /**< VLAN filtering disabled */
		.jumbo_frame    = 0, /**< Jumbo Frame Support disabled */
		.hw_strip_crc   = 0, /**< CRC stripped by hardware */
	},
	.rx_adv_conf = {
		.rss_conf = {
			.rss_key = NULL,
			.rss_hf = ETH_RSS_IPV4,
		},
	},
	.txmode = {
	},
};

/* RX ring thresholds (see the RX_*THRESH notes near the top of file). */
static const struct rte_eth_rxconf rx_conf = {
	.rx_thresh = {
		.pthresh = RX_PTHRESH,
		.hthresh = RX_HTHRESH,
		.wthresh = RX_WTHRESH,
	},
};

/* TX ring thresholds; free/RS thresholds are left to the PMD defaults. */
static const struct rte_eth_txconf tx_conf = {
	.tx_thresh = {
		.pthresh = TX_PTHRESH,
		.hthresh = TX_HTHRESH,
		.wthresh = TX_WTHRESH,
	},
	.tx_free_thresh = 0, /* Use PMD default values */
	.tx_rs_thresh = 0, /* Use PMD default values */
};

/* One packet mbuf pool per NUMA socket, created lazily by init_mem(). */
static struct rte_mempool * pktmbuf_pool[RTE_MAX_NUMA_NODES];
+
/* Per-lcore run-time state, cache-aligned to avoid false sharing. */
struct lcore_conf {
	uint64_t tsc;       /* TSC value captured at the last TX drain */
	uint64_t tsc_count; /* loop iterations since the last drain check */
	uint32_t tx_mask;   /* bit per port: set => periodic flush needed */
	uint16_t n_rx_queue; /* valid entries in rx_queue_list */
	uint16_t rx_queue_list_pos;
	struct lcore_rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
	uint16_t tx_queue_id[RTE_MAX_ETHPORTS]; /* TX queue used per port */
	struct mbuf_table rx_mbuf; /* current RX burst being consumed */
	uint32_t rx_mbuf_pos;      /* next unread index within rx_mbuf */
	uint32_t rx_curr_queue;    /* round-robin cursor over rx_queue_list */
	struct mbuf_table tx_mbufs[RTE_MAX_ETHPORTS]; /* per-port TX staging */
} __rte_cache_aligned;

static struct lcore_conf lcore_conf[RTE_MAX_LCORE];
+
/*
 * Return the next received packet for this lcore, refilling the local
 * burst buffer from the lcore's RX queues (serviced round-robin) when
 * it runs empty. Returns NULL when no configured queue has packets.
 */
static inline struct rte_mbuf *
nic_rx_get_packet(struct lcore_conf *qconf)
{
	struct rte_mbuf *pkt;

	if (unlikely(qconf->n_rx_queue == 0))
		return NULL;

	/* Look for the next queue with packets; return if none */
	if (unlikely(qconf->rx_mbuf_pos == qconf->rx_mbuf.len)) {
		uint32_t i;

		qconf->rx_mbuf_pos = 0;
		/* Try each RX queue at most once, starting from the
		 * round-robin cursor rx_curr_queue */
		for (i = 0; i < qconf->n_rx_queue; i++) {
			qconf->rx_mbuf.len = rte_eth_rx_burst(
				qconf->rx_queue_list[qconf->rx_curr_queue].port_id,
				qconf->rx_queue_list[qconf->rx_curr_queue].queue_id,
				qconf->rx_mbuf.m_table, MAX_PKT_BURST);

			/* Advance the cursor before testing, so the next
			 * refill starts at the following queue */
			qconf->rx_curr_queue++;
			if (unlikely(qconf->rx_curr_queue == qconf->n_rx_queue))
				qconf->rx_curr_queue = 0;
			if (likely(qconf->rx_mbuf.len > 0))
				break;
		}
		/* All queues were empty */
		if (unlikely(i == qconf->n_rx_queue))
			return NULL;
	}

	/* Get the next packet from the current queue; if last packet, go to next queue */
	pkt = qconf->rx_mbuf.m_table[qconf->rx_mbuf_pos];
	qconf->rx_mbuf_pos++;

	return pkt;
}
+
+static inline void
+nic_tx_flush_queues(struct lcore_conf *qconf)
+{
+       uint8_t portid;
+
+       for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
+               struct rte_mbuf **m_table = NULL;
+               uint16_t queueid, len;
+               uint32_t n, i;
+
+               if (likely((qconf->tx_mask & (1 << portid)) == 0))
+                       continue;
+
+               len = qconf->tx_mbufs[portid].len;
+               if (likely(len == 0))
+                       continue;
+
+               queueid = qconf->tx_queue_id[portid];
+               m_table = qconf->tx_mbufs[portid].m_table;
+
+               n = rte_eth_tx_burst(portid, queueid, m_table, len);
+               for (i = n; i < len; i++){
+                       rte_pktmbuf_free(m_table[i]);
+               }
+
+               qconf->tx_mbufs[portid].len = 0;
+       }
+
+       qconf->tx_mask = TX_QUEUE_FLUSH_MASK;
+}
+
/*
 * Buffer one packet for transmission on the given port from the calling
 * lcore. When the per-port staging table reaches MAX_PKT_BURST the
 * whole burst is handed to the NIC immediately (packets the NIC refuses
 * are freed) and the port bit is cleared from the flush mask, so the
 * periodic flusher skips a port that just transmitted.
 */
static inline void
nic_tx_send_packet(struct rte_mbuf *pkt, uint8_t port)
{
	struct lcore_conf *qconf;
	uint32_t lcoreid;
	uint16_t len;

	if (unlikely(pkt == NULL)) {
		return;
	}

	lcoreid = rte_lcore_id();
	qconf = &lcore_conf[lcoreid];

	/* Append the packet to the port's staging table */
	len = qconf->tx_mbufs[port].len;
	qconf->tx_mbufs[port].m_table[len] = pkt;
	len++;

	/* enough pkts to be sent */
	if (unlikely(len == MAX_PKT_BURST)) {
		uint32_t n, i;
		uint16_t queueid;

		queueid = qconf->tx_queue_id[port];
		n = rte_eth_tx_burst(port, queueid, qconf->tx_mbufs[port].m_table, MAX_PKT_BURST);
		/* Free any packets the NIC could not accept */
		for (i = n; i < MAX_PKT_BURST; i++){
			rte_pktmbuf_free(qconf->tx_mbufs[port].m_table[i]);
		}

		/* A burst just went out; no periodic flush needed for now */
		qconf->tx_mask &= ~(1 << port);
		len = 0;
	}

	qconf->tx_mbufs[port].len = len;
}
+
/*
 * Map an input port to its output port. Ports are paired (0<->1,
 * 2<->3, ...), so the partner is found by flipping the low bit.
 */
static inline uint8_t
get_output_port(uint8_t input_port)
{
	uint8_t paired_port = (uint8_t)(input_port ^ 1);

	return paired_port;
}
+
/*
 * Per-lcore main processing loop (never returns).
 *
 * Each iteration: periodically drains the NIC and crypto TX staging
 * queues; then services one packet — a crypto completion if one is
 * ready, otherwise a fresh packet from this lcore's RX queues. Fresh
 * packets whose source IP requests encryption/decryption are handed to
 * the QA hardware; everything else is forwarded out the paired port.
 */
static __attribute__((noreturn)) int
main_loop(__attribute__((unused)) void *dummy)
{
	uint32_t lcoreid;
	struct lcore_conf *qconf;

	lcoreid = rte_lcore_id();
	qconf = &lcore_conf[lcoreid];

	printf("Thread %u starting...\n", lcoreid);

	for (;;) {
		struct rte_mbuf *pkt;
		uint32_t pkt_from_nic_rx = 0;
		uint8_t port;

		/* Flush TX queues: read the TSC only once every
		 * TSC_COUNT_LIMIT iterations to keep rte_rdtsc() off the
		 * fast path, then drain if BURST_TX_DRAIN cycles elapsed */
		qconf->tsc_count++;
		if (unlikely(qconf->tsc_count == TSC_COUNT_LIMIT)) {
			uint64_t tsc, diff_tsc;

			tsc = rte_rdtsc();

			diff_tsc = tsc - qconf->tsc;
			if (unlikely(diff_tsc > BURST_TX_DRAIN)) {
				nic_tx_flush_queues(qconf);
				crypto_flush_tx_queue(lcoreid);
				qconf->tsc = tsc;
			}

			qconf->tsc_count = 0;
		}

		/*
		 * Check the Intel QuickAssist queues first
		 *
		 ***/
		pkt = (struct rte_mbuf *) crypto_get_next_response();
		if (pkt == NULL) {
			pkt = nic_rx_get_packet(qconf);
			pkt_from_nic_rx = 1;
		}
		if (pkt == NULL)
			continue;
		/* Send packet to either QAT encrypt, QAT decrypt or NIC TX */
		if (pkt_from_nic_rx) {
			struct ipv4_hdr *ip  = (struct ipv4_hdr *) (rte_pktmbuf_mtod(pkt, unsigned char *) +
					sizeof(struct ether_hdr));
			/* The source IP address encodes the requested action
			 * and the cipher/hash algorithm selectors.
			 * NOTE(review): the >>16 / >>8 shifts operate on the
			 * raw big-endian address, so the extracted bytes
			 * depend on host endianness — confirm the intended
			 * encoding against the traffic generator. */
			if (ip->src_addr & rte_cpu_to_be_32(ACTION_ENCRYPT)) {
				if (CRYPTO_RESULT_FAIL == crypto_encrypt(pkt,
					(enum cipher_alg)((ip->src_addr >> 16) & 0xFF),
					(enum hash_alg)((ip->src_addr >> 8) & 0xFF)))
					rte_pktmbuf_free(pkt);
				continue;
			}

			if (ip->src_addr & rte_cpu_to_be_32(ACTION_DECRYPT)) {
				if(CRYPTO_RESULT_FAIL == crypto_decrypt(pkt,
					(enum cipher_alg)((ip->src_addr >> 16) & 0xFF),
					(enum hash_alg)((ip->src_addr >> 8) & 0xFF)))
					rte_pktmbuf_free(pkt);
				continue;
			}
		}

		port = get_output_port(pkt->pkt.in_port);

		/* Transmit the packet */
		nic_tx_send_packet(pkt, port);
	}
}
+
/* Query the NIC for the maximum number of RX queues port_id supports. */
static inline unsigned
get_port_max_rx_queues(uint8_t port_id)
{
	struct rte_eth_dev_info dev_info;

	rte_eth_dev_info_get(port_id, &dev_info);
	return dev_info.max_rx_queues;
}
+
/* Query the NIC for the maximum number of TX queues port_id supports. */
static inline unsigned
get_port_max_tx_queues(uint8_t port_id)
{
	struct rte_eth_dev_info dev_info;

	rte_eth_dev_info_get(port_id, &dev_info);
	return dev_info.max_tx_queues;
}
+
+static int
+check_lcore_params(void)
+{
+       uint16_t i;
+
+       for (i = 0; i < nb_lcore_params; ++i) {
+               if (lcore_params[i].queue_id >= get_port_max_rx_queues(lcore_params[i].port_id)) {
+                       printf("invalid queue number: %hhu\n", lcore_params[i].queue_id);
+                       return -1;
+               }
+               if (!rte_lcore_is_enabled(lcore_params[i].lcore_id)) {
+                       printf("error: lcore %hhu is not enabled in lcore mask\n",
+                               lcore_params[i].lcore_id);
+                       return -1;
+               }
+       }
+       return 0;
+}
+
+static int
+check_port_config(const unsigned nb_ports)
+{
+       unsigned portid;
+       uint16_t i;
+
+       for (i = 0; i < nb_lcore_params; ++i) {
+               portid = lcore_params[i].port_id;
+               if ((enabled_port_mask & (1 << portid)) == 0) {
+                       printf("port %u is not enabled in port mask\n", portid);
+                       return -1;
+               }
+               if (portid >= nb_ports) {
+                       printf("port %u is not present on the board\n", portid);
+                       return -1;
+               }
+       }
+       return 0;
+}
+
/*
 * Return the number of RX queues to configure for a port: one more than
 * the highest queue_id referenced for that port in lcore_params, or 0
 * when the port is not referenced at all.
 */
static uint8_t
get_port_n_rx_queues(const uint8_t port)
{
	int queue = -1; /* -1 so an unreferenced port yields 0 after ++ */
	uint16_t i;

	for (i = 0; i < nb_lcore_params; ++i) {
		if (lcore_params[i].port_id == port && lcore_params[i].queue_id > queue)
			queue = lcore_params[i].queue_id;
	}
	return (uint8_t)(++queue);
}
+
/*
 * Distribute the (port, queue) pairs from lcore_params across the
 * per-lcore rx_queue_list tables in lcore_conf. Returns -1 when any
 * lcore would be assigned more than MAX_RX_QUEUE_PER_LCORE queues,
 * 0 on success.
 */
static int
init_lcore_rx_queues(void)
{
	uint16_t i, nb_rx_queue;
	uint8_t lcore;

	for (i = 0; i < nb_lcore_params; ++i) {
		lcore = lcore_params[i].lcore_id;
		nb_rx_queue = lcore_conf[lcore].n_rx_queue;
		if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) {
			printf("error: too many queues (%u) for lcore: %u\n",
				(unsigned)nb_rx_queue + 1, (unsigned)lcore);
			return -1;
		}
		/* Append this (port, queue) pair to the lcore's list */
		lcore_conf[lcore].rx_queue_list[nb_rx_queue].port_id =
			lcore_params[i].port_id;
		lcore_conf[lcore].rx_queue_list[nb_rx_queue].queue_id =
			lcore_params[i].queue_id;
		lcore_conf[lcore].n_rx_queue++;
	}
	return 0;
}
+
/* Display usage for the application-specific (post-EAL) arguments. */
static void
print_usage(const char *prgname)
{
	/* Fixed typo: the repeated --config group was missing its closing
	 * parenthesis ("[,(port,queue,lcore]]"). */
	printf ("%s [EAL options] -- -p PORTMASK [--no-promisc]"
		"  [--config (port,queue,lcore)[,(port,queue,lcore)]]\n"
		"  -p PORTMASK: hexadecimal bitmask of ports to configure\n"
		"  --no-promisc: disable promiscuous mode (default is ON)\n"
		"  --config (port,queue,lcore): rx queues configuration\n",
		prgname);
}
+
/*
 * Parse a hexadecimal port-mask string.
 * Returns the mask value, or 0 when the string is empty or contains
 * anything other than a valid hexadecimal number (callers treat 0 as
 * "no ports selected").
 */
static unsigned
parse_portmask(const char *portmask)
{
	char *endptr = NULL;
	unsigned long mask;

	/* parse hexadecimal string */
	mask = strtoul(portmask, &endptr, 16);

	/* reject empty input or trailing garbage */
	if ((portmask[0] == '\0') || (endptr == NULL) || (*endptr != '\0'))
		return 0;

	return (unsigned)mask;
}
+
/*
 * Parse the --config option: a list of "(port,queue,lcore)" triples.
 * Fills lcore_params_array, repoints lcore_params at it and updates
 * nb_lcore_params. Returns 0 on success, -1 on malformed input.
 * Note: nb_lcore_params is reset first, so a failed parse leaves the
 * configuration partially overwritten.
 */
static int
parse_config(const char *q_arg)
{
	char s[256];
	const char *p, *p_end = q_arg;
	char *end;
	enum fieldnames {
		FLD_PORT = 0,
		FLD_QUEUE,
		FLD_LCORE,
		_NUM_FLD
	};
	unsigned long int_fld[_NUM_FLD];
	char *str_fld[_NUM_FLD];
	int i;
	unsigned size;

	nb_lcore_params = 0;

	/* Each loop iteration consumes one "(...)" group */
	while ((p = strchr(p_end,'(')) != NULL) {
		if (nb_lcore_params >= MAX_LCORE_PARAMS) {
			printf("exceeded max number of lcore params: %hu\n",
				nb_lcore_params);
			return -1;
		}
		++p;
		if((p_end = strchr(p,')')) == NULL)
			return -1;

		size = p_end - p;
		if(size >= sizeof(s))
			return -1;

		/* Copy the group body, then split on ',' into exactly 3 fields */
		rte_snprintf(s, sizeof(s), "%.*s", size, p);
		if (rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',') != _NUM_FLD)
			return -1;
		for (i = 0; i < _NUM_FLD; i++) {
			errno = 0;
			int_fld[i] = strtoul(str_fld[i], &end, 0);
			/* each field must be a number that fits in a uint8_t */
			if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
				return -1;
		}
		lcore_params_array[nb_lcore_params].port_id = (uint8_t)int_fld[FLD_PORT];
		lcore_params_array[nb_lcore_params].queue_id = (uint8_t)int_fld[FLD_QUEUE];
		lcore_params_array[nb_lcore_params].lcore_id = (uint8_t)int_fld[FLD_LCORE];
		++nb_lcore_params;
	}
	lcore_params = lcore_params_array;
	return 0;
}
+
/*
 * Parse the argument given in the command line of the application
 * (the part after the EAL "--" separator): -p PORTMASK (mandatory),
 * --config, --no-promisc. Returns the index offset of the consumed
 * arguments (optind-1) on success, -1 on error.
 */
static int
parse_args(int argc, char **argv)
{
	int opt, ret;
	char **argvopt;
	int option_index;
	char *prgname = argv[0];
	static struct option lgopts[] = {
		{"config", 1, 0, 0},
		{"no-promisc", 0, 0, 0},
		{NULL, 0, 0, 0}
	};

	argvopt = argv;

	while ((opt = getopt_long(argc, argvopt, "p:",
				lgopts, &option_index)) != EOF) {

		switch (opt) {
		/* portmask */
		case 'p':
			enabled_port_mask = parse_portmask(optarg);
			if (enabled_port_mask == 0) {
				printf("invalid portmask\n");
				print_usage(prgname);
				return -1;
			}
			break;

		/* long options */
		case 0:
			if (strcmp(lgopts[option_index].name, "config") == 0) {
				ret = parse_config(optarg);
				if (ret) {
					printf("invalid config\n");
					print_usage(prgname);
					return -1;
				}
			}
			if (strcmp(lgopts[option_index].name, "no-promisc") == 0) {
				printf("Promiscuous mode disabled\n");
				promiscuous_on = 0;
			}
			break;
		default:
			print_usage(prgname);
			return -1;
		}
	}

	/* -p is mandatory */
	if (enabled_port_mask == 0) {
		printf("portmask not specified\n");
		print_usage(prgname);
		return -1;
	}

	/* Put the program name back where the caller expects argv[0] */
	if (optind >= 0)
		argv[optind-1] = prgname;

	ret = optind-1;
	optind = 0; /* reset getopt lib */
	return ret;
}
+
+static void
+print_ethaddr(const char *name, const struct ether_addr *eth_addr)
+{
+       printf ("%s%02X:%02X:%02X:%02X:%02X:%02X", name,
+               eth_addr->addr_bytes[0],
+               eth_addr->addr_bytes[1],
+               eth_addr->addr_bytes[2],
+               eth_addr->addr_bytes[3],
+               eth_addr->addr_bytes[4],
+               eth_addr->addr_bytes[5]);
+}
+
/*
 * Create one pktmbuf pool per NUMA socket that hosts at least one
 * enabled lcore; lcores on the same socket share a pool. Returns 0 on
 * success, -1 when a socket id is out of range or pool creation fails.
 */
static int
init_mem(void)
{
	const unsigned flags = 0;
	int socketid;
	unsigned lcoreid;
	char s[64];

	RTE_LCORE_FOREACH(lcoreid) {
		socketid = rte_lcore_to_socket_id(lcoreid);
		if (socketid >= RTE_MAX_NUMA_NODES) {
			printf("Socket %d of lcore %u is out of range %d\n",
				socketid, lcoreid, RTE_MAX_NUMA_NODES);
			return -1;
		}
		/* Create the pool only once per socket */
		if (pktmbuf_pool[socketid] == NULL) {
			rte_snprintf(s, sizeof(s), "mbuf_pool_%d", socketid);
			pktmbuf_pool[socketid] =
				rte_mempool_create(s, NB_MBUF, MBUF_SIZE, 32,
					sizeof(struct rte_pktmbuf_pool_private),
					rte_pktmbuf_pool_init, NULL,
					rte_pktmbuf_init, NULL,
					socketid, flags);
			if (pktmbuf_pool[socketid] == NULL) {
				printf("Cannot init mbuf pool on socket %d\n", socketid);
				return -1;
			}
			printf("Allocated mbuf pool on socket %d\n", socketid);
		}
	}
	return 0;
}
+
+int
+MAIN(int argc, char **argv)
+{
+       struct lcore_conf *qconf;
+       struct rte_eth_link link;
+       int ret;
+       unsigned nb_ports;
+       uint16_t queueid;
+       unsigned lcoreid;
+       uint32_t nb_tx_queue;
+       uint8_t portid, nb_rx_queue, queue, socketid;
+
+       /* init EAL */
+       ret = rte_eal_init(argc, argv);
+       if (ret < 0)
+               return -1;
+       argc -= ret;
+       argv += ret;
+
+       /* parse application arguments (after the EAL ones) */
+       ret = parse_args(argc, argv);
+       if (ret < 0)
+               return -1;
+
+       /* init driver */
+#ifdef RTE_LIBRTE_IGB_PMD
+       if (rte_igb_pmd_init() < 0)
+               rte_panic("Cannot init igb pmd\n");
+#endif
+#ifdef RTE_LIBRTE_IXGBE_PMD
+       if (rte_ixgbe_pmd_init() < 0)
+               rte_panic("Cannot init ixgbe pmd\n");
+#endif
+
+       if (rte_eal_pci_probe() < 0)
+               rte_panic("Cannot probe PCI\n");
+
+       if (check_lcore_params() < 0)
+               rte_panic("check_lcore_params failed\n");
+
+       ret = init_lcore_rx_queues();
+       if (ret < 0)
+               return -1;
+
+       ret = init_mem();
+       if (ret < 0)
+               return -1;
+
+       nb_ports = rte_eth_dev_count();
+       if (nb_ports > RTE_MAX_ETHPORTS)
+               nb_ports = RTE_MAX_ETHPORTS;
+
+       if (check_port_config(nb_ports) < 0)
+               rte_panic("check_port_config failed\n");
+
+       /* initialize all ports */
+       for (portid = 0; portid < nb_ports; portid++) {
+               /* skip ports that are not enabled */
+               if ((enabled_port_mask & (1 << portid)) == 0) {
+                       printf("\nSkipping disabled port %d\n", portid);
+                       continue;
+               }
+
+               /* init port */
+               printf("Initializing port %d ... ", portid );
+               fflush(stdout);
+
+               nb_rx_queue = get_port_n_rx_queues(portid);
+               if (nb_rx_queue > get_port_max_rx_queues(portid))
+                       rte_panic("Number of rx queues %d exceeds max number of rx queues %u"
+                               " for port %d\n", nb_rx_queue, get_port_max_rx_queues(portid),
+                               portid);
+               nb_tx_queue = rte_lcore_count();
+               if (nb_tx_queue > get_port_max_tx_queues(portid))
+                       rte_panic("Number of lcores %u exceeds max number of tx queues %u"
+                               " for port %d\n", nb_tx_queue, get_port_max_tx_queues(portid),
+                               portid);
+               printf("Creating queues: nb_rxq=%d nb_txq=%u... ",
+                       nb_rx_queue, (unsigned)nb_tx_queue );
+               ret = rte_eth_dev_configure(portid, nb_rx_queue,
+                                       (uint16_t)nb_tx_queue, &port_conf);
+               if (ret < 0)
+                       rte_panic("Cannot configure device: err=%d, port=%d\n",
+                               ret, portid);
+
+               rte_eth_macaddr_get(portid, &ports_eth_addr[portid]);
+               print_ethaddr(" Address:", &ports_eth_addr[portid]);
+               printf(", ");
+
+               /* init one TX queue per couple (lcore,port) */
+               queueid = 0;
+               RTE_LCORE_FOREACH(lcoreid) {
+                       socketid = (uint8_t)rte_lcore_to_socket_id(lcoreid);
+                       printf("txq=%u,%d,%d ", lcoreid, queueid, socketid);
+                       fflush(stdout);
+                       ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
+                                                    socketid, &tx_conf);
+                       if (ret < 0)
+                               rte_panic("rte_eth_tx_queue_setup: err=%d, "
+                                       "port=%d\n", ret, portid);
+
+                       qconf = &lcore_conf[lcoreid];
+                       qconf->tx_queue_id[portid] = queueid;
+                       queueid++;
+               }
+               printf("\n");
+       }
+
+       RTE_LCORE_FOREACH(lcoreid) {
+               qconf = &lcore_conf[lcoreid];
+               printf("\nInitializing rx queues on lcore %u ... ", lcoreid );
+               fflush(stdout);
+               /* init RX queues */
+               for(queue = 0; queue < qconf->n_rx_queue; ++queue) {
+                       portid = qconf->rx_queue_list[queue].port_id;
+                       queueid = qconf->rx_queue_list[queue].queue_id;
+                       socketid = (uint8_t)rte_lcore_to_socket_id(lcoreid);
+                       printf("rxq=%d,%d,%d ", portid, queueid, socketid);
+                       fflush(stdout);
+
+                       ret = rte_eth_rx_queue_setup(portid, queueid, nb_rxd,
+                                       socketid, &rx_conf, pktmbuf_pool[socketid]);
+                       if (ret < 0)
+                               rte_panic("rte_eth_rx_queue_setup: err=%d,"
+                                               "port=%d\n", ret, portid);
+               }
+       }
+
+       printf("\n");
+
+       /* start ports */
+       for (portid = 0; portid < nb_ports; portid++) {
+               if ((enabled_port_mask & (1 << portid)) == 0)
+                       continue;
+               /* Start device */
+               ret = rte_eth_dev_start(portid);
+               if (ret < 0)
+                       rte_panic("rte_eth_dev_start: err=%d, port=%d\n",
+                               ret, portid);
+
+               printf("done: Port %d ", portid);
+
+               /* get link status */
+               rte_eth_link_get(portid, &link);
+               if (link.link_status)
+                       printf(" Link Up - speed %u Mbps - %s\n",
+                              (unsigned) link.link_speed,
+                              (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
+                              ("full-duplex") : ("half-duplex\n"));
+               else
+                       printf(" Link Down\n");
+               /*
+                * If enabled, put device in promiscuous mode.
+                * This allows IO forwarding mode to forward packets
+                * to itself through 2 cross-connected  ports of the
+                * target machine.
+                */
+               if (promiscuous_on)
+                       rte_eth_promiscuous_enable(portid);
+       }
+       printf("Crypto: Initializing Crypto...\n");
+       if (crypto_init() != 0)
+               return -1;
+
+       RTE_LCORE_FOREACH(lcoreid) {
+               if (per_core_crypto_init(lcoreid) != 0) {
+               printf("Crypto: Cannot init lcore crypto on lcore %u\n", (unsigned)lcoreid);
+                       return -1;
+               }
+       }
+       printf("Crypto: Initialization complete\n");
+       /* launch per-lcore init on every lcore */
+       rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
+       RTE_LCORE_FOREACH_SLAVE(lcoreid) {
+               if (rte_eal_wait_lcore(lcoreid) < 0)
+                       return -1;
+       }
+
+       return 0;
+}
diff --git a/examples/dpdk_qat/main.h b/examples/dpdk_qat/main.h
new file mode 100644 (file)
index 0000000..6027cb5
--- /dev/null
@@ -0,0 +1,47 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#ifndef _MAIN_H_
+#define _MAIN_H_
+
+/*
+ * In the baremetal execution environment the real entry point is supplied
+ * by the environment, so the application's "main" is renamed to _main;
+ * under any other environment it remains the ordinary main().
+ */
+#ifdef RTE_EXEC_ENV_BAREMETAL
+#define MAIN _main
+#else
+#define MAIN main
+#endif
+
+/* Application entry point, defined in main.c. */
+int MAIN(int argc, char **argv);
+
+#endif /* _MAIN_H_ */
diff --git a/examples/exception_path/482248_ExceptionPath_Sample_App_Guide_Rev1.1.pdf b/examples/exception_path/482248_ExceptionPath_Sample_App_Guide_Rev1.1.pdf
new file mode 100644 (file)
index 0000000..aa646d9
Binary files /dev/null and b/examples/exception_path/482248_ExceptionPath_Sample_App_Guide_Rev1.1.pdf differ
diff --git a/examples/exception_path/Makefile b/examples/exception_path/Makefile
new file mode 100644 (file)
index 0000000..6e487f1
--- /dev/null
@@ -0,0 +1,57 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+# RTE_SDK must point at the root of an installed DPDK SDK tree
+ifeq ($(RTE_SDK),)
+$(error "Please define RTE_SDK environment variable")
+endif
+
+# Default target, can be overridden by command line or environment
+RTE_TARGET ?= x86_64-default-linuxapp-gcc
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# The TUN/TAP interface used by this example is Linux-specific
+ifneq ($(CONFIG_RTE_EXEC_ENV),"linuxapp")
+$(error This application can only operate in a linuxapp environment, \
+please change the definition of the RTE_TARGET environment variable)
+endif
+
+# binary name
+APP = exception_path
+
+# all source are stored in SRCS-y
+SRCS-y := main.c
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+include $(RTE_SDK)/mk/rte.extapp.mk
diff --git a/examples/exception_path/main.c b/examples/exception_path/main.c
new file mode 100644 (file)
index 0000000..6cf05a8
--- /dev/null
@@ -0,0 +1,569 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <string.h>
+#include <sys/queue.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <getopt.h>
+
+#include <netinet/in.h>
+#include <linux/if.h>
+#include <linux/if_tun.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <unistd.h>
+#include <signal.h>
+
+#include <rte_common.h>
+#include <rte_log.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_memzone.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_launch.h>
+#include <rte_atomic.h>
+#include <rte_lcore.h>
+#include <rte_branch_prediction.h>
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_ring.h>
+#include <rte_log.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_string_fns.h>
+
+/* Macros for printing using RTE_LOG */
+#define RTE_LOGTYPE_APP RTE_LOGTYPE_USER1
+/* FATAL_ERROR terminates the whole application via rte_exit() */
+#define FATAL_ERROR(fmt, args...)       rte_exit(EXIT_FAILURE, fmt "\n", ##args)
+#define PRINT_INFO(fmt, args...)        RTE_LOG(INFO, APP, fmt "\n", ##args)
+
+/* NUMA socket to allocate mbuf pool on */
+#define SOCKET                  0
+
+/* Max ports than can be used (each port is associated with two lcores) */
+#define MAX_PORTS               (RTE_MAX_LCORE / 2)
+
+/* Max size of a single packet */
+#define MAX_PACKET_SZ           2048
+
+/* Number of bytes needed for each mbuf: payload + mbuf header + headroom */
+#define MBUF_SZ \
+       (MAX_PACKET_SZ + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
+
+/* Number of mbufs in mempool that is created */
+#define NB_MBUF                 8192
+
+/* How many packets to attempt to read from NIC in one go */
+#define PKT_BURST_SZ            32
+
+/* How many objects (mbufs) to keep in per-lcore mempool cache */
+#define MEMPOOL_CACHE_SZ        PKT_BURST_SZ
+
+/* Number of RX ring descriptors */
+#define NB_RXD                  128
+
+/* Number of TX ring descriptors */
+#define NB_TXD                  512
+
+/*
+ * RX and TX Prefetch, Host, and Write-back threshold values should be
+ * carefully set for optimal performance. Consult the network
+ * controller's datasheet and supporting DPDK documentation for guidance
+ * on how these parameters should be set.
+ */
+/* RX ring configuration */
+static const struct rte_eth_rxconf rx_conf = {
+       .rx_thresh = {
+               .pthresh = 8,   /* Ring prefetch threshold */
+               .hthresh = 8,   /* Ring host threshold */
+               .wthresh = 4,   /* Ring writeback threshold */
+       },
+       .rx_free_thresh = 0,    /* Immediately free RX descriptors */
+};
+
+/*
+ * These default values are optimized for use with the Intel(R) 82599 10 GbE
+ * Controller and the DPDK ixgbe PMD. Consider using other values for other
+ * network controllers and/or network drivers.
+ */
+/* TX ring configuration */
+static const struct rte_eth_txconf tx_conf = {
+       .tx_thresh = {
+               .pthresh = 36,  /* Ring prefetch threshold */
+               .hthresh = 0,   /* Ring host threshold */
+               .wthresh = 0,   /* Ring writeback threshold */
+       },
+       .tx_free_thresh = 0,    /* Use PMD default values */
+       .tx_rs_thresh = 0,      /* Use PMD default values */
+};
+
+/* Options for configuring ethernet port */
+static const struct rte_eth_conf port_conf = {
+       .rxmode = {
+               .header_split = 0,      /* Header Split disabled */
+               .hw_ip_checksum = 0,    /* IP checksum offload disabled */
+               .hw_vlan_filter = 0,    /* VLAN filtering disabled */
+               .jumbo_frame = 0,       /* Jumbo Frame Support disabled */
+               .hw_strip_crc = 0,      /* CRC stripping by hardware disabled */
+       },
+       .txmode = {
+       },
+};
+
+/* Mempool for mbufs */
+static struct rte_mempool * pktmbuf_pool = NULL;
+
+/* Mask of enabled ports */
+static uint32_t ports_mask = 0;
+
+/* Mask of cores that read from NIC and write to tap */
+static uint32_t input_cores_mask = 0;
+
+/* Mask of cores that read from tap and write to NIC */
+static uint32_t output_cores_mask = 0;
+
+/* Array storing port_id that is associated with each lcore */
+static uint8_t port_ids[RTE_MAX_LCORE];
+
+/*
+ * Structure type for recording lcore-specific stats.
+ * Each lcore updates only its own entry in lcore_stats[]; note that the
+ * SIGUSR2 reset in signal_handler() races with those updates, so the
+ * counters are best-effort.
+ */
+struct stats {
+       uint64_t rx;
+       uint64_t tx;
+       uint64_t dropped;
+};
+
+/* Array of lcore-specific stats */
+static struct stats lcore_stats[RTE_MAX_LCORE];
+
+/* Dump the per-lcore RX/TX/drop counters to stdout in a fixed-width table */
+static void
+print_stats(void)
+{
+       unsigned lcore_id;
+       const struct stats *s;
+
+       printf("\n**Exception-Path example application statistics**\n"
+              "=======  ======  ============  ============  ===============\n"
+              " Lcore    Port            RX            TX    Dropped on TX\n"
+              "-------  ------  ------------  ------------  ---------------\n");
+       RTE_LCORE_FOREACH(lcore_id) {
+               s = &lcore_stats[lcore_id];
+               printf("%6u %7u %13"PRIu64" %13"PRIu64" %16"PRIu64"\n",
+                      lcore_id, (unsigned)port_ids[lcore_id],
+                      s->rx, s->tx, s->dropped);
+       }
+       printf("=======  ======  ============  ============  ===============\n");
+}
+
+/*
+ * Signal handler: SIGUSR1 prints the current statistics,
+ * SIGUSR2 clears them.  All other signals are ignored here.
+ */
+static void
+signal_handler(int signum)
+{
+       if (signum == SIGUSR1) {
+               print_stats();
+       } else if (signum == SIGUSR2) {
+               memset(&lcore_stats, 0, sizeof(lcore_stats));
+               printf("\n**Statistics have been reset**\n");
+       }
+}
+
+/*
+ * Create a tap network interface, or use existing one with same name.
+ * If name[0]='\0' then a name is automatically assigned and returned in name.
+ * Returns the interface file descriptor on success, a negative value on
+ * failure (from open() or the TUNSETIFF ioctl).
+ */
+static int tap_create(char *name)
+{
+       struct ifreq ifr;
+       int fd, ret;
+
+       fd = open("/dev/net/tun", O_RDWR);
+       if (fd < 0)
+               return fd;
+
+       memset(&ifr, 0, sizeof(ifr));
+
+       /* TAP device without packet information */
+       ifr.ifr_flags = IFF_TAP | IFF_NO_PI;
+
+       /*
+        * Copy the requested name through a "%s" format rather than using it
+        * as the format string itself: a '%' in the name would otherwise be
+        * interpreted as a conversion specifier.
+        */
+       if (name && *name)
+               rte_snprintf(ifr.ifr_name, IFNAMSIZ, "%s", name);
+
+       ret = ioctl(fd, TUNSETIFF, (void *) &ifr);
+       if (ret < 0) {
+               close(fd);
+               return ret;
+       }
+
+       /* Report back the (possibly kernel-assigned) interface name */
+       if (name)
+               rte_snprintf(name, IFNAMSIZ, "%s", ifr.ifr_name);
+
+       return fd;
+}
+
+/*
+ * Main processing loop, run on every lcore.  The role of each lcore is
+ * selected by the core masks given on the command line:
+ *  - input cores read packet bursts from their NIC port and write each
+ *    packet to a per-lcore tap interface;
+ *  - output cores read frames from their tap interface and transmit them
+ *    on their NIC port;
+ *  - all other cores busy-wait doing nothing.
+ * Never returns; fatal errors terminate the process via FATAL_ERROR().
+ */
+static  __attribute__((noreturn)) int
+main_loop(__attribute__((unused)) void *arg)
+{
+       const unsigned lcore_id = rte_lcore_id();
+       char tap_name[IFNAMSIZ];
+       int tap_fd;
+
+       /* Create new tap interface, named after this lcore */
+       rte_snprintf(tap_name, IFNAMSIZ, "tap_dpdk_%.2u", lcore_id);
+       tap_fd = tap_create(tap_name);
+       if (tap_fd < 0)
+               FATAL_ERROR("Could not create tap interface \"%s\" (%d)",
+                           tap_name, tap_fd);
+
+       if ((1 << lcore_id) & input_cores_mask) {
+               PRINT_INFO("Lcore %u is reading from port %u and writing to %s",
+                          lcore_id, (unsigned)port_ids[lcore_id], tap_name);
+               fflush(stdout);
+               /* Loop forever reading from NIC and writing to tap */
+               for (;;) {
+                       struct rte_mbuf *pkts_burst[PKT_BURST_SZ];
+                       unsigned i;
+                       const unsigned nb_rx =
+                                       rte_eth_rx_burst(port_ids[lcore_id], 0,
+                                           pkts_burst, PKT_BURST_SZ);
+                       lcore_stats[lcore_id].rx += nb_rx;
+                       for (i = 0; likely(i < nb_rx); i++) {
+                               struct rte_mbuf *m = pkts_burst[i];
+                               /* Ignore return val from write() */
+                               int ret = write(tap_fd,
+                                               rte_pktmbuf_mtod(m, void*),
+                                               rte_pktmbuf_data_len(m));
+                               /* mbuf no longer needed once copied to tap */
+                               rte_pktmbuf_free(m);
+                               if (unlikely(ret < 0))
+                                       lcore_stats[lcore_id].dropped++;
+                               else
+                                       lcore_stats[lcore_id].tx++;
+                       }
+               }
+       }
+       else if ((1 << lcore_id) & output_cores_mask) {
+               PRINT_INFO("Lcore %u is reading from %s and writing to port %u",
+                          lcore_id, tap_name, (unsigned)port_ids[lcore_id]);
+               fflush(stdout);
+               /* Loop forever reading from tap and writing to NIC */
+               for (;;) {
+                       int ret;
+                       struct rte_mbuf *m = rte_pktmbuf_alloc(pktmbuf_pool);
+                       if (m == NULL)
+                               continue;
+
+                       /*
+                        * Read one frame; MAX_PACKET_SZ matches the data room
+                        * reserved per mbuf (see MBUF_SZ).
+                        * NOTE(review): ret == 0 would produce a zero-length
+                        * packet below - presumably cannot happen on a tap fd;
+                        * confirm if the tap can ever return EOF.
+                        */
+                       ret = read(tap_fd, m->pkt.data, MAX_PACKET_SZ);
+                       lcore_stats[lcore_id].rx++;
+                       if (unlikely(ret < 0)) {
+                               FATAL_ERROR("Reading from %s interface failed",
+                                           tap_name);
+                       }
+                       /* Build a single-segment packet of the bytes read */
+                       m->pkt.nb_segs = 1;
+                       m->pkt.next = NULL;
+                       m->pkt.pkt_len = (uint16_t)ret;
+                       m->pkt.data_len = (uint16_t)ret;
+                       ret = rte_eth_tx_burst(port_ids[lcore_id], 0, &m, 1);
+                       if (unlikely(ret < 1)) {
+                               /* NIC did not take the packet: drop it here */
+                               rte_pktmbuf_free(m);
+                               lcore_stats[lcore_id].dropped++;
+                       }
+                       else {
+                               lcore_stats[lcore_id].tx++;
+                       }
+               }
+       }
+       else {
+               PRINT_INFO("Lcore %u has nothing to do", lcore_id);
+               for (;;)
+                       ; /* loop doing nothing */
+       }
+       /*
+        * Tap file is closed automatically when program exits. Putting close()
+        * here will cause the compiler to give an error about unreachable code.
+        */
+}
+
+/*
+ * Display usage instructions.
+ * prgname: the name the application was invoked as (argv[0]).
+ */
+static void
+print_usage(const char *prgname)
+{
+       PRINT_INFO("\nUsage: %s [EAL options] -- -p PORTMASK -i IN_CORES -o OUT_CORES\n"
+                  "    -p PORTMASK: hex bitmask of ports to use\n"
+                  "    -i IN_CORES: hex bitmask of cores which read from NIC\n"
+                  "    -o OUT_CORES: hex bitmask of cores which write to NIC",
+                  prgname);
+}
+
+/*
+ * Parse a hexadecimal string into an unsigned number.
+ * Returns 0 if the string is empty or contains any non-hex character.
+ */
+static uint32_t
+parse_unsigned(const char *portmask)
+{
+       char *end = NULL;
+       unsigned long num = strtoul(portmask, &end, 16);
+
+       if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
+               return 0;
+       return (uint32_t)num;
+}
+
+/*
+ * Record affinities between ports and lcores in global port_ids[] array.
+ * Each input core is paired with the next enabled port for RX and each
+ * output core with the next enabled port for TX; the two sets must pair
+ * up evenly and together cover the whole port mask.
+ * Exits fatally (FATAL_ERROR) on any inconsistency.
+ */
+static void
+setup_port_lcore_affinities(void)
+{
+       unsigned i;
+       uint8_t tx_port = 0;
+       uint8_t rx_port = 0;
+
+       /* Setup port_ids[] array, and check masks were ok */
+       RTE_LCORE_FOREACH(i) {
+               if (input_cores_mask & (1 << i)) {
+                       /*
+                        * Skip ports that are not enabled.  Test the bound
+                        * BEFORE shifting: the old code checked it after, so
+                        * 1 << 32 (undefined behaviour) could be evaluated
+                        * when the mask ran out of ports.
+                        */
+                       while (rx_port < (sizeof(ports_mask) * 8) &&
+                                       (ports_mask & (1 << rx_port)) == 0)
+                               rx_port++;
+                       if (rx_port >= (sizeof(ports_mask) * 8))
+                               goto fail; /* not enough ports */
+
+                       port_ids[i] = rx_port++;
+               }
+               else if (output_cores_mask & (1 << i)) {
+                       /* Skip ports that are not enabled (same bound rule) */
+                       while (tx_port < (sizeof(ports_mask) * 8) &&
+                                       (ports_mask & (1 << tx_port)) == 0)
+                               tx_port++;
+                       if (tx_port >= (sizeof(ports_mask) * 8))
+                               goto fail; /* not enough ports */
+
+                       port_ids[i] = tx_port++;
+               }
+       }
+
+       if (rx_port != tx_port)
+               goto fail; /* uneven number of cores in masks */
+
+       /* 64-bit shift: rx_port may equal 32 when every port is consumed */
+       if (ports_mask & (uint32_t)~((1ULL << rx_port) - 1))
+               goto fail; /* unused ports */
+
+       return;
+fail:
+       FATAL_ERROR("Invalid core/port masks specified on command line");
+}
+
+/* Parse the arguments given in the command line of the application */
+static void
+parse_args(int argc, char **argv)
+{
+       const char *prgname = argv[0];
+       int opt;
+
+       /* Disable printing messages within getopt() */
+       opterr = 0;
+
+       /* Walk the option list; each mask is a hex number */
+       while ((opt = getopt(argc, argv, "i:o:p:")) != EOF) {
+               if (opt == 'i')
+                       input_cores_mask = parse_unsigned(optarg);
+               else if (opt == 'o')
+                       output_cores_mask = parse_unsigned(optarg);
+               else if (opt == 'p')
+                       ports_mask = parse_unsigned(optarg);
+               else {
+                       print_usage(prgname);
+                       FATAL_ERROR("Invalid option specified");
+               }
+       }
+
+       /* Check that options were parsed ok */
+       if (input_cores_mask == 0) {
+               print_usage(prgname);
+               FATAL_ERROR("IN_CORES not specified correctly");
+       }
+       if (output_cores_mask == 0) {
+               print_usage(prgname);
+               FATAL_ERROR("OUT_CORES not specified correctly");
+       }
+       if (ports_mask == 0) {
+               print_usage(prgname);
+               FATAL_ERROR("PORTMASK not specified correctly");
+       }
+
+       /* Pair every enabled core with an enabled port */
+       setup_port_lcore_affinities();
+}
+
+/*
+ * Initialise a single port on an Ethernet device.
+ * Configures the device with one RX and one TX queue, starts it, reports
+ * the resulting link status and enables promiscuous mode.
+ * All failures are fatal (FATAL_ERROR terminates the process).
+ */
+static void
+init_port(uint8_t port)
+{
+       struct rte_eth_link link;
+       int ret;
+
+       /* Initialise device and RX/TX queues */
+       PRINT_INFO("Initialising port %u ...", (unsigned)port);
+       fflush(stdout);
+       /* 1 RX queue and 1 TX queue per port */
+       ret = rte_eth_dev_configure(port, 1, 1, &port_conf);
+       if (ret < 0)
+               FATAL_ERROR("Could not configure port%u (%d)",
+                           (unsigned)port, ret);
+
+       ret = rte_eth_rx_queue_setup(port, 0, NB_RXD, SOCKET, &rx_conf,
+                                    pktmbuf_pool);
+       if (ret < 0)
+               FATAL_ERROR("Could not setup up RX queue for port%u (%d)",
+                           (unsigned)port, ret);
+
+       ret = rte_eth_tx_queue_setup(port, 0, NB_TXD, SOCKET, &tx_conf);
+       if (ret < 0)
+               FATAL_ERROR("Could not setup up TX queue for port%u (%d)",
+                           (unsigned)port, ret);
+
+       ret = rte_eth_dev_start(port);
+       if (ret < 0)
+               FATAL_ERROR("Could not start port%u (%d)", (unsigned)port, ret);
+
+       /*  Everything is setup and started, print link status */
+       rte_eth_link_get(port, &link);
+       if (link.link_status)
+               PRINT_INFO("    link up - %u Mbit/s - %s",
+                          (unsigned)link.link_speed,
+                          (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
+                          ("full-duplex") : ("half-duplex"));
+       else
+               PRINT_INFO("    link down");
+
+       /* Accept all traffic so the tap sees every frame on the wire */
+       rte_eth_promiscuous_enable(port);
+}
+
+/* Initialise ports/queues etc. and start main loop on each core */
+int
+main(int argc, char** argv)
+{
+       int ret;
+       unsigned i,high_port;
+       uint8_t nb_sys_ports, port;
+
+       /* Associate signal_hanlder function with USR signals */
+       signal(SIGUSR1, signal_handler);
+       signal(SIGUSR2, signal_handler);
+
+       /* Initialise EAL */
+       ret = rte_eal_init(argc, argv);
+       if (ret < 0)
+               FATAL_ERROR("Could not initialise EAL (%d)", ret);
+       argc -= ret;
+       argv += ret;
+
+       /* Parse application arguments (after the EAL ones) */
+       parse_args(argc, argv);
+
+       /* Create the mbuf pool */
+       pktmbuf_pool = rte_mempool_create("mbuf_pool", NB_MBUF, MBUF_SZ,
+                       MEMPOOL_CACHE_SZ,
+                       sizeof(struct rte_pktmbuf_pool_private),
+                       rte_pktmbuf_pool_init, NULL, rte_pktmbuf_init, NULL,
+                       SOCKET, 0);
+       if (pktmbuf_pool == NULL) {
+               FATAL_ERROR("Could not initialise mbuf pool");
+               return -1;
+       }
+
+       /* Initialise PMD driver(s) */
+#ifdef RTE_LIBRTE_IGB_PMD
+       ret = rte_igb_pmd_init();
+       if (ret < 0)
+               FATAL_ERROR("Could not initialise igb PMD (%d)", ret);
+#endif
+#ifdef RTE_LIBRTE_IXGBE_PMD
+       ret = rte_ixgbe_pmd_init();
+       if (ret < 0)
+               FATAL_ERROR("Could not initialise ixgbe PMD (%d)", ret);
+#endif
+
+       /* Scan PCI bus for recognised devices */
+       ret = rte_eal_pci_probe();
+       if (ret < 0)
+               FATAL_ERROR("Could not probe PCI (%d)", ret);
+
+       /* Get number of ports found in scan */
+       nb_sys_ports = rte_eth_dev_count();
+       if (nb_sys_ports == 0)
+               FATAL_ERROR("No supported Ethernet devices found - check that "
+                           "CONFIG_RTE_LIBRTE_IGB_PMD=y and/or "
+                           "CONFIG_RTE_LIBRTE_IXGBE_PMD=y in the config file");
+       /* Find highest port set in portmask */
+       for (high_port = (sizeof(ports_mask) * 8) - 1;
+                       (high_port != 0) && !(ports_mask & (1 << high_port));
+                       high_port--)
+               ; /* empty body */
+       if (high_port > nb_sys_ports)
+               FATAL_ERROR("Port mask requires more ports than available");
+
+       /* Initialise each port */
+       for (port = 0; port < nb_sys_ports; port++) {
+               /* Skip ports that are not enabled */
+               if ((ports_mask & (1 << port)) == 0) {
+                       continue;
+               }
+               init_port(port);
+       }
+
+       /* Launch per-lcore function on every lcore */
+       rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
+       RTE_LCORE_FOREACH_SLAVE(i) {
+               if (rte_eal_wait_lcore(i) < 0)
+                       return -1;
+       }
+
+       return 0;
+}
diff --git a/examples/helloworld/482249_HelloWorld_Sample_App_Guide_Rev1.1.pdf b/examples/helloworld/482249_HelloWorld_Sample_App_Guide_Rev1.1.pdf
new file mode 100644 (file)
index 0000000..c58e75e
Binary files /dev/null and b/examples/helloworld/482249_HelloWorld_Sample_App_Guide_Rev1.1.pdf differ
diff --git a/examples/helloworld/Makefile b/examples/helloworld/Makefile
new file mode 100644 (file)
index 0000000..0e78fa6
--- /dev/null
@@ -0,0 +1,52 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+ifeq ($(RTE_SDK),)
+$(error "Please define RTE_SDK environment variable")
+endif
+
+# Default target, can be overridden by command line or environment
+RTE_TARGET ?= x86_64-default-linuxapp-gcc
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# binary name
+APP = helloworld
+
+# all source are stored in SRCS-y
+SRCS-y := main.c
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+include $(RTE_SDK)/mk/rte.extapp.mk
diff --git a/examples/helloworld/main.c b/examples/helloworld/main.c
new file mode 100644 (file)
index 0000000..8038685
--- /dev/null
@@ -0,0 +1,82 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <stdint.h>
+#include <errno.h>
+#include <sys/queue.h>
+
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_launch.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_debug.h>
+
+#include "main.h"
+
+/*
+ * Per-lcore worker: prints a greeting tagged with the id of the lcore it
+ * runs on. Launched on every slave lcore and called directly on the master
+ * (see MAIN below). Always returns 0; arg is unused.
+ */
+static int
+lcore_hello(__attribute__((unused)) void *arg)
+{
+       unsigned lcore_id;
+       lcore_id = rte_lcore_id();
+       printf("hello from core %u\n", lcore_id);
+       return 0;
+}
+
+/*
+ * Application entry point (MAIN expands to main(), or _main on baremetal;
+ * see main.h). Initialises the EAL, launches lcore_hello() on every slave
+ * lcore, runs it on the master lcore too, then blocks until all slaves
+ * have returned. Panics if EAL initialisation fails.
+ */
+int
+MAIN(int argc, char **argv)
+{
+       int ret;
+       unsigned lcore_id;
+
+       ret = rte_eal_init(argc, argv);
+       if (ret < 0)
+               rte_panic("Cannot init EAL\n");
+
+       /* call lcore_hello() on every slave lcore */
+       RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+               rte_eal_remote_launch(lcore_hello, NULL, lcore_id);
+       }
+
+       /* call it on master lcore too */
+       lcore_hello(NULL);
+
+       /* wait for every launched slave lcore to finish */
+       rte_eal_mp_wait_lcore();
+       return 0;
+}
diff --git a/examples/helloworld/main.h b/examples/helloworld/main.h
new file mode 100644 (file)
index 0000000..6027cb5
--- /dev/null
@@ -0,0 +1,47 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#ifndef _MAIN_H_
+#define _MAIN_H_
+
+/*
+ * On the baremetal execution environment the C entry point must be named
+ * _main; everywhere else it is the usual main(). Application code defines
+ * and declares MAIN instead of main so one source works in both cases.
+ */
+#ifdef RTE_EXEC_ENV_BAREMETAL
+#define MAIN _main
+#else
+#define MAIN main
+#endif
+
+int MAIN(int argc, char **argv);
+
+#endif /* _MAIN_H_ */
diff --git a/examples/ipv4_frag/490761_IPv4_Frag_Sample_App_Guide_Rev1.0.pdf b/examples/ipv4_frag/490761_IPv4_Frag_Sample_App_Guide_Rev1.0.pdf
new file mode 100644 (file)
index 0000000..ae0a011
Binary files /dev/null and b/examples/ipv4_frag/490761_IPv4_Frag_Sample_App_Guide_Rev1.0.pdf differ
diff --git a/examples/ipv4_frag/Makefile b/examples/ipv4_frag/Makefile
new file mode 100644 (file)
index 0000000..78b5f8d
--- /dev/null
@@ -0,0 +1,63 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+#
+
+ifeq ($(RTE_SDK),)
+$(error "Please define RTE_SDK environment variable")
+endif
+
+# Default target, can be overridden by command line or environment
+RTE_TARGET ?= x86_64-default-linuxapp-gcc
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+ifneq ($(CONFIG_RTE_MBUF_SCATTER_GATHER),y)
+$(error This application requires RTE_MBUF_SCATTER_GATHER to be enabled)
+endif
+
+# binary name
+APP = ipv4_frag
+
+# all source are stored in SRCS-y
+SRCS-y := main.c
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+# workaround for a gcc bug with noreturn attribute
+# http://gcc.gnu.org/bugzilla/show_bug.cgi?id=12603
+ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
+CFLAGS_main.o += -Wno-return-type
+endif
+
+include $(RTE_SDK)/mk/rte.extapp.mk
diff --git a/examples/ipv4_frag/main.c b/examples/ipv4_frag/main.c
new file mode 100644 (file)
index 0000000..0f0c5f6
--- /dev/null
@@ -0,0 +1,707 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <sys/types.h>
+#include <sys/param.h>
+#include <string.h>
+#include <sys/queue.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <getopt.h>
+
+#include <rte_common.h>
+#include <rte_byteorder.h>
+#include <rte_log.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_memzone.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_launch.h>
+#include <rte_atomic.h>
+#include <rte_cycles.h>
+#include <rte_prefetch.h>
+#include <rte_lcore.h>
+#include <rte_per_lcore.h>
+#include <rte_branch_prediction.h>
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_random.h>
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_ring.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_lpm.h>
+#include <rte_ip.h>
+
+#include "rte_ipv4_frag.h"
+#include "main.h"
+
+#define RTE_LOGTYPE_L3FWD RTE_LOGTYPE_USER1
+
+#define MAX_PORTS 32
+
+#define MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
+
+/* allow max jumbo frame 9.5 KB */
+#define JUMBO_FRAME_MAX_SIZE   0x2600
+
+#define        ROUNDUP_DIV(a, b)       (((a) + (b) - 1) / (b))
+
+/*
+ * Max number of fragments per packet expected.
+ */
+#define        MAX_PACKET_FRAG ROUNDUP_DIV(JUMBO_FRAME_MAX_SIZE, IPV4_DEFAULT_PAYLOAD)
+
+#define NB_MBUF   8192
+
+/*
+ * RX and TX Prefetch, Host, and Write-back threshold values should be
+ * carefully set for optimal performance. Consult the network
+ * controller's datasheet and supporting DPDK documentation for guidance
+ * on how these parameters should be set.
+ */
+#define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */
+#define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */
+#define RX_WTHRESH 4 /**< Default values of RX write-back threshold reg. */
+
+/*
+ * These default values are optimized for use with the Intel(R) 82599 10 GbE
+ * Controller and the DPDK ixgbe PMD. Consider using other values for other
+ * network controllers and/or network drivers.
+ */
+#define TX_PTHRESH 36 /**< Default values of TX prefetch threshold reg. */
+#define TX_HTHRESH 0  /**< Default values of TX host threshold reg. */
+#define TX_WTHRESH 0  /**< Default values of TX write-back threshold reg. */
+
+#define MAX_PKT_BURST  32
+#define BURST_TX_DRAIN 200000ULL /* around 100us at 2 Ghz */
+
+#define SOCKET0 0
+
+/* Configure how many packets ahead to prefetch, when reading packets */
+#define PREFETCH_OFFSET        3
+
+/*
+ * Configurable number of RX/TX ring descriptors
+ */
+#define RTE_TEST_RX_DESC_DEFAULT 128
+#define RTE_TEST_TX_DESC_DEFAULT 512
+static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
+static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
+
+/* ethernet addresses of ports */
+static struct ether_addr ports_eth_addr[MAX_PORTS];
+static struct ether_addr remote_eth_addr =
+       {{0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff}};
+
+/* mask of enabled ports */
+static int enabled_port_mask = 0;
+
+static int rx_queue_per_lcore = 1;
+
+#define MBUF_TABLE_SIZE  (2 * MAX(MAX_PKT_BURST, MAX_PACKET_FRAG))
+
+struct mbuf_table {
+       uint16_t len;
+       struct rte_mbuf *m_table[MBUF_TABLE_SIZE];
+};
+
+#define MAX_RX_QUEUE_PER_LCORE 16
+#define MAX_TX_QUEUE_PER_PORT 16
+struct lcore_queue_conf {
+       uint16_t n_rx_queue;
+       uint8_t rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
+       uint16_t tx_queue_id[MAX_PORTS];
+       struct mbuf_table tx_mbufs[MAX_PORTS];
+
+} __rte_cache_aligned;
+struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
+
+/* Port configuration: jumbo frames enabled (needed to receive packets that
+ * require fragmentation); all RX offloads disabled. */
+static const struct rte_eth_conf port_conf = {
+       .rxmode = {
+               .max_rx_pkt_len = JUMBO_FRAME_MAX_SIZE,
+               .split_hdr_size = 0,
+               .header_split   = 0, /**< Header Split disabled */
+               .hw_ip_checksum = 0, /**< IP checksum offload disabled */
+               .hw_vlan_filter = 0, /**< VLAN filtering disabled */
+               .jumbo_frame    = 1, /**< Jumbo Frame Support enabled */
+               .hw_strip_crc   = 0, /**< CRC stripping by hardware disabled */
+       },
+       .txmode = {
+       },
+};
+
+static const struct rte_eth_rxconf rx_conf = {
+       .rx_thresh = {
+               .pthresh = RX_PTHRESH,
+               .hthresh = RX_HTHRESH,
+               .wthresh = RX_WTHRESH,
+       },
+};
+
+static const struct rte_eth_txconf tx_conf = {
+       .tx_thresh = {
+               .pthresh = TX_PTHRESH,
+               .hthresh = TX_HTHRESH,
+               .wthresh = TX_WTHRESH,
+       },
+       .tx_free_thresh = 0, /* Use PMD default values */
+       .tx_rs_thresh = 0, /* Use PMD default values */
+};
+
+struct rte_mempool *pool_direct = NULL, *pool_indirect = NULL;
+
+struct l3fwd_route {
+       uint32_t ip;
+       uint8_t  depth;
+       uint8_t  if_out;
+};
+
+struct l3fwd_route l3fwd_route_array[] = {
+       {IPv4(100,10,0,0), 16, 2},
+       {IPv4(100,20,0,0), 16, 2},
+       {IPv4(100,30,0,0), 16, 0},
+       {IPv4(100,40,0,0), 16, 0},
+};
+
+#define L3FWD_NUM_ROUTES \
+       (sizeof(l3fwd_route_array) / sizeof(l3fwd_route_array[0]))
+
+#define L3FWD_LPM_MAX_RULES     1024
+
+struct rte_lpm *l3fwd_lpm = NULL;
+
+/*
+ * Transmit the first n mbufs buffered for 'port' on this lcore's TX queue
+ * (qconf->tx_queue_id[port]). Packets the PMD does not accept are dropped
+ * (freed) so the caller can unconditionally reset the buffer length.
+ * Always returns 0.
+ */
+static inline int
+send_burst(struct lcore_queue_conf *qconf, uint16_t n, uint8_t port)
+{
+       struct rte_mbuf **m_table;
+       int ret;
+       uint16_t queueid;
+
+       queueid = qconf->tx_queue_id[port];
+       m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table;
+
+       ret = rte_eth_tx_burst(port, queueid, m_table, n);
+       /* rte_eth_tx_burst() may send fewer than n packets: free the rest */
+       if (unlikely(ret < n)) {
+               do {
+                       rte_pktmbuf_free(m_table[ret]);
+               } while (++ret < n);
+       }
+
+       return 0;
+}
+
+/*
+ * Forward one received packet: strip its Ethernet header, look the IPv4
+ * destination address up in the LPM table (falling back to the input port
+ * when there is no usable route), fragment the packet if it exceeds
+ * IPV4_MTU_DEFAULT, prepend a fresh Ethernet header to each outgoing
+ * (fragment of the) packet, and buffer the result on the output port,
+ * flushing via send_burst() once MAX_PKT_BURST packets are queued.
+ */
+static inline void
+l3fwd_simple_forward(struct rte_mbuf *m, uint8_t port_in)
+{
+       struct lcore_queue_conf *qconf;
+       struct ipv4_hdr *ip_hdr;
+       uint32_t i, len, lcore_id, ip_dst;
+       uint8_t next_hop, port_out;
+       int32_t len2;
+
+       lcore_id = rte_lcore_id();
+       qconf = &lcore_queue_conf[lcore_id];
+
+       /* Remove the Ethernet header and trailer from the input packet */
+       rte_pktmbuf_adj(m, (uint16_t)sizeof(struct ether_hdr));
+
+       /* Read the lookup key (i.e. ip_dst) from the input packet */
+       ip_hdr = rte_pktmbuf_mtod(m, struct ipv4_hdr *);
+       ip_dst = rte_be_to_cpu_32(ip_hdr->dst_addr);
+
+       /* Find destination port; bounce back out of port_in if the route is
+        * missing or points at a port not in the enabled mask */
+       if (rte_lpm_lookup(l3fwd_lpm, ip_dst, &next_hop) == 0 &&
+                       (enabled_port_mask & 1 << next_hop) != 0)
+               port_out = next_hop;
+       else
+               port_out = port_in;
+
+       /* Build transmission burst */
+       len = qconf->tx_mbufs[port_out].len;
+
+       /* if we don't need to do any fragmentation */
+       if (likely (IPV4_MTU_DEFAULT  >= m->pkt.pkt_len)) {
+               qconf->tx_mbufs[port_out].m_table[len] = m;
+               len2 = 1;
+       } else {
+               len2 = rte_ipv4_fragmentation(m,
+                       &qconf->tx_mbufs[port_out].m_table[len],
+                       (uint16_t)(MBUF_TABLE_SIZE - len),
+                       IPV4_MTU_DEFAULT,
+                       pool_direct, pool_indirect);
+
+               /* Free input packet */
+               rte_pktmbuf_free(m);
+
+               /* If we fail to fragment the packet */
+               if (unlikely (len2 < 0))
+                       return;
+       }
+
+       /* Prepend an Ethernet header to every outgoing packet/fragment */
+       for (i = len; i < len + len2; i ++) {
+               m = qconf->tx_mbufs[port_out].m_table[i];
+               struct ether_hdr *eth_hdr = (struct ether_hdr *)
+                       rte_pktmbuf_prepend(m, (uint16_t)sizeof(struct ether_hdr));
+               if (eth_hdr == NULL) {
+                       rte_panic("No headroom in mbuf.\n");
+               }
+
+               m->pkt.l2_len = sizeof(struct ether_hdr);
+
+               ether_addr_copy(&remote_eth_addr, &eth_hdr->d_addr);
+               ether_addr_copy(&ports_eth_addr[port_out], &eth_hdr->s_addr);
+               /* Fix: the EtherType is written to the wire, so convert CPU
+                * order to big-endian with rte_cpu_to_be_16(); the original
+                * rte_be_to_cpu_16() produced the same bytes on either
+                * endianness but stated the wrong conversion direction. */
+               eth_hdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+       }
+
+       len += len2;
+
+       if (likely(len < MAX_PKT_BURST)) {
+               qconf->tx_mbufs[port_out].len = (uint16_t)len;
+               return;
+       }
+
+       /* Transmit packets */
+       send_burst(qconf, (uint16_t)len, port_out);
+       qconf->tx_mbufs[port_out].len = 0;
+}
+
+/*
+ * Per-lcore main processing loop; never returns.
+ * Each iteration: (1) once BURST_TX_DRAIN TSC cycles have elapsed, flush
+ * every non-empty per-port TX buffer of this lcore; (2) poll each RX queue
+ * assigned to this lcore for up to MAX_PKT_BURST packets and hand every
+ * packet to l3fwd_simple_forward(), prefetching packet data
+ * PREFETCH_OFFSET packets ahead of the one being processed.
+ */
+static __attribute__((noreturn)) int
+main_loop(__attribute__((unused)) void *dummy)
+{
+       struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
+       uint32_t lcore_id;
+       uint64_t prev_tsc = 0;
+       uint64_t diff_tsc, cur_tsc;
+       int i, j, nb_rx;
+       uint8_t portid;
+       struct lcore_queue_conf *qconf;
+
+       lcore_id = rte_lcore_id();
+       qconf = &lcore_queue_conf[lcore_id];
+
+       if (qconf->n_rx_queue == 0) {
+               RTE_LOG(INFO, L3FWD, "lcore %u has nothing to do\n", lcore_id);
+               /* NOTE(review): busy-spins at 100% CPU forever; returning is
+                * ruled out by the noreturn attribute — confirm intentional. */
+               while(1);
+       }
+
+       RTE_LOG(INFO, L3FWD, "entering main loop on lcore %u\n", lcore_id);
+
+       for (i = 0; i < qconf->n_rx_queue; i++) {
+
+               portid = qconf->rx_queue_list[i];
+               RTE_LOG(INFO, L3FWD, " -- lcoreid=%u portid=%d\n", lcore_id,
+                       (int) portid);
+       }
+
+       while (1) {
+
+               cur_tsc = rte_rdtsc();
+
+               /*
+                * TX burst queue drain
+                */
+               diff_tsc = cur_tsc - prev_tsc;
+               if (unlikely(diff_tsc > BURST_TX_DRAIN)) {
+
+                       /*
+                        * This could be optimized (use queueid instead of
+                        * portid), but it is not called so often
+                        */
+                       for (portid = 0; portid < MAX_PORTS; portid++) {
+                               if (qconf->tx_mbufs[portid].len == 0)
+                                       continue;
+                               send_burst(&lcore_queue_conf[lcore_id],
+                                          qconf->tx_mbufs[portid].len,
+                                          portid);
+                               qconf->tx_mbufs[portid].len = 0;
+                       }
+
+                       prev_tsc = cur_tsc;
+               }
+
+               /*
+                * Read packet from RX queues
+                */
+               for (i = 0; i < qconf->n_rx_queue; i++) {
+
+                       portid = qconf->rx_queue_list[i];
+                       nb_rx = rte_eth_rx_burst(portid, 0, pkts_burst,
+                                                MAX_PKT_BURST);
+
+                       /* Prefetch first packets */
+                       for (j = 0; j < PREFETCH_OFFSET && j < nb_rx; j++) {
+                               rte_prefetch0(rte_pktmbuf_mtod(
+                                               pkts_burst[j], void *));
+                       }
+
+                       /* Prefetch and forward already prefetched packets */
+                       for (j = 0; j < (nb_rx - PREFETCH_OFFSET); j++) {
+                               rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[
+                                               j + PREFETCH_OFFSET], void *));
+                               l3fwd_simple_forward(pkts_burst[j], portid);
+                       }
+
+                       /* Forward remaining prefetched packets */
+                       for (; j < nb_rx; j++) {
+                               l3fwd_simple_forward(pkts_burst[j], portid);
+                       }
+               }
+       }
+}
+
+/* Print command-line usage to stdout; prgname is argv[0] */
+static void
+print_usage(const char *prgname)
+{
+       printf("%s [EAL options] -- -p PORTMASK [-q NQ]\n"
+              "  -p PORTMASK: hexadecimal bitmask of ports to configure\n"
+              "  -q NQ: number of queue (=ports) per lcore (default is 1)\n",
+              prgname);
+}
+
+/*
+ * Parse the hexadecimal port-mask argument.
+ * Returns the mask on success, or -1 on empty input, trailing garbage,
+ * or a zero mask.
+ * NOTE(review): the unsigned long is returned through an int, so a mask
+ * with bit 31 set would come back negative — TODO confirm acceptable.
+ */
+static int
+parse_portmask(const char *portmask)
+{
+       char *end = NULL;
+       unsigned long pm;
+
+       /* parse hexadecimal string */
+       pm = strtoul(portmask, &end, 16);
+       if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
+               return -1;
+
+       if (pm == 0)
+               return -1;
+
+       return pm;
+}
+
+/*
+ * Parse the per-lcore RX queue count argument.
+ * Returns the value on success, or -1 on empty input, trailing garbage,
+ * zero, or a value >= MAX_RX_QUEUE_PER_LCORE.
+ */
+static int
+parse_nqueue(const char *q_arg)
+{
+       char *end = NULL;
+       unsigned long n;
+
+       /* parse decimal string (base 10; the old comment wrongly said hex) */
+       n = strtoul(q_arg, &end, 10);
+       if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
+               return -1;
+       if (n == 0)
+               return -1;
+       if (n >= MAX_RX_QUEUE_PER_LCORE)
+               return -1;
+
+       return n;
+}
+
+/*
+ * Parse the application (non-EAL) command-line arguments:
+ *   -p PORTMASK  hexadecimal mask of ports to use (required, non-zero)
+ *   -q NQ        RX queues (= ports) per lcore (optional, default 1)
+ * Results are stored in the globals enabled_port_mask / rx_queue_per_lcore.
+ * Returns the index of the last consumed argument (so the caller can
+ * advance argc/argv past the app options), or -1 on error after printing
+ * the usage message.
+ */
+static int
+parse_args(int argc, char **argv)
+{
+       int opt, ret;
+       char **argvopt;
+       int option_index;
+       char *prgname = argv[0];
+       static struct option lgopts[] = {
+               {NULL, 0, 0, 0}
+       };
+
+       argvopt = argv;
+
+       while ((opt = getopt_long(argc, argvopt, "p:q:",
+                                 lgopts, &option_index)) != EOF) {
+
+               switch (opt) {
+               /* portmask */
+               case 'p':
+                       enabled_port_mask = parse_portmask(optarg);
+                       if (enabled_port_mask < 0) {
+                               printf("invalid portmask\n");
+                               print_usage(prgname);
+                               return -1;
+                       }
+                       break;
+
+               /* nqueue */
+               case 'q':
+                       rx_queue_per_lcore = parse_nqueue(optarg);
+                       if (rx_queue_per_lcore < 0) {
+                               printf("invalid queue number\n");
+                               print_usage(prgname);
+                               return -1;
+                       }
+                       break;
+
+               /* long options */
+               case 0:
+                       print_usage(prgname);
+                       return -1;
+
+               default:
+                       print_usage(prgname);
+                       return -1;
+               }
+       }
+
+       if (enabled_port_mask == 0) {
+               printf("portmask not specified\n");
+               print_usage(prgname);
+               return -1;
+       }
+
+       /* put the program name back in front of the remaining arguments */
+       if (optind >= 0)
+               argv[optind-1] = prgname;
+
+       ret = optind-1;
+       optind = 0; /* reset getopt lib */
+       return ret;
+}
+
+/* Print "name" followed by a MAC address as XX:XX:XX:XX:XX:XX (no newline) */
+static void
+print_ethaddr(const char *name, struct ether_addr *eth_addr)
+{
+       printf("%s%02X:%02X:%02X:%02X:%02X:%02X", name,
+              eth_addr->addr_bytes[0],
+              eth_addr->addr_bytes[1],
+              eth_addr->addr_bytes[2],
+              eth_addr->addr_bytes[3],
+              eth_addr->addr_bytes[4],
+              eth_addr->addr_bytes[5]);
+}
+
+/*
+ * Application entry point (see main.h for the MAIN macro).  Initialises the
+ * EAL, parses the application arguments, creates the direct and indirect
+ * mbuf pools, initialises the PMDs, probes the PCI bus, configures every
+ * port enabled in the port mask (one RX queue; one TX queue per lcore),
+ * builds the LPM routing table from l3fwd_route_array and finally launches
+ * main_loop() on every lcore.  Returns 0 on success; exits or panics on
+ * any initialisation failure.
+ */
+int
+MAIN(int argc, char **argv)
+{
+       struct lcore_queue_conf *qconf;
+       struct rte_eth_link link;
+       int ret;
+       unsigned nb_ports, i;
+       uint16_t queueid = 0;
+       unsigned lcore_id = 0, rx_lcore_id = 0;
+       uint32_t n_tx_queue, nb_lcores;
+       uint8_t portid;
+
+       /* init EAL */
+       ret = rte_eal_init(argc, argv);
+       if (ret < 0)
+               rte_exit(EXIT_FAILURE, "rte_eal_init failed");
+       argc -= ret;
+       argv += ret;
+
+       /* parse application arguments (after the EAL ones) */
+       ret = parse_args(argc, argv);
+       if (ret < 0)
+               rte_exit(EXIT_FAILURE, "Invalid arguments");
+
+       /* create the mbuf pools: 'direct' holds packet data, 'indirect'
+        * holds the data-less mbuf headers used by fragmentation */
+       pool_direct =
+               rte_mempool_create("pool_direct", NB_MBUF,
+                                  MBUF_SIZE, 32,
+                                  sizeof(struct rte_pktmbuf_pool_private),
+                                  rte_pktmbuf_pool_init, NULL,
+                                  rte_pktmbuf_init, NULL,
+                                  SOCKET0, 0);
+       if (pool_direct == NULL)
+               rte_panic("Cannot init direct mbuf pool\n");
+
+       pool_indirect =
+               rte_mempool_create("pool_indirect", NB_MBUF,
+                                  sizeof(struct rte_mbuf), 32,
+                                  0,
+                                  NULL, NULL,
+                                  rte_pktmbuf_init, NULL,
+                                  SOCKET0, 0);
+       if (pool_indirect == NULL)
+               rte_panic("Cannot init indirect mbuf pool\n");
+
+       /* init driver */
+#ifdef RTE_LIBRTE_IGB_PMD
+       if (rte_igb_pmd_init() < 0)
+               rte_panic("Cannot init igb pmd\n");
+#endif
+#ifdef RTE_LIBRTE_IXGBE_PMD
+       if (rte_ixgbe_pmd_init() < 0)
+               rte_panic("Cannot init ixgbe pmd\n");
+#endif
+
+       if (rte_eal_pci_probe() < 0)
+               rte_panic("Cannot probe PCI\n");
+
+       nb_ports = rte_eth_dev_count();
+       if (nb_ports > MAX_PORTS)
+               nb_ports = MAX_PORTS;
+
+       nb_lcores = rte_lcore_count();
+
+       /* initialize all ports */
+       for (portid = 0; portid < nb_ports; portid++) {
+               /* skip ports that are not enabled */
+               if ((enabled_port_mask & (1 << portid)) == 0) {
+                       printf("Skipping disabled port %d\n", portid);
+                       continue;
+               }
+
+               qconf = &lcore_queue_conf[rx_lcore_id];
+
+               /* get the lcore_id for this port */
+               while (rte_lcore_is_enabled(rx_lcore_id) == 0 ||
+                      qconf->n_rx_queue == (unsigned)rx_queue_per_lcore) {
+
+                       rx_lcore_id ++;
+                       qconf = &lcore_queue_conf[rx_lcore_id];
+
+                       if (rx_lcore_id >= RTE_MAX_LCORE)
+                               rte_exit(EXIT_FAILURE, "Not enough cores\n");
+               }
+               qconf->rx_queue_list[qconf->n_rx_queue] = portid;
+               qconf->n_rx_queue++;
+
+               /* init port */
+               printf("Initializing port %d on lcore %u... ", portid,
+                      rx_lcore_id);
+               fflush(stdout);
+
+               n_tx_queue = nb_lcores;
+               if (n_tx_queue > MAX_TX_QUEUE_PER_PORT)
+                       n_tx_queue = MAX_TX_QUEUE_PER_PORT;
+               ret = rte_eth_dev_configure(portid, 1, (uint16_t)n_tx_queue,
+                                           &port_conf);
+               if (ret < 0)
+                       rte_exit(EXIT_FAILURE, "Cannot configure device: "
+                               "err=%d, port=%d\n",
+                               ret, portid);
+
+               rte_eth_macaddr_get(portid, &ports_eth_addr[portid]);
+               print_ethaddr(" Address:", &ports_eth_addr[portid]);
+               printf(", ");
+
+               /* init one RX queue */
+               queueid = 0;
+               printf("rxq=%d ", queueid);
+               fflush(stdout);
+               ret = rte_eth_rx_queue_setup(portid, queueid, nb_rxd,
+                                            SOCKET0, &rx_conf,
+                                            pool_direct);
+               if (ret < 0)
+                       rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup: "
+                               "err=%d, port=%d\n",
+                               ret, portid);
+
+               /* init one TX queue per couple (lcore,port) */
+               queueid = 0;
+               for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+                       if (rte_lcore_is_enabled(lcore_id) == 0)
+                               continue;
+                       printf("txq=%u,%d ", lcore_id, queueid);
+                       fflush(stdout);
+                       ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
+                                                    SOCKET0, &tx_conf);
+                       if (ret < 0)
+                               rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: "
+                                       "err=%d, port=%d\n", ret, portid);
+
+                       qconf = &lcore_queue_conf[lcore_id];
+                       qconf->tx_queue_id[portid] = queueid;
+                       queueid++;
+               }
+
+               /* Start device */
+               ret = rte_eth_dev_start(portid);
+               if (ret < 0)
+                       rte_exit(EXIT_FAILURE, "rte_eth_dev_start: "
+                               "err=%d, port=%d\n",
+                               ret, portid);
+
+               printf("done: ");
+
+               /* get link status */
+               rte_eth_link_get(portid, &link);
+               if (link.link_status) {
+                       printf(" Link Up - speed %u Mbps - %s\n",
+                              (uint32_t) link.link_speed,
+                              (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
+                              ("full-duplex") : ("half-duplex"));
+               } else {
+                       printf(" Link Down\n");
+               }
+
+               /* Set port in promiscuous mode */
+               rte_eth_promiscuous_enable(portid);
+       }
+
+       /* create the LPM table */
+       l3fwd_lpm = rte_lpm_create("L3FWD_LPM", SOCKET0, L3FWD_LPM_MAX_RULES,
+                       RTE_LPM_MEMZONE);
+       if (l3fwd_lpm == NULL)
+               rte_panic("Unable to create the l3fwd LPM table\n");
+
+       /* populate the LPM table */
+       for (i = 0; i < L3FWD_NUM_ROUTES; i++) {
+               ret = rte_lpm_add(l3fwd_lpm,
+                       l3fwd_route_array[i].ip,
+                       l3fwd_route_array[i].depth,
+                       l3fwd_route_array[i].if_out);
+
+               if (ret < 0) {
+                       rte_panic("Unable to add entry %u to the l3fwd "
+                               "LPM table\n", i);
+               }
+
+               printf("Adding route 0x%08x / %d (%d)\n",
+                       l3fwd_route_array[i].ip,
+                       l3fwd_route_array[i].depth,
+                       l3fwd_route_array[i].if_out);
+       }
+
+       /* launch per-lcore init on every lcore */
+       rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
+       RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+               if (rte_eal_wait_lcore(lcore_id) < 0)
+                       return -1;
+       }
+
+       return 0;
+}
diff --git a/examples/ipv4_frag/main.h b/examples/ipv4_frag/main.h
new file mode 100644 (file)
index 0000000..740cf4c
--- /dev/null
@@ -0,0 +1,48 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#ifndef _MAIN_H_
+#define _MAIN_H_
+
+/* On bare-metal execution environments the application entry point is
+ * renamed to '_main'; everywhere else the standard 'main' symbol is used. */
+#ifdef RTE_EXEC_ENV_BAREMETAL
+#define MAIN _main
+#else
+#define MAIN main
+#endif
+
+
+int MAIN(int argc, char **argv);
+
+#endif /* _MAIN_H_ */
diff --git a/examples/ipv4_frag/rte_ipv4_frag.h b/examples/ipv4_frag/rte_ipv4_frag.h
new file mode 100644 (file)
index 0000000..99ef0d2
--- /dev/null
@@ -0,0 +1,253 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#ifndef __INCLUDE_RTE_IPV4_FRAG_H__
+#define __INCLUDE_RTE_IPV4_FRAG_H__
+#include <rte_ip.h>
+
+/**
+ * @file
+ * RTE IPv4 Fragmentation
+ *
+ * Implementation of IPv4 fragmentation.
+ *
+ */
+
+/*
+ * Default byte size for the IPv4 Maximum Transfer Unit (MTU).
+ * This value includes the size of IPv4 header.
+ */
+#define        IPV4_MTU_DEFAULT        ETHER_MTU
+
+/*
+ * Default payload in bytes for the IPv4 packet.
+ */
+#define        IPV4_DEFAULT_PAYLOAD    (IPV4_MTU_DEFAULT - sizeof(struct ipv4_hdr))
+
+/*
+ * MAX number of fragments per packet allowed.
+ */
+#define        IPV4_MAX_FRAGS_PER_PACKET       0x80
+
+
+/* Debug on/off */
+#ifdef RTE_IPV4_FRAG_DEBUG
+
+#define        RTE_IPV4_FRAG_ASSERT(exp)                                       \
+if (!(exp))    {                                                       \
+       rte_panic("function %s, line%d\tassert \"" #exp "\" failed\n",  \
+               __func__, __LINE__);                                    \
+}
+
+#else /*RTE_IPV4_FRAG_DEBUG*/
+
+#define RTE_IPV4_FRAG_ASSERT(exp)      do { } while(0)
+
+#endif /*RTE_IPV4_FRAG_DEBUG*/
+
+/* Fragment Offset.
+ * Bit layout of the IPv4 fragment_offset field (host order):
+ * bit 14 = DF (Don't Fragment), bit 13 = MF (More Fragments),
+ * bits 0-12 = fragment offset measured in 8-byte units.
+ * FO_SHIFT converts a byte offset to 8-byte units; FO_MASK selects the
+ * low 3 bits, used to check 8-byte alignment of a byte length. */
+#define        IPV4_HDR_DF_SHIFT                       14
+#define        IPV4_HDR_MF_SHIFT                       13
+#define        IPV4_HDR_FO_SHIFT                       3
+
+#define        IPV4_HDR_DF_MASK                        (1 << IPV4_HDR_DF_SHIFT)
+#define        IPV4_HDR_MF_MASK                        (1 << IPV4_HDR_MF_SHIFT)
+
+#define        IPV4_HDR_FO_MASK                        ((1 << IPV4_HDR_FO_SHIFT) - 1)
+
+/*
+ * Copy the IPv4 header 'src' into 'dst' and patch the per-fragment
+ * fields: 'total_length' is set to 'len', 'fragment_offset' is rebuilt
+ * from the original flags+offset in 'fofs', the data offset 'dofs'
+ * (in bytes, converted to 8-byte units) and the More-Fragments bit 'mf'.
+ * The checksum is zeroed; it is recomputed later (PKT_TX_IP_CKSUM).
+ */
+static inline void __fill_ipv4hdr_frag(struct ipv4_hdr *dst,
+               const struct ipv4_hdr *src, uint16_t len, uint16_t fofs,
+               uint16_t dofs, uint32_t mf)
+{
+       rte_memcpy(dst, src, sizeof(*dst));
+       /* merge original flags with this fragment's offset (8-byte units) */
+       fofs = (uint16_t)(fofs + (dofs >> IPV4_HDR_FO_SHIFT));
+       /* set the More-Fragments bit when further fragments follow */
+       fofs = (uint16_t)(fofs | mf << IPV4_HDR_MF_SHIFT);
+       dst->fragment_offset = rte_cpu_to_be_16(fofs);
+       dst->total_length = rte_cpu_to_be_16(len);
+       /* left to be filled in by checksum offload */
+       dst->hdr_checksum = 0;
+}
+
+/* Free the first 'num' mbufs of a partially built fragment array;
+ * used to unwind already-produced fragments on allocation failure. */
+static inline void __free_fragments(struct rte_mbuf *mb[], uint32_t num)
+{
+       uint32_t i;
+       for (i = 0; i != num; i++)
+               rte_pktmbuf_free(mb[i]);
+}
+
+/**
+ * IPv4 fragmentation.
+ *
+ * This function implements the fragmentation of IPv4 packets.
+ *
+ * @param pkt_in
+ *   The input packet.
+ * @param pkts_out
+ *   Array storing the output fragments.
+ * @param mtu_size
+ *   Size in bytes of the Maximum Transfer Unit (MTU) for the outgoing IPv4
+ *   datagrams. This value includes the size of the IPv4 header.
+ * @param pool_direct
+ *   MBUF pool used for allocating direct buffers for the output fragments.
+ * @param pool_indirect
+ *   MBUF pool used for allocating indirect buffers for the output fragments.
+ * @return
+ *   Upon successful completion - number of output fragments placed
+ *   in the pkts_out array.
+ *   Otherwise - (-1) * <errno>.
+ */
+static inline int32_t rte_ipv4_fragmentation(struct rte_mbuf *pkt_in,
+       struct rte_mbuf **pkts_out,
+       uint16_t nb_pkts_out,
+       uint16_t mtu_size,
+       struct rte_mempool *pool_direct,
+       struct rte_mempool *pool_indirect)
+{
+       struct rte_mbuf *in_seg = NULL;
+       struct ipv4_hdr *in_hdr;
+       uint32_t out_pkt_pos, in_seg_data_pos;
+       uint32_t more_in_segs;
+       uint16_t fragment_offset, flag_offset, frag_size;
+
+       /* per-fragment payload size: MTU minus the IPv4 header */
+       frag_size = (uint16_t)(mtu_size - sizeof(struct ipv4_hdr));
+
+       /* Fragment size should be a multiple of 8. */
+       RTE_IPV4_FRAG_ASSERT((frag_size & IPV4_HDR_FO_MASK) == 0);
+
+       /* The input payload must fit into the maximum allowed number
+        * of fragments. */
+       RTE_IPV4_FRAG_ASSERT(IPV4_MAX_FRAGS_PER_PACKET * frag_size >=
+           (uint16_t)(pkt_in->pkt.pkt_len - sizeof (struct ipv4_hdr)));
+
+       in_hdr = (struct ipv4_hdr*) pkt_in->pkt.data;
+       /* NOTE(review): semantically this is a be->cpu conversion;
+        * rte_cpu_to_be_16 yields the same result for a 16-bit swap,
+        * but rte_be_to_cpu_16 would state the intent correctly. */
+       flag_offset = rte_cpu_to_be_16(in_hdr->fragment_offset);
+
+       /* If Don't Fragment flag is set */
+       if (unlikely ((flag_offset & IPV4_HDR_DF_MASK) != 0))
+               return (-ENOTSUP);
+
+       /* Check that pkts_out is big enough to hold all fragments */
+       if (unlikely (frag_size * nb_pkts_out <
+           (uint16_t)(pkt_in->pkt.pkt_len - sizeof (struct ipv4_hdr))))
+               return (-EINVAL);
+
+       in_seg = pkt_in;
+       /* skip the input IPv4 header; it is rebuilt per fragment from
+        * 'in_hdr' by __fill_ipv4hdr_frag() */
+       in_seg_data_pos = sizeof(struct ipv4_hdr);
+       out_pkt_pos = 0;
+       fragment_offset = 0;
+
+       more_in_segs = 1;
+       /* each outer iteration produces exactly one output fragment */
+       while (likely(more_in_segs)) {
+               struct rte_mbuf *out_pkt = NULL, *out_seg_prev = NULL;
+               uint32_t more_out_segs;
+               struct ipv4_hdr *out_hdr;
+
+               /* Allocate direct buffer */
+               out_pkt = rte_pktmbuf_alloc(pool_direct);
+               if (unlikely(out_pkt == NULL)) {
+                       /* unwind fragments already produced */
+                       __free_fragments(pkts_out, out_pkt_pos);
+                       return (-ENOMEM);
+               }
+
+               /* Reserve space for the IP header that will be built later */
+               out_pkt->pkt.data_len = sizeof(struct ipv4_hdr);
+               out_pkt->pkt.pkt_len = sizeof(struct ipv4_hdr);
+
+               out_seg_prev = out_pkt;
+               more_out_segs = 1;
+               /* fill this fragment with zero-copy references into the
+                * input chain until it is full or the input runs out */
+               while (likely(more_out_segs && more_in_segs)) {
+                       struct rte_mbuf *out_seg = NULL;
+                       uint32_t len;
+
+                       /* Allocate indirect buffer */
+                       out_seg = rte_pktmbuf_alloc(pool_indirect);
+                       if (unlikely(out_seg == NULL)) {
+                               rte_pktmbuf_free(out_pkt);
+                               __free_fragments(pkts_out, out_pkt_pos);
+                               return (-ENOMEM);
+                       }
+                       out_seg_prev->pkt.next = out_seg;
+                       out_seg_prev = out_seg;
+
+                       /* Prepare indirect buffer: attach to the input
+                        * segment, then window it onto the unread bytes */
+                       rte_pktmbuf_attach(out_seg, in_seg);
+                       len = mtu_size - out_pkt->pkt.pkt_len;
+                       if (len > (in_seg->pkt.data_len - in_seg_data_pos)) {
+                               len = in_seg->pkt.data_len - in_seg_data_pos;
+                       }
+                       out_seg->pkt.data = (char*) in_seg->pkt.data + (uint16_t)in_seg_data_pos;
+                       out_seg->pkt.data_len = (uint16_t)len;
+                       out_pkt->pkt.pkt_len = (uint16_t)(len +
+                           out_pkt->pkt.pkt_len);
+                       out_pkt->pkt.nb_segs += 1;
+                       in_seg_data_pos += len;
+
+                       /* Current output packet (i.e. fragment) done ? */
+                       if (unlikely(out_pkt->pkt.pkt_len >= mtu_size)) {
+                               more_out_segs = 0;
+                       }
+
+                       /* Current input segment done ? */
+                       if (unlikely(in_seg_data_pos == in_seg->pkt.data_len)) {
+                               in_seg = in_seg->pkt.next;
+                               in_seg_data_pos = 0;
+
+                               if (unlikely(in_seg == NULL)) {
+                                       more_in_segs = 0;
+                               }
+                       }
+               }
+
+               /* Build the IP header */
+
+               out_hdr = (struct ipv4_hdr*) out_pkt->pkt.data;
+
+               /* MF bit is set iff more input remains (more_in_segs) */
+               __fill_ipv4hdr_frag(out_hdr, in_hdr,
+                   (uint16_t)out_pkt->pkt.pkt_len,
+                   flag_offset, fragment_offset, more_in_segs);
+
+               /* advance the byte offset by this fragment's payload */
+               fragment_offset = (uint16_t)(fragment_offset +
+                   out_pkt->pkt.pkt_len - sizeof(struct ipv4_hdr));
+
+               /* request IPv4 checksum computation by the device */
+               out_pkt->ol_flags |= PKT_TX_IP_CKSUM;
+               out_pkt->pkt.l3_len = sizeof(struct ipv4_hdr);
+
+               /* Write the fragment to the output list */
+               pkts_out[out_pkt_pos] = out_pkt;
+               out_pkt_pos ++;
+       }
+
+       return (out_pkt_pos);
+}
+
+#endif
diff --git a/examples/ipv4_multicast/496632_IPv4_Multicast_Sample_App_Guide_Rev1.0.pdf b/examples/ipv4_multicast/496632_IPv4_Multicast_Sample_App_Guide_Rev1.0.pdf
new file mode 100644 (file)
index 0000000..197c85e
Binary files /dev/null and b/examples/ipv4_multicast/496632_IPv4_Multicast_Sample_App_Guide_Rev1.0.pdf differ
diff --git a/examples/ipv4_multicast/Makefile b/examples/ipv4_multicast/Makefile
new file mode 100644 (file)
index 0000000..ef98809
--- /dev/null
@@ -0,0 +1,63 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+#
+
+ifeq ($(RTE_SDK),)
+$(error "Please define RTE_SDK environment variable")
+endif
+
+# Default target, can be overriden by command line or environment
+RTE_TARGET ?= x86_64-default-linuxapp-gcc
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+ifneq ($(CONFIG_RTE_MBUF_SCATTER_GATHER),y)
+$(error This application requires RTE_MBUF_SCATTER_GATHER to be enabled)
+endif
+
+# binary name
+APP = ipv4_multicast
+
+# all source are stored in SRCS-y
+SRCS-y := main.c
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+# workaround for a gcc bug with noreturn attribute
+# http://gcc.gnu.org/bugzilla/show_bug.cgi?id=12603
+ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
+CFLAGS_main.o += -Wno-return-type
+endif
+
+include $(RTE_SDK)/mk/rte.extapp.mk
diff --git a/examples/ipv4_multicast/main.c b/examples/ipv4_multicast/main.c
new file mode 100644 (file)
index 0000000..bfabf75
--- /dev/null
@@ -0,0 +1,834 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <sys/types.h>
+#include <string.h>
+#include <sys/queue.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <getopt.h>
+
+#include <rte_common.h>
+#include <rte_byteorder.h>
+#include <rte_log.h>
+#include <rte_tailq.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_memzone.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_launch.h>
+#include <rte_atomic.h>
+#include <rte_cycles.h>
+#include <rte_prefetch.h>
+#include <rte_lcore.h>
+#include <rte_per_lcore.h>
+#include <rte_branch_prediction.h>
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_random.h>
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_ring.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_malloc.h>
+#include <rte_hash_crc.h>
+#include <rte_fbk_hash.h>
+#include <rte_ip.h>
+
+#include "main.h"
+
+#define RTE_LOGTYPE_IPv4_MULTICAST RTE_LOGTYPE_USER1
+
+#define MAX_PORTS 16
+
+#define        MCAST_CLONE_PORTS       2
+#define        MCAST_CLONE_SEGS        2
+
+#define        PKT_MBUF_SIZE   (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
+#define        NB_PKT_MBUF     8192
+
+#define        HDR_MBUF_SIZE   (sizeof(struct rte_mbuf) + 2 * RTE_PKTMBUF_HEADROOM)
+#define        NB_HDR_MBUF     (NB_PKT_MBUF * MAX_PORTS)
+
+#define        CLONE_MBUF_SIZE (sizeof(struct rte_mbuf))
+#define        NB_CLONE_MBUF   (NB_PKT_MBUF * MCAST_CLONE_PORTS * MCAST_CLONE_SEGS * 2)
+
+/* allow max jumbo frame 9.5 KB */
+#define        JUMBO_FRAME_MAX_SIZE    0x2600
+
+/*
+ * RX and TX Prefetch, Host, and Write-back threshold values should be
+ * carefully set for optimal performance. Consult the network
+ * controller's datasheet and supporting DPDK documentation for guidance
+ * on how these parameters should be set.
+ */
+#define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */
+#define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */
+#define RX_WTHRESH 4 /**< Default values of RX write-back threshold reg. */
+
+/*
+ * These default values are optimized for use with the Intel(R) 82599 10 GbE
+ * Controller and the DPDK ixgbe PMD. Consider using other values for other
+ * network controllers and/or network drivers.
+ */
+#define TX_PTHRESH 36 /**< Default values of TX prefetch threshold reg. */
+#define TX_HTHRESH 0  /**< Default values of TX host threshold reg. */
+#define TX_WTHRESH 0  /**< Default values of TX write-back threshold reg. */
+
+#define MAX_PKT_BURST 32
+#define BURST_TX_DRAIN 200000ULL /* around 100us at 2 Ghz */
+
+#define SOCKET0 0
+
+/* Configure how many packets ahead to prefetch, when reading packets */
+#define PREFETCH_OFFSET        3
+
+/*
+ * Construct Ethernet multicast address from IPv4 multicast address.
+ * Citing RFC 1112, section 6.4:
+ * "An IP host group address is mapped to an Ethernet multicast address
+ * by placing the low-order 23-bits of the IP address into the low-order
+ * 23 bits of the Ethernet multicast address 01-00-5E-00-00-00 (hex)."
+ */
+#define        ETHER_ADDR_FOR_IPV4_MCAST(x)    \
+       (rte_cpu_to_be_64(0x01005e000000ULL | ((x) & 0x7fffff)) >> 16)
+
+/*
+ * Configurable number of RX/TX ring descriptors
+ */
+#define RTE_TEST_RX_DESC_DEFAULT 128
+#define RTE_TEST_TX_DESC_DEFAULT 512
+static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
+static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
+
+/* ethernet addresses of ports */
+static struct ether_addr ports_eth_addr[MAX_PORTS];
+
+/* mask of enabled ports */
+static uint32_t enabled_port_mask = 0;
+
+static uint8_t nb_ports = 0;
+
+static int rx_queue_per_lcore = 1;
+
+struct mbuf_table {
+       uint16_t len;
+       struct rte_mbuf *m_table[MAX_PKT_BURST];
+};
+
+#define MAX_RX_QUEUE_PER_LCORE 16
+#define MAX_TX_QUEUE_PER_PORT 16
+struct lcore_queue_conf {
+       uint64_t tx_tsc;
+       uint16_t n_rx_queue;
+       uint8_t rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
+       uint16_t tx_queue_id[MAX_PORTS];
+       struct mbuf_table tx_mbufs[MAX_PORTS];
+} __rte_cache_aligned;
+static struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
+
+/* Port configuration: jumbo frames up to JUMBO_FRAME_MAX_SIZE enabled,
+ * all rxmode hardware offloads disabled. */
+static const struct rte_eth_conf port_conf = {
+       .rxmode = {
+               .max_rx_pkt_len = JUMBO_FRAME_MAX_SIZE,
+               .split_hdr_size = 0,
+               .header_split   = 0, /**< Header Split disabled */
+               .hw_ip_checksum = 0, /**< IP checksum offload disabled */
+               .hw_vlan_filter = 0, /**< VLAN filtering disabled */
+               .jumbo_frame    = 1, /**< Jumbo Frame Support enabled */
+               .hw_strip_crc   = 0, /**< CRC stripping by hardware disabled */
+       },
+       .txmode = {
+       },
+};
+
+/* RX queue thresholds; see RX_*THRESH notes above. */
+static const struct rte_eth_rxconf rx_conf = {
+       .rx_thresh = {
+               .pthresh = RX_PTHRESH,
+               .hthresh = RX_HTHRESH,
+               .wthresh = RX_WTHRESH,
+       },
+};
+
+/* TX queue thresholds; see TX_*THRESH notes above. */
+static const struct rte_eth_txconf tx_conf = {
+       .tx_thresh = {
+               .pthresh = TX_PTHRESH,
+               .hthresh = TX_HTHRESH,
+               .wthresh = TX_WTHRESH,
+       },
+       .tx_free_thresh = 0, /* Use PMD default values */
+       .tx_rs_thresh = 0, /* Use PMD default values */
+};
+
+static struct rte_mempool *packet_pool, *header_pool, *clone_pool;
+
+
+/* Multicast: parameters of the four-byte-key hash used to look up the
+ * destination port mask for a multicast group address. */
+static struct rte_fbk_hash_params mcast_hash_params = {
+       .name = "MCAST_HASH",
+       .entries = 1024,
+       .entries_per_bucket = 4,
+       .socket_id = SOCKET0,
+       .hash_func = NULL,
+       .init_val = 0,
+};
+
+struct rte_fbk_hash_table *mcast_hash = NULL;
+
+/* One static route: an IPv4 multicast group address and the bitmask of
+ * output ports it forwards to. mcast_forward() reads the mask back as
+ * the fbk hash value, so the table is presumably loaded into
+ * 'mcast_hash' during initialization — verify against the setup code. */
+struct mcast_group_params {
+       uint32_t ip;
+       uint16_t port_mask;
+};
+
+static struct mcast_group_params mcast_group_table[] = {
+               {IPv4(224,0,0,101), 0x1},
+               {IPv4(224,0,0,102), 0x2},
+               {IPv4(224,0,0,103), 0x3},
+               {IPv4(224,0,0,104), 0x4},
+               {IPv4(224,0,0,105), 0x5},
+               {IPv4(224,0,0,106), 0x6},
+               {IPv4(224,0,0,107), 0x7},
+               {IPv4(224,0,0,108), 0x8},
+               {IPv4(224,0,0,109), 0x9},
+               {IPv4(224,0,0,110), 0xA},
+               {IPv4(224,0,0,111), 0xB},
+               {IPv4(224,0,0,112), 0xC},
+               {IPv4(224,0,0,113), 0xD},
+               {IPv4(224,0,0,114), 0xE},
+               {IPv4(224,0,0,115), 0xF},
+};
+
+/* number of entries in mcast_group_table */
+#define N_MCAST_GROUPS \
+       (sizeof (mcast_group_table) / sizeof (mcast_group_table[0]))
+
+
+/* Send burst of packets on an output interface; packets the device
+ * does not accept are freed (i.e. dropped), and the queue is reset. */
+static void
+send_burst(struct lcore_queue_conf *qconf, uint8_t port)
+{
+       struct rte_mbuf **m_table;
+       uint16_t n, queueid;
+       int ret;
+
+       queueid = qconf->tx_queue_id[port];
+       m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table;
+       n = qconf->tx_mbufs[port].len;
+
+       ret = rte_eth_tx_burst(port, queueid, m_table, n);
+       /* free (drop) any packets the device did not take */
+       while (unlikely (ret < n)) {
+               rte_pktmbuf_free(m_table[ret]);
+               ret++;
+       }
+
+       qconf->tx_mbufs[port].len = 0;
+}
+
+/* Get number of bits set (Kernighan's method: each 'v &= v - 1'
+ * clears the lowest set bit, so the loop runs once per set bit). */
+static inline uint32_t
+bitcnt(uint32_t v)
+{
+       uint32_t n;
+
+       for (n = 0; v != 0; v &= v - 1, n++)
+               ;
+
+       return (n);
+}
+
+/**
+ * Create the output multicast packet based on the given input packet.
+ * There are two approaches for creating outgoing packet, though both
+ * are based on data zero-copy idea, they differ in few details:
+ * First one creates a clone of the input packet, e.g - walk though all
+ * segments of the input packet, and for each of them create a new packet
+ * mbuf and attach that new mbuf to the segment (refer to rte_pktmbuf_clone()
+ * for more details). Then new mbuf is allocated for the packet header
+ * and is prepended to the 'clone' mbuf.
+ * Second approach doesn't make a clone, it just increment refcnt for all
+ * input packet segments. Then it allocates new mbuf for the packet header
+ * and prepends it to the input packet.
+ * Basically first approach reuses only input packet's data, but creates
+ * it's own copy of packet's metadata. Second approach reuses both input's
+ * packet data and metadata.
+ * The advantage of first approach - is that each outgoing packet has it's
+ * own copy of metadata, so we can safely modify data pointer of the
+ * input packet. That allows us to skip creation if the output packet for
+ * the last destination port, but instead modify input packet's header inplace,
+ * e.g: for N destination ports we need to invoke mcast_out_pkt (N-1) times.
+ * The advantage of second approach - less work for each outgoing packet,
+ * e.g: we skip "clone" operation completely. Though it comes with a price -
+ * input packet's metadata has to be intact. So for N destination ports we
+ * need to invoke mcast_out_pkt N times.
+ * So for small number of outgoing ports (and segments in the input packet)
+ * first approach will be faster.
+ * As number of outgoing ports (and/or input segments) will grow,
+ * second way will become more preferable.
+ *
+ *  @param pkt
+ *  Input packet mbuf.
+ *  @param use_clone
+ *  Control which of the two approaches described above should be used:
+ *  - 0 - use second approach:
+ *    Don't "clone" input packet.
+ *    Prepend new header directly to the input packet
+ *  - 1 - use first approach:
+ *    Make a "clone" of input packet first.
+ *    Prepend new header to the clone of the input packet
+ *  @return
+ *  - The pointer to the new outgoing packet.
+ *  - NULL if operation failed.
+ */
+static inline struct rte_mbuf *
+mcast_out_pkt(struct rte_mbuf *pkt, int use_clone)
+{
+       struct rte_mbuf *hdr;
+
+       /* Create new mbuf for the header. */
+       if (unlikely ((hdr = rte_pktmbuf_alloc(header_pool)) == NULL))
+               return (NULL);
+
+       /* If requested, then make a new clone packet. */
+       if (use_clone != 0 &&
+           unlikely ((pkt = rte_pktmbuf_clone(pkt, clone_pool)) == NULL)) {
+               rte_pktmbuf_free(hdr);
+               return (NULL);
+       }
+
+       /* prepend new header mbuf; the Ethernet header bytes themselves
+        * are written later by mcast_send_pkt() */
+       hdr->pkt.next = pkt;
+
+
+       /* update header's fields; hdr->pkt.data_len is assumed to be 0
+        * for a freshly allocated header mbuf — confirm against the
+        * mbuf allocation semantics */
+       hdr->pkt.pkt_len = (uint16_t)(hdr->pkt.data_len + pkt->pkt.pkt_len);
+       hdr->pkt.nb_segs = (uint8_t)(pkt->pkt.nb_segs + 1);
+
+       /* copy metadata from source packet*/
+       hdr->pkt.in_port = pkt->pkt.in_port;
+       hdr->pkt.vlan_tci = pkt->pkt.vlan_tci;
+       hdr->pkt.l2_len = pkt->pkt.l2_len;
+       hdr->pkt.l3_len = pkt->pkt.l3_len;
+       hdr->pkt.hash = pkt->pkt.hash;
+
+       hdr->ol_flags = pkt->ol_flags;
+
+       __rte_mbuf_sanity_check(hdr, RTE_MBUF_PKT, 1);
+       return (hdr);
+}
+
+/*
+ * Write new Ethernet header to the outgoing packet,
+ * and put it into the outgoing queue for the given port;
+ * flush the queue once it reaches MAX_PKT_BURST entries.
+ */
+static inline void
+mcast_send_pkt(struct rte_mbuf *pkt, struct ether_addr *dest_addr,
+               struct lcore_queue_conf *qconf, uint8_t port)
+{
+       struct ether_hdr *ethdr;
+       uint16_t len;
+
+       /* Construct Ethernet header. */
+       ethdr = (struct ether_hdr *)rte_pktmbuf_prepend(pkt, (uint16_t)sizeof(*ethdr));
+       RTE_MBUF_ASSERT(ethdr != NULL);
+
+       ether_addr_copy(dest_addr, &ethdr->d_addr);
+       ether_addr_copy(&ports_eth_addr[port], &ethdr->s_addr);
+       /* NOTE(review): a 16-bit swap is symmetric so the value is right,
+        * but rte_cpu_to_be_16 would express the intended direction. */
+       ethdr->ether_type = rte_be_to_cpu_16(ETHER_TYPE_IPv4);
+
+       /* Put new packet into the output queue */
+       len = qconf->tx_mbufs[port].len;
+       qconf->tx_mbufs[port].m_table[len] = pkt;
+       qconf->tx_mbufs[port].len = ++len;
+
+       /* Transmit packets */
+       if (unlikely(MAX_PKT_BURST == len))
+               send_burst(qconf, port);
+}
+
+/* Multicast forward of the input packet */
+static inline void
+mcast_forward(struct rte_mbuf *m, struct lcore_queue_conf *qconf)
+{
+       struct rte_mbuf *mc;
+       struct ipv4_hdr *iphdr;
+       uint32_t dest_addr, port_mask, port_num, use_clone;
+       int32_t hash;
+       uint8_t port;
+       union {
+               uint64_t as_int;
+               struct ether_addr as_addr;
+       } dst_eth_addr;
+
+       /* Remove the Ethernet header from the input packet */
+       iphdr = (struct ipv4_hdr *)rte_pktmbuf_adj(m, (uint16_t)sizeof(struct ether_hdr));
+       RTE_MBUF_ASSERT(iphdr != NULL);
+
+       dest_addr = rte_be_to_cpu_32(iphdr->dst_addr);
+
+       /*
+        * Check that it is a valid multicast address and
+        * we have some active ports assigned to it.
+        * Note: the fbk hash value doubles as the destination port mask.
+        */
+       if(!IS_IPV4_MCAST(dest_addr) ||
+           (hash = rte_fbk_hash_lookup(mcast_hash, dest_addr)) <= 0 ||
+           (port_mask = hash & enabled_port_mask) == 0) {
+               rte_pktmbuf_free(m);
+               return;
+       }
+
+       /* Calculate number of destination ports. */
+       port_num = bitcnt(port_mask);
+
+       /* Should we use rte_pktmbuf_clone() or not. */
+       use_clone = (port_num <= MCAST_CLONE_PORTS &&
+           m->pkt.nb_segs <= MCAST_CLONE_SEGS);
+
+       /* Mark all packet's segments as referenced port_num times */
+       if (use_clone == 0)
+               rte_pktmbuf_refcnt_update(m, (uint16_t)port_num);
+
+       /* construct destination ethernet address */
+       dst_eth_addr.as_int = ETHER_ADDR_FOR_IPV4_MCAST(dest_addr);
+
+       /*
+        * Loop terminates when port_mask == use_clone: with use_clone == 1
+        * it stops one destination early, so the last port can reuse the
+        * input packet in place (handled after the loop); with
+        * use_clone == 0 it runs until the mask is exhausted.
+        */
+       for (port = 0; use_clone != port_mask; port_mask >>= 1, port++) {
+
+               /* Prepare output packet and send it out. */
+               if ((port_mask & 1) != 0) {
+                       if (likely ((mc = mcast_out_pkt(m, use_clone)) != NULL))
+                               mcast_send_pkt(mc, &dst_eth_addr.as_addr,
+                                               qconf, port);
+                       else if (use_clone == 0)
+                               /* drop the reference taken for this port */
+                               rte_pktmbuf_free(m);
+               }
+       }
+
+       /*
+        * If we making clone packets, then, for the last destination port,
+        * we can overwrite input packet's metadata.
+        */
+       if (use_clone != 0)
+               mcast_send_pkt(m, &dst_eth_addr.as_addr, qconf, port);
+       else
+               /* release the caller's own reference */
+               rte_pktmbuf_free(m);
+}
+
+/* Send burst of outgoing packet, if timeout expires: flushes every
+ * non-empty per-port TX queue at most once per BURST_TX_DRAIN cycles. */
+static inline void
+send_timeout_burst(struct lcore_queue_conf *qconf)
+{
+       uint64_t cur_tsc;
+       uint8_t portid;
+
+       cur_tsc = rte_rdtsc();
+       /* too early: keep batching */
+       if (likely (cur_tsc < qconf->tx_tsc + BURST_TX_DRAIN))
+               return;
+
+       for (portid = 0; portid < MAX_PORTS; portid++) {
+               if (qconf->tx_mbufs[portid].len != 0)
+                       send_burst(qconf, portid);
+       }
+       qconf->tx_tsc = cur_tsc;
+}
+
+/* main processing loop: receive bursts from the lcore's RX queues,
+ * multicast-forward each packet, and periodically drain TX queues.
+ * Never returns; lcores with no RX queues busy-spin forever. */
+static __attribute__((noreturn)) int
+main_loop(__rte_unused void *dummy)
+{
+       struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
+       uint32_t lcore_id;
+       int i, j, nb_rx;
+       uint8_t portid;
+       struct lcore_queue_conf *qconf;
+
+       lcore_id = rte_lcore_id();
+       qconf = &lcore_queue_conf[lcore_id];
+
+
+       if (qconf->n_rx_queue == 0) {
+               RTE_LOG(INFO, IPv4_MULTICAST, "lcore %u has nothing to do\n",
+                   lcore_id);
+               /* idle busy-wait: the function is declared noreturn */
+               while(1);
+       }
+
+       RTE_LOG(INFO, IPv4_MULTICAST, "entering main loop on lcore %u\n",
+           lcore_id);
+
+       for (i = 0; i < qconf->n_rx_queue; i++) {
+
+               portid = qconf->rx_queue_list[i];
+               RTE_LOG(INFO, IPv4_MULTICAST, " -- lcoreid=%u portid=%d\n",
+                   lcore_id, (int) portid);
+       }
+
+       while (1) {
+
+               /*
+                * Read packet from RX queues
+                */
+               for (i = 0; i < qconf->n_rx_queue; i++) {
+
+                       portid = qconf->rx_queue_list[i];
+                       nb_rx = rte_eth_rx_burst(portid, 0, pkts_burst,
+                                                MAX_PKT_BURST);
+
+                       /* Prefetch first packets */
+                       for (j = 0; j < PREFETCH_OFFSET && j < nb_rx; j++) {
+                               rte_prefetch0(rte_pktmbuf_mtod(
+                                               pkts_burst[j], void *));
+                       }
+
+                       /* Prefetch and forward already prefetched packets:
+                        * software pipeline keeps PREFETCH_OFFSET packets
+                        * in flight ahead of the one being processed */
+                       for (j = 0; j < (nb_rx - PREFETCH_OFFSET); j++) {
+                               rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[
+                                               j + PREFETCH_OFFSET], void *));
+                               mcast_forward(pkts_burst[j], qconf);
+                       }
+
+                       /* Forward remaining prefetched packets */
+                       for (; j < nb_rx; j++) {
+                               mcast_forward(pkts_burst[j], qconf);
+                       }
+               }
+
+               /* Send out packets from TX queues */
+               send_timeout_burst(qconf);
+       }
+}
+
/* Print the command-line synopsis for the application options. */
static void
print_usage(const char *prgname)
{
	printf("%s [EAL options] -- -p PORTMASK [-q NQ]\n", prgname);
	printf("  -p PORTMASK: hexadecimal bitmask of ports to configure\n");
	printf("  -q NQ: number of queue (=ports) per lcore (default is 1)\n");
}
+
/*
 * Convert the hexadecimal port-mask argument into a bitmask.
 * Returns 0 when the string is empty or contains non-hex garbage;
 * the caller treats a zero mask as invalid.
 */
static uint32_t
parse_portmask(const char *portmask)
{
	char *endptr = NULL;
	unsigned long mask;

	/* interpret the argument as base 16 */
	mask = strtoul(portmask, &endptr, 16);

	if (portmask[0] == '\0' || endptr == NULL || *endptr != '\0')
		return 0;

	return (uint32_t)mask;
}
+
+static int
+parse_nqueue(const char *q_arg)
+{
+       char *end = NULL;
+       unsigned long n;
+
+       /* parse numerical string */
+       errno = 0;
+       n = strtoul(q_arg, &end, 0);
+       if (errno != 0 || end == NULL || *end != '\0' ||
+                       n == 0 || n >= MAX_RX_QUEUE_PER_LCORE)
+               return (-1);
+
+       return (n);
+}
+
/* Parse the argument given in the command line of the application */
/*
 * Handles the application options that follow the EAL arguments:
 *   -p PORTMASK  -> sets global enabled_port_mask
 *   -q NQ        -> sets global rx_queue_per_lcore
 * Returns the number of consumed arguments (optind-1) so the caller
 * can advance argc/argv, or -1 on any invalid option.
 */
static int
parse_args(int argc, char **argv)
{
	int opt, ret;
	char **argvopt;
	int option_index;
	char *prgname = argv[0];
	static struct option lgopts[] = {
		{NULL, 0, 0, 0}  /* no long options; terminator only */
	};

	argvopt = argv;

	while ((opt = getopt_long(argc, argvopt, "p:q:",
				  lgopts, &option_index)) != EOF) {

		switch (opt) {
		/* portmask */
		case 'p':
			enabled_port_mask = parse_portmask(optarg);
			if (enabled_port_mask == 0) {
				printf("invalid portmask\n");
				print_usage(prgname);
				return -1;
			}
			break;

		/* nqueue */
		case 'q':
			rx_queue_per_lcore = parse_nqueue(optarg);
			if (rx_queue_per_lcore < 0) {
				printf("invalid queue number\n");
				print_usage(prgname);
				return -1;
			}
			break;

		default:
			print_usage(prgname);
			return -1;
		}
	}

	/* Put the program name back just before the remaining (non-option)
	 * arguments so argv still looks like a normal command line. */
	if (optind >= 0)
		argv[optind-1] = prgname;

	ret = optind-1;
	optind = 0; /* reset getopt lib */
	return ret;
}
+
+static void
+print_ethaddr(const char *name, struct ether_addr *eth_addr)
+{
+       printf("%s%02X:%02X:%02X:%02X:%02X:%02X", name,
+              eth_addr->addr_bytes[0],
+              eth_addr->addr_bytes[1],
+              eth_addr->addr_bytes[2],
+              eth_addr->addr_bytes[3],
+              eth_addr->addr_bytes[4],
+              eth_addr->addr_bytes[5]);
+}
+
+static int
+init_mcast_hash(void)
+{
+       uint32_t i;
+
+       mcast_hash = rte_fbk_hash_create(&mcast_hash_params);
+       if (mcast_hash == NULL){
+               return -1;
+       }
+
+       for (i = 0; i < N_MCAST_GROUPS; i ++){
+               if (rte_fbk_hash_add_key(mcast_hash,
+                       mcast_group_table[i].ip,
+                       mcast_group_table[i].port_mask) < 0) {
+                       return -1;
+               }
+       }
+
+       return 0;
+}
+
+int
+MAIN(int argc, char **argv)
+{
+       struct lcore_queue_conf *qconf;
+       struct rte_eth_link link;
+       int ret;
+       uint16_t queueid;
+       unsigned lcore_id = 0, rx_lcore_id = 0;;
+       uint32_t n_tx_queue, nb_lcores;
+       uint8_t portid;
+
+       /* init EAL */
+       ret = rte_eal_init(argc, argv);
+       if (ret < 0)
+               rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n");
+       argc -= ret;
+       argv += ret;
+
+       /* parse application arguments (after the EAL ones) */
+       ret = parse_args(argc, argv);
+       if (ret < 0)
+               rte_exit(EXIT_FAILURE, "Invalid IPV4_MULTICAST parameters\n");
+
+       /* create the mbuf pools */
+       packet_pool = rte_mempool_create("packet_pool", NB_PKT_MBUF,
+           PKT_MBUF_SIZE, 32, sizeof(struct rte_pktmbuf_pool_private),
+           rte_pktmbuf_pool_init, NULL, rte_pktmbuf_init, NULL,
+           SOCKET0, 0);
+
+       if (packet_pool == NULL)
+               rte_exit(EXIT_FAILURE, "Cannot init packet mbuf pool\n");
+
+       header_pool = rte_mempool_create("header_pool", NB_HDR_MBUF,
+           HDR_MBUF_SIZE, 32, 0, NULL, NULL, rte_pktmbuf_init, NULL,
+           SOCKET0, 0);
+
+       if (header_pool == NULL)
+               rte_exit(EXIT_FAILURE, "Cannot init header mbuf pool\n");
+
+       clone_pool = rte_mempool_create("clone_pool", NB_CLONE_MBUF,
+           CLONE_MBUF_SIZE, 32, 0, NULL, NULL, rte_pktmbuf_init, NULL,
+           SOCKET0, 0);
+
+       if (clone_pool == NULL)
+               rte_exit(EXIT_FAILURE, "Cannot init clone mbuf pool\n");
+
+       /* init driver */
+#ifdef RTE_LIBRTE_IGB_PMD
+       if (rte_igb_pmd_init() < 0)
+               rte_exit(EXIT_FAILURE, "Cannot init igb pmd\n");
+#endif
+#ifdef RTE_LIBRTE_IXGBE_PMD
+       if (rte_ixgbe_pmd_init() < 0)
+               rte_exit(EXIT_FAILURE, "Cannot init ixgbe pmd\n");
+#endif
+
+       if (rte_eal_pci_probe() < 0)
+               rte_exit(EXIT_FAILURE, "Cannot probe PCI\n");
+
+       nb_ports = rte_eth_dev_count();
+       if (nb_ports == 0)
+               rte_exit(EXIT_FAILURE, "No physical ports!\n");
+       if (nb_ports > MAX_PORTS)
+               nb_ports = MAX_PORTS;
+
+       nb_lcores = rte_lcore_count();
+
+       /* initialize all ports */
+       for (portid = 0; portid < nb_ports; portid++) {
+               /* skip ports that are not enabled */
+               if ((enabled_port_mask & (1 << portid)) == 0) {
+                       printf("Skipping disabled port %d\n", portid);
+                       continue;
+               }
+
+               qconf = &lcore_queue_conf[rx_lcore_id];
+
+               /* get the lcore_id for this port */
+               while (rte_lcore_is_enabled(rx_lcore_id) == 0 ||
+                      qconf->n_rx_queue == (unsigned)rx_queue_per_lcore) {
+
+                       rx_lcore_id ++;
+                       qconf = &lcore_queue_conf[rx_lcore_id];
+
+                       if (rx_lcore_id >= RTE_MAX_LCORE)
+                               rte_exit(EXIT_FAILURE, "Not enough cores\n");
+               }
+               qconf->rx_queue_list[qconf->n_rx_queue] = portid;
+               qconf->n_rx_queue++;
+
+               /* init port */
+               printf("Initializing port %d on lcore %u... ", portid,
+                      rx_lcore_id);
+               fflush(stdout);
+
+               n_tx_queue = nb_lcores;
+               if (n_tx_queue > MAX_TX_QUEUE_PER_PORT)
+                       n_tx_queue = MAX_TX_QUEUE_PER_PORT;
+               ret = rte_eth_dev_configure(portid, 1, (uint16_t)n_tx_queue,
+                                           &port_conf);
+               if (ret < 0)
+                       rte_exit(EXIT_FAILURE, "Cannot configure device: err=%d, port=%d\n",
+                                 ret, portid);
+
+               rte_eth_macaddr_get(portid, &ports_eth_addr[portid]);
+               print_ethaddr(" Address:", &ports_eth_addr[portid]);
+               printf(", ");
+
+               /* init one RX queue */
+               queueid = 0;
+               printf("rxq=%hu ", queueid);
+               fflush(stdout);
+               ret = rte_eth_rx_queue_setup(portid, queueid, nb_rxd,
+                                            SOCKET0, &rx_conf,
+                                            packet_pool);
+               if (ret < 0)
+                       rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: err=%d, port=%d\n",
+                                 ret, portid);
+
+               /* init one TX queue per couple (lcore,port) */
+               queueid = 0;
+
+               RTE_LCORE_FOREACH(lcore_id) {
+                       if (rte_lcore_is_enabled(lcore_id) == 0)
+                               continue;
+                       printf("txq=%u,%hu ", lcore_id, queueid);
+                       fflush(stdout);
+                       ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
+                                                    SOCKET0, &tx_conf);
+                       if (ret < 0)
+                               rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: err=%d, "
+                                         "port=%d\n", ret, portid);
+
+                       qconf = &lcore_queue_conf[lcore_id];
+                       qconf->tx_queue_id[portid] = queueid;
+                       queueid++;
+               }
+
+               /* Start device */
+               ret = rte_eth_dev_start(portid);
+               if (ret < 0)
+                       rte_exit(EXIT_FAILURE, "rte_eth_dev_start: err=%d, port=%d\n",
+                                 ret, portid);
+
+               printf("done: ");
+
+               /* get link status */
+               rte_eth_link_get(portid, &link);
+               if (link.link_status) {
+                       printf(" Link Up - speed %u Mbps - %s\n",
+                              (uint32_t) link.link_speed,
+                              (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
+                              ("full-duplex") : ("half-duplex\n"));
+                       rte_eth_promiscuous_enable(portid);
+                       rte_eth_allmulticast_enable(portid);
+               } else {
+                       printf(" Link Down\n");
+               }
+       }
+
+
+       /* initialize the multicast hash */
+       int retval = init_mcast_hash();
+       if (retval != 0)
+               rte_exit(EXIT_FAILURE, "Cannot build the multicast hash\n");
+
+       /* launch per-lcore init on every lcore */
+       rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
+       RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+               if (rte_eal_wait_lcore(lcore_id) < 0)
+                       return -1;
+       }
+
+       return 0;
+}
diff --git a/examples/ipv4_multicast/main.h b/examples/ipv4_multicast/main.h
new file mode 100644 (file)
index 0000000..740cf4c
--- /dev/null
@@ -0,0 +1,48 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
#ifndef _MAIN_H_
#define _MAIN_H_

/*
 * In the baremetal execution environment the application entry point
 * must be named _main; under Linux it is the usual main.  The MAIN
 * macro lets one definition serve both environments.
 */
#ifdef RTE_EXEC_ENV_BAREMETAL
#define MAIN _main
#else
#define MAIN main
#endif


/* Application entry point (see main.c). */
int MAIN(int argc, char **argv);

#endif /* _MAIN_H_ */
diff --git a/examples/l2fwd-vf/496039_L2Forwarding_VirtEnv_Sample_App_Guide_Rev1.0.pdf b/examples/l2fwd-vf/496039_L2Forwarding_VirtEnv_Sample_App_Guide_Rev1.0.pdf
new file mode 100644 (file)
index 0000000..750d025
Binary files /dev/null and b/examples/l2fwd-vf/496039_L2Forwarding_VirtEnv_Sample_App_Guide_Rev1.0.pdf differ
diff --git a/examples/l2fwd-vf/Makefile b/examples/l2fwd-vf/Makefile
new file mode 100644 (file)
index 0000000..39ed08b
--- /dev/null
@@ -0,0 +1,53 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
# Build rules for the l2fwd-vf sample application, using the DPDK
# external-application build framework (rte.extapp.mk).

# RTE_SDK must point at the DPDK installation root.
ifeq ($(RTE_SDK),)
$(error "Please define RTE_SDK environment variable")
endif

# Default target, can be overridden by command line or environment
RTE_TARGET ?= x86_64-default-linuxapp-gcc

include $(RTE_SDK)/mk/rte.vars.mk

# binary name
APP = l2fwd-vf

# all source are stored in SRCS-y
SRCS-y := main.c

CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)

include $(RTE_SDK)/mk/rte.extapp.mk
+
diff --git a/examples/l2fwd-vf/main.c b/examples/l2fwd-vf/main.c
new file mode 100644 (file)
index 0000000..836e85a
--- /dev/null
@@ -0,0 +1,708 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <sys/types.h>
+#include <string.h>
+#include <sys/queue.h>
+#include <netinet/in.h>
+#include <setjmp.h>
+#include <stdarg.h>
+#include <ctype.h>
+#include <errno.h>
+#include <getopt.h>
+
+#include <rte_common.h>
+#include <rte_log.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_memzone.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_launch.h>
+#include <rte_atomic.h>
+#include <rte_cycles.h>
+#include <rte_prefetch.h>
+#include <rte_lcore.h>
+#include <rte_per_lcore.h>
+#include <rte_branch_prediction.h>
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_random.h>
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_ring.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+
+#include "main.h"
+
/* Log under the USER1 log type. */
#define RTE_LOGTYPE_L2FWD RTE_LOGTYPE_USER1

/* Upper bound on the number of ports the application handles. */
#define L2FWD_MAX_PORTS 32

/* mbuf element size: 2 KB data room plus metadata and headroom. */
#define MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
#define NB_MBUF   8192

/*
 * RX and TX Prefetch, Host, and Write-back threshold values should be
 * carefully set for optimal performance. Consult the network
 * controller's datasheet and supporting DPDK documentation for guidance
 * on how these parameters should be set.
 */
#define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */
#define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */
#define RX_WTHRESH 4 /**< Default values of RX write-back threshold reg. */

/*
 * These default values are optimized for use with the Intel(R) 82599 10 GbE
 * Controller and the DPDK ixgbe PMD. Consider using other values for other
 * network controllers and/or network drivers.
 */
#define TX_PTHRESH 36 /**< Default values of TX prefetch threshold reg. */
#define TX_HTHRESH 0  /**< Default values of TX host threshold reg. */
#define TX_WTHRESH 0  /**< Default values of TX write-back threshold reg. */

#define MAX_PKT_BURST 32
#define BURST_TX_DRAIN 200000ULL /* around 100us at 2 Ghz */

/* All allocations are made on socket 0. */
#define SOCKET0 0

/*
 * Configurable number of RX/TX ring descriptors
 */
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512
static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;

/* ethernet addresses of ports */
static struct ether_addr l2fwd_ports_eth_addr[L2FWD_MAX_PORTS];

/* mask of enabled ports */
static uint32_t l2fwd_enabled_port_mask = 0;

/* list of enabled ports */
static uint32_t l2fwd_dst_ports[L2FWD_MAX_PORTS];

/* RX queues (= ports) polled by each lcore; set via the -q option. */
static unsigned int l2fwd_rx_queue_per_lcore = 1;
+
+#define MAX_PKT_BURST 32
+struct mbuf_table {
+       unsigned len;
+       struct rte_mbuf *m_table[MAX_PKT_BURST];
+};
+
/* Maximum number of RX queues (ports) a single lcore may poll. */
#define MAX_RX_QUEUE_PER_LCORE 16

/* Each VF(port) has one Rx/Tx queue (with queueid: 0) */
struct lcore_queue_conf {

	unsigned n_rx_queue;                              /* RX queues in use */
	unsigned rx_queue_list[MAX_RX_QUEUE_PER_LCORE];   /* port ids polled */
	unsigned tx_queue_id;                             /* TX queue for this lcore */
	struct mbuf_table tx_mbufs[L2FWD_MAX_PORTS];      /* per-port TX buffers */

} __rte_cache_aligned;
struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];

/* Port configuration: all hardware offloads disabled, CRC stripped. */
static const struct rte_eth_conf port_conf = {
	.rxmode = {
		.split_hdr_size = 0,
		.header_split   = 0, /**< Header Split disabled */
		.hw_ip_checksum = 0, /**< IP checksum offload disabled */
		.hw_vlan_filter = 0, /**< VLAN filtering disabled */
		.jumbo_frame    = 0, /**< Jumbo Frame Support disabled */
		.hw_strip_crc   = 1, /**< CRC stripped by hardware */
	},
	.txmode = {
	},
};

static const struct rte_eth_rxconf rx_conf = {
	.rx_thresh = {
		.pthresh = RX_PTHRESH,
		.hthresh = RX_HTHRESH,
		.wthresh = RX_WTHRESH,
	},
};

static const struct rte_eth_txconf tx_conf = {
	.tx_thresh = {
		.pthresh = TX_PTHRESH,
		.hthresh = TX_HTHRESH,
		.wthresh = TX_WTHRESH,
	},
	.tx_free_thresh = 0, /* Use PMD default values */
	.tx_rs_thresh = 0, /* Use PMD default values */
};

/* Single mbuf pool shared by all RX queues. */
struct rte_mempool * l2fwd_pktmbuf_pool = NULL;

/* Per-port statistics struct */
struct l2fwd_port_statistics {
	uint64_t tx;       /* packets successfully transmitted */
	uint64_t rx;       /* packets received */
	uint64_t dropped;  /* packets dropped on TX overflow */
} __rte_cache_aligned;
struct l2fwd_port_statistics port_statistics[L2FWD_MAX_PORTS];

/* A tsc-based timer responsible for triggering statistics printout */
#define TIMER_MILLISECOND 2000000ULL /* around 1ms at 2 Ghz */
#define MAX_TIMER_PERIOD 86400 /* 1 day max */
static int64_t timer_period = 10 * TIMER_MILLISECOND * 1000; /* default period is 10 seconds */
+
/* Print out statistics on packets dropped */
/*
 * Clears the terminal (ANSI escape sequences) and prints per-port and
 * aggregate TX/RX/drop counters for every enabled port.  Called
 * periodically from the master lcore's main loop.
 */
static void
print_stats(void)
{
	uint64_t total_packets_dropped, total_packets_tx, total_packets_rx;
	unsigned portid;

	total_packets_dropped = 0;
	total_packets_tx = 0;
	total_packets_rx = 0;

	/* ANSI escapes: ESC[2J clears the screen, ESC[1;1H homes the cursor */
	const char clr[] = { 27, '[', '2', 'J', '\0' };
	const char topLeft[] = { 27, '[', '1', ';', '1', 'H','\0' };

	/* Clear screen and move to top left */
	printf("%s%s", clr, topLeft);

	printf("\nPort statistics ====================================");

	for (portid = 0; portid < L2FWD_MAX_PORTS; portid++) {
		/* skip ports that are not enabled */
		if ((l2fwd_enabled_port_mask & (1 << portid)) == 0)
			continue;

		printf("\nStatistics for port %u ------------------------------"
			   "\nPackets sent: %24"PRIu64
			   "\nPackets received: %20"PRIu64
			   "\nPackets dropped: %21"PRIu64,
			   portid,
			   port_statistics[portid].tx,
			   port_statistics[portid].rx,
			   port_statistics[portid].dropped);

		total_packets_dropped += port_statistics[portid].dropped;
		total_packets_tx += port_statistics[portid].tx;
		total_packets_rx += port_statistics[portid].rx;
	}
	printf("\nAggregate statistics ==============================="
		   "\nTotal packets sent: %18"PRIu64
		   "\nTotal packets received: %14"PRIu64
		   "\nTotal packets dropped: %15"PRIu64,
		   total_packets_tx,
		   total_packets_rx,
		   total_packets_dropped);
	printf("\n====================================================\n");
}
+
+/* Send the packet on an output interface */
+static int
+l2fwd_send_burst(struct lcore_queue_conf *qconf, unsigned n, uint8_t port)
+{
+       struct rte_mbuf **m_table;
+       unsigned ret;
+       unsigned queueid;
+
+       queueid = (uint16_t) qconf->tx_queue_id;
+       m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table;
+
+       ret = rte_eth_tx_burst(port, (uint16_t) queueid, m_table, (uint16_t) n);
+
+       port_statistics[port].tx += ret;
+       if (unlikely(ret < n)) {
+               port_statistics[port].dropped += (n - ret);
+               do {
+                       rte_pktmbuf_free(m_table[ret]);
+               } while (++ret < n);
+       }
+
+       return 0;
+}
+
+/* Send the packet on an output interface */
+static int
+l2fwd_send_packet(struct rte_mbuf *m, uint8_t port)
+{
+       unsigned lcore_id, len;
+       struct lcore_queue_conf *qconf;
+
+       lcore_id = rte_lcore_id();
+
+       qconf = &lcore_queue_conf[lcore_id];
+       len = qconf->tx_mbufs[port].len;
+       qconf->tx_mbufs[port].m_table[len] = m;
+       len++;
+
+       /* enough pkts to be sent */
+       if (unlikely(len == MAX_PKT_BURST)) {
+               l2fwd_send_burst(qconf, MAX_PKT_BURST, port);
+               len = 0;
+       }
+
+       qconf->tx_mbufs[port].len = len;
+       return 0;
+}
+
+static void
+l2fwd_simple_forward(struct rte_mbuf *m, unsigned portid)
+{
+       struct ether_hdr *eth;
+       void *tmp;
+       unsigned dst_port;
+
+       dst_port = l2fwd_dst_ports[portid];
+       eth = rte_pktmbuf_mtod(m, struct ether_hdr *);
+
+       /* 00:09:c0:00:00:xx */
+       tmp = &eth->d_addr.addr_bytes[0];
+       *((uint64_t *)tmp) = 0x000000c00900 + (dst_port << 24);
+
+       /* src addr */
+       ether_addr_copy(&l2fwd_ports_eth_addr[dst_port], &eth->s_addr);
+
+       l2fwd_send_packet(m, (uint8_t) dst_port);
+}
+
/* main processing loop */
/*
 * Per-lcore forwarding loop: polls the RX queues assigned to this
 * lcore, forwards each packet via l2fwd_simple_forward(), drains the
 * TX buffers when BURST_TX_DRAIN cycles have elapsed, and (on the
 * master lcore only) prints statistics every timer_period.  Never
 * returns.
 */
static void
l2fwd_main_loop(void)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	struct rte_mbuf *m;
	unsigned lcore_id;
	unsigned int nb_ports;
	uint64_t prev_tsc = 0;
	uint64_t diff_tsc, cur_tsc, timer_tsc;
	unsigned i, j, portid, nb_rx;
	struct lcore_queue_conf *qconf;

	timer_tsc = 0;

	lcore_id = rte_lcore_id();
	qconf = &lcore_queue_conf[lcore_id];

	/* cap the port count to what the application can address */
	nb_ports = rte_eth_dev_count();
	if (nb_ports > L2FWD_MAX_PORTS)
		nb_ports = L2FWD_MAX_PORTS;

	if (qconf->n_rx_queue == 0) {
		RTE_LOG(INFO, L2FWD, "lcore %u has nothing to do\n", lcore_id);
		/* idle lcores spin forever; this function never returns */
		while(1);
	}

	RTE_LOG(INFO, L2FWD, "entering main loop on lcore %u\n", lcore_id);

	/* log this lcore's port assignment */
	for (i = 0; i < qconf->n_rx_queue; i++) {

		portid = qconf->rx_queue_list[i];
		RTE_LOG(INFO, L2FWD, " -- lcoreid=%u portid=%u\n", lcore_id,
			portid);
	}

	while (1) {

		cur_tsc = rte_rdtsc();

		/*
		 * TX burst queue drain
		 */
		diff_tsc = cur_tsc - prev_tsc;
		if (unlikely(diff_tsc > BURST_TX_DRAIN)) {

			/* flush every non-empty per-port TX buffer */
			for (portid = 0; portid < nb_ports; portid++) {
				if (qconf->tx_mbufs[portid].len == 0)
					continue;
				l2fwd_send_burst(&lcore_queue_conf[lcore_id],
						 qconf->tx_mbufs[portid].len,
						 (uint8_t) portid);
				qconf->tx_mbufs[portid].len = 0;
			}

			/* if timer is enabled */
			if (timer_period > 0) {

				/* advance the timer */
				timer_tsc += diff_tsc;

				/* if timer has reached its timeout */
				if (unlikely(timer_tsc >= (uint64_t) timer_period)) {

					/* do this only on master core */
					if (lcore_id == rte_get_master_lcore()) {
						print_stats();
						/* reset the timer */
						timer_tsc = 0;
					}
				}
			}

			prev_tsc = cur_tsc;
		}
		/*
		 * Read packet from RX queues
		 */
		for (i = 0; i < qconf->n_rx_queue; i++) {

			portid = qconf->rx_queue_list[i];
			nb_rx = rte_eth_rx_burst((uint8_t) portid, 0,
						 pkts_burst, MAX_PKT_BURST);

			port_statistics[portid].rx += nb_rx;

			for (j = 0; j < nb_rx; j++) {
				m = pkts_burst[j];
				rte_prefetch0(rte_pktmbuf_mtod(m, void *));
				l2fwd_simple_forward(m, portid);
			}
		}
	}
}
+
/*
 * Per-lcore entry point handed to rte_eal_mp_remote_launch(): simply
 * enters the forwarding loop.  l2fwd_main_loop() never returns, so the
 * return statement is unreachable in practice.
 */
static int
l2fwd_launch_one_lcore(__attribute__((unused)) void *dummy)
{
	l2fwd_main_loop();
	return 0;
}
+
/* Print the command-line synopsis for the application options. */
static void
l2fwd_usage(const char *prgname)
{
	printf("%s [EAL options] -- -p PORTMASK [-q NQ]\n", prgname);
	printf("  -p PORTMASK: hexadecimal bitmask of ports to configure\n");
	printf("  -q NQ: number of queue (=ports) per lcore (default is 1)\n");
	printf("  -T PERIOD: statistics will be refreshed each PERIOD seconds (0 to disable, 10 default, 86400 maximum)\n");
}
+
/*
 * Parse the hexadecimal port-mask argument.
 *
 * Returns the mask, or 0 on error (empty string, trailing garbage, or
 * an all-zero mask).
 *
 * Fix: the previous version returned -1 on parse errors, but the
 * caller stores the result into the unsigned l2fwd_enabled_port_mask
 * and only checks for 0 — so -1 silently became the mask 0xffffffff
 * and invalid input enabled every port.  Returning 0 makes the
 * caller's validity check actually reject bad input.
 */
static int
l2fwd_parse_portmask(const char *portmask)
{
	char *end = NULL;
	unsigned long pm;

	/* parse hexadecimal string */
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
		return 0;

	/* a zero mask is equally invalid; returning pm covers it */
	return pm;
}
+
+/*
+ * Parse the per-lcore RX queue count.
+ * Returns a value in [1, MAX_RX_QUEUE_PER_LCORE - 1], or 0 on any error
+ * (empty, malformed, zero, or out of range).
+ */
+static unsigned int
+l2fwd_parse_nqueue(const char *q_arg)
+{
+       char *end = NULL;
+       unsigned long n;
+
+       /* parse decimal string (base 10 -- the original comment said hex) */
+       n = strtoul(q_arg, &end, 10);
+       if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
+               return 0;
+       if (n == 0)
+               return 0;
+       if (n >= MAX_RX_QUEUE_PER_LCORE)
+               return 0;
+
+       return n;
+}
+
+/*
+ * Parse the statistics refresh period in seconds.
+ * Returns the parsed value, or -1 on empty/malformed input or a value
+ * >= MAX_TIMER_PERIOD.  Negative inputs are returned unchanged; the
+ * caller's (timer_period < 0) check rejects them after scaling.
+ */
+static int
+l2fwd_parse_timer_period(const char *q_arg)
+{
+       char *end = NULL;
+       int n;
+
+       /* parse number string */
+       n = strtol(q_arg, &end, 10);
+       if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
+               return -1;
+       if (n >= MAX_TIMER_PERIOD)
+               return -1;
+
+       return n;
+}
+
+/* Parse the argument given in the command line of the application */
+/* Returns the index of the last consumed argument on success (so the
+ * caller can treat argv[ret..] as a fresh argv), or -1 on error. */
+static int
+l2fwd_parse_args(int argc, char **argv)
+{
+       int opt, ret;
+       char **argvopt;
+       int option_index;
+       char *prgname = argv[0];
+       static struct option lgopts[] = {
+               {NULL, 0, 0, 0}
+       };
+
+       argvopt = argv;
+
+       while ((opt = getopt_long(argc, argvopt, "p:q:T:",
+                                 lgopts, &option_index)) != EOF) {
+
+               switch (opt) {
+               /* portmask */
+               case 'p':
+                       l2fwd_enabled_port_mask = l2fwd_parse_portmask(optarg);
+                       if (l2fwd_enabled_port_mask == 0) {
+                               printf("invalid portmask\n");
+                               l2fwd_usage(prgname);
+                               return -1;
+                       }
+                       break;
+
+               /* nqueue */
+               case 'q':
+                       l2fwd_rx_queue_per_lcore = l2fwd_parse_nqueue(optarg);
+                       if (l2fwd_rx_queue_per_lcore == 0) {
+                               printf("invalid queue number\n");
+                               l2fwd_usage(prgname);
+                               return -1;
+                       }
+                       break;
+
+               /* timer period */
+               case 'T':
+                       timer_period = l2fwd_parse_timer_period(optarg) * 1000 * TIMER_MILLISECOND;
+                       if (timer_period < 0) {
+                               printf("invalid timer period\n");
+                               l2fwd_usage(prgname);
+                               return -1;
+                       }
+                       break;
+
+               /* long options */
+               case 0:
+                       l2fwd_usage(prgname);
+                       return -1;
+
+               default:
+                       l2fwd_usage(prgname);
+                       return -1;
+               }
+       }
+
+       /* put program name back so argv[ret] looks like a fresh argv[0] */
+       if (optind >= 0)
+               argv[optind-1] = prgname;
+
+       ret = optind-1;
+       optind = 0; /* reset getopt lib */
+       return ret;
+}
+
+int
+MAIN(int argc, char **argv)
+{
+       struct lcore_queue_conf *qconf;
+       int ret;
+       unsigned int nb_ports;
+       unsigned portid;
+       unsigned lcore_id, rx_lcore_id;
+       unsigned last_port;
+       unsigned nb_ports_in_mask = 0;
+
+       /* init EAL */
+       ret = rte_eal_init(argc, argv);
+       if (ret < 0)
+               rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n");
+       argc -= ret;
+       argv += ret;
+
+       /* parse application arguments (after the EAL ones) */
+       ret = l2fwd_parse_args(argc, argv);
+       if (ret < 0)
+               rte_exit(EXIT_FAILURE, "Invalid L2FWD-VF parameters\n");
+
+       /* create the mbuf pool */
+       l2fwd_pktmbuf_pool =
+               rte_mempool_create("mbuf_pool", NB_MBUF,
+                                  MBUF_SIZE, 32,
+                                  sizeof(struct rte_pktmbuf_pool_private),
+                                  rte_pktmbuf_pool_init, NULL,
+                                  rte_pktmbuf_init, NULL,
+                                  SOCKET0, 0);
+       if (l2fwd_pktmbuf_pool == NULL)
+               rte_exit(EXIT_FAILURE, "Cannot init mbuf pool\n");
+
+       /* init driver(s) */
+#ifdef RTE_LIBRTE_IGB_PMD
+       if (rte_igb_pmd_init() < 0)
+               rte_exit(EXIT_FAILURE, "Cannot init igb pmd\n");
+#endif
+#ifdef RTE_LIBRTE_IXGBE_PMD
+       if (rte_ixgbe_pmd_init() < 0)
+               rte_exit(EXIT_FAILURE, "Cannot init ixgbe pmd\n");
+
+       if (rte_ixgbevf_pmd_init() < 0)
+               rte_exit(EXIT_FAILURE, "Cannot init ixgbevf pmd\n");
+#endif
+
+       if (rte_eal_pci_probe() < 0)
+               rte_exit(EXIT_FAILURE, "Cannot probe PCI\n");
+
+       nb_ports = rte_eth_dev_count();
+       if (nb_ports == 0)
+               rte_exit(EXIT_FAILURE, "No Ethernet port - bye\n");
+
+       if (nb_ports > L2FWD_MAX_PORTS)
+               nb_ports = L2FWD_MAX_PORTS;
+
+       /* reset l2fwd_dst_ports */
+       for (portid = 0; portid < L2FWD_MAX_PORTS; portid++)
+               l2fwd_dst_ports[portid] = 0;
+       last_port = 0;
+
+       rx_lcore_id = 0;
+       qconf = &lcore_queue_conf[rx_lcore_id];
+
+       /*
+        * Initialize the lcore/port-rx-queue configuration of each lcore.
+        * NOTE: Each logical core sends packets out to all port-tx-queues
+        */
+       for (portid = 0; portid < nb_ports; portid++) {
+
+               /* skip ports that are not enabled */
+               if ((l2fwd_enabled_port_mask & (1 << portid)) == 0)
+                       continue;
+
+               if (nb_ports_in_mask % 2) {
+                       l2fwd_dst_ports[portid] = last_port;
+                       l2fwd_dst_ports[last_port] = portid;
+               }
+               else
+                       last_port = portid;
+
+               nb_ports_in_mask++;
+
+               while (rte_lcore_is_enabled(rx_lcore_id) == 0 ||
+                                      lcore_queue_conf[rx_lcore_id].n_rx_queue ==
+                                      l2fwd_rx_queue_per_lcore) {
+
+                       rx_lcore_id++;
+                       if (rx_lcore_id >= RTE_MAX_LCORE)
+                               rte_exit(EXIT_FAILURE, "Not enough cores\n");
+               }
+
+               qconf = &lcore_queue_conf[rx_lcore_id];
+               qconf->tx_queue_id = 0;
+               qconf->rx_queue_list[qconf->n_rx_queue] = portid;
+               qconf->n_rx_queue++;
+
+               printf("Lcore %u: RX port %u\n", rx_lcore_id, portid);
+
+       }
+
+       /* Initialise each port */
+       for (portid = 0; portid < nb_ports; portid++) {
+
+               /* skip ports that are not enabled */
+               if ((l2fwd_enabled_port_mask & (1 << portid)) == 0) {
+                       printf("Skipping disabled port %u\n", portid);
+                       continue;
+               }
+
+               /* init port */
+               printf("Initializing port %u... ", portid);
+               fflush(stdout);
+               ret = rte_eth_dev_configure((uint8_t) portid, 1, 1, &port_conf);
+               if (ret < 0)
+                       rte_exit(EXIT_FAILURE, "Cannot configure device: err=%d, port=%u\n",
+                                 ret, portid);
+
+               rte_eth_macaddr_get((uint8_t) portid,
+                                   &l2fwd_ports_eth_addr[portid]);
+
+               /* init one RX queue */
+               fflush(stdout);
+               ret = rte_eth_rx_queue_setup((uint8_t) portid, 0, nb_rxd,
+                                            SOCKET0, &rx_conf,
+                                            l2fwd_pktmbuf_pool);
+               if (ret < 0)
+                       rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: err=%d, port=%u\n",
+                                 ret, portid);
+
+               /* init one TX queue */
+               fflush(stdout);
+               ret = rte_eth_tx_queue_setup((uint8_t) portid, 0, nb_txd,
+                                            SOCKET0, &tx_conf);
+               if (ret < 0)
+                       rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: err=%d, port=%u\n",
+                                 ret, portid);
+
+               /* Start device */
+               ret = rte_eth_dev_start((uint8_t) portid);
+               if (ret < 0)
+                       rte_exit(EXIT_FAILURE, "rte_eth_dev_start: err=%d, port=%u\n",
+                                 ret, portid);
+
+               printf("done: ");
+               fflush(stdout);
+
+               printf("Port %u, MAC address: %02X:%02X:%02X:%02X:%02X:%02X\n\n",
+                               portid,
+                               l2fwd_ports_eth_addr[portid].addr_bytes[0],
+                               l2fwd_ports_eth_addr[portid].addr_bytes[1],
+                               l2fwd_ports_eth_addr[portid].addr_bytes[2],
+                               l2fwd_ports_eth_addr[portid].addr_bytes[3],
+                               l2fwd_ports_eth_addr[portid].addr_bytes[4],
+                               l2fwd_ports_eth_addr[portid].addr_bytes[5]);
+
+               /* initialize port stats */
+               memset(&port_statistics, 0, sizeof(port_statistics));
+       }
+
+       /* launch per-lcore init on every lcore */
+       rte_eal_mp_remote_launch(l2fwd_launch_one_lcore, NULL, CALL_MASTER);
+       RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+               if (rte_eal_wait_lcore(lcore_id) < 0)
+                       return -1;
+       }
+
+       return 0;
+}
diff --git a/examples/l2fwd-vf/main.h b/examples/l2fwd-vf/main.h
new file mode 100644 (file)
index 0000000..6027cb5
--- /dev/null
@@ -0,0 +1,47 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#ifndef _MAIN_H_
+#define _MAIN_H_
+
+#ifdef RTE_EXEC_ENV_BAREMETAL
+#define MAIN _main
+#else
+#define MAIN main
+#endif
+
+int MAIN(int argc, char **argv);
+
+#endif /* _MAIN_H_ */
diff --git a/examples/l2fwd/482250_L2Forwarding_Sample_App_Guide_Rev1.1.pdf b/examples/l2fwd/482250_L2Forwarding_Sample_App_Guide_Rev1.1.pdf
new file mode 100644 (file)
index 0000000..0d83075
Binary files /dev/null and b/examples/l2fwd/482250_L2Forwarding_Sample_App_Guide_Rev1.1.pdf differ
diff --git a/examples/l2fwd/Makefile b/examples/l2fwd/Makefile
new file mode 100644 (file)
index 0000000..7d78bf7
--- /dev/null
@@ -0,0 +1,52 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+ifeq ($(RTE_SDK),)
+$(error "Please define RTE_SDK environment variable")
+endif
+
+# Default target, can be overridden by command line or environment
+RTE_TARGET ?= x86_64-default-linuxapp-gcc
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# binary name
+APP = l2fwd
+
+# all source are stored in SRCS-y
+SRCS-y := main.c
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+include $(RTE_SDK)/mk/rte.extapp.mk
diff --git a/examples/l2fwd/main.c b/examples/l2fwd/main.c
new file mode 100644 (file)
index 0000000..75ddfc7
--- /dev/null
@@ -0,0 +1,745 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <sys/types.h>
+#include <string.h>
+#include <sys/queue.h>
+#include <netinet/in.h>
+#include <setjmp.h>
+#include <stdarg.h>
+#include <ctype.h>
+#include <errno.h>
+#include <getopt.h>
+
+#include <rte_common.h>
+#include <rte_log.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_memzone.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_launch.h>
+#include <rte_atomic.h>
+#include <rte_cycles.h>
+#include <rte_prefetch.h>
+#include <rte_lcore.h>
+#include <rte_per_lcore.h>
+#include <rte_branch_prediction.h>
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_random.h>
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_ring.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+
+#include "main.h"
+
+#define RTE_LOGTYPE_L2FWD RTE_LOGTYPE_USER1
+
+#define L2FWD_MAX_PORTS 32
+
+#define MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
+#define NB_MBUF   8192
+
+/*
+ * RX and TX Prefetch, Host, and Write-back threshold values should be
+ * carefully set for optimal performance. Consult the network
+ * controller's datasheet and supporting DPDK documentation for guidance
+ * on how these parameters should be set.
+ */
+#define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */
+#define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */
+#define RX_WTHRESH 4 /**< Default values of RX write-back threshold reg. */
+
+/*
+ * These default values are optimized for use with the Intel(R) 82599 10 GbE
+ * Controller and the DPDK ixgbe PMD. Consider using other values for other
+ * network controllers and/or network drivers.
+ */
+#define TX_PTHRESH 36 /**< Default values of TX prefetch threshold reg. */
+#define TX_HTHRESH 0  /**< Default values of TX host threshold reg. */
+#define TX_WTHRESH 0  /**< Default values of TX write-back threshold reg. */
+
+#define MAX_PKT_BURST 32
+#define BURST_TX_DRAIN 200000ULL /* around 100us at 2 Ghz */
+
+#define SOCKET0 0
+
+/*
+ * Configurable number of RX/TX ring descriptors
+ */
+#define RTE_TEST_RX_DESC_DEFAULT 128
+#define RTE_TEST_TX_DESC_DEFAULT 512
+static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
+static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
+
+/* ethernet addresses of ports */
+static struct ether_addr l2fwd_ports_eth_addr[L2FWD_MAX_PORTS];
+
+/* mask of enabled ports */
+static uint32_t l2fwd_enabled_port_mask = 0;
+
+/* list of enabled ports */
+static uint32_t l2fwd_dst_ports[L2FWD_MAX_PORTS];
+
+static unsigned int l2fwd_rx_queue_per_lcore = 1;
+
+/* NOTE(review): duplicate -- MAX_PKT_BURST is already defined with the
+ * same value above; benign in C, but one of the two should be removed. */
+#define MAX_PKT_BURST 32
+/* Per-destination-port TX staging buffer: packets are accumulated here
+ * until a full burst (MAX_PKT_BURST) is handed to the PMD. */
+struct mbuf_table {
+       unsigned len;
+       struct rte_mbuf *m_table[MAX_PKT_BURST];
+};
+
+#define MAX_RX_QUEUE_PER_LCORE 16
+#define MAX_TX_QUEUE_PER_PORT 16
+/* Per-lcore state: the RX ports this lcore polls plus one TX staging
+ * table per possible destination port. Cache-aligned to avoid false
+ * sharing between lcores. */
+struct lcore_queue_conf {
+       unsigned n_rx_queue;
+       unsigned rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
+       unsigned tx_queue_id;
+       struct mbuf_table tx_mbufs[L2FWD_MAX_PORTS];
+
+} __rte_cache_aligned;
+struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
+
+static const struct rte_eth_conf port_conf = {
+       .rxmode = {
+               .split_hdr_size = 0,
+               .header_split   = 0, /**< Header Split disabled */
+               .hw_ip_checksum = 0, /**< IP checksum offload disabled */
+               .hw_vlan_filter = 0, /**< VLAN filtering disabled */
+               .jumbo_frame    = 0, /**< Jumbo Frame Support disabled */
+               .hw_strip_crc   = 0, /**< CRC stripped by hardware */
+       },
+       .txmode = {
+       },
+};
+
+static const struct rte_eth_rxconf rx_conf = {
+       .rx_thresh = {
+               .pthresh = RX_PTHRESH,
+               .hthresh = RX_HTHRESH,
+               .wthresh = RX_WTHRESH,
+       },
+};
+
+static const struct rte_eth_txconf tx_conf = {
+       .tx_thresh = {
+               .pthresh = TX_PTHRESH,
+               .hthresh = TX_HTHRESH,
+               .wthresh = TX_WTHRESH,
+       },
+       .tx_free_thresh = 0, /* Use PMD default values */
+       .tx_rs_thresh = 0, /* Use PMD default values */
+};
+
+struct rte_mempool * l2fwd_pktmbuf_pool = NULL;
+
+/* Per-port statistics struct */
+/* NOTE(review): counters are plain uint64_t updated without atomics;
+ * this appears safe only if each port's RX/TX path is touched by a
+ * single lcore -- verify against the lcore/port mapping in MAIN. */
+struct l2fwd_port_statistics {
+       uint64_t tx;
+       uint64_t rx;
+       uint64_t dropped;
+} __rte_cache_aligned;
+struct l2fwd_port_statistics port_statistics[L2FWD_MAX_PORTS];
+
+/* A tsc-based timer responsible for triggering statistics printout */
+#define TIMER_MILLISECOND 2000000ULL /* around 1ms at 2 Ghz */
+#define MAX_TIMER_PERIOD 86400 /* 1 day max */
+static int64_t timer_period = 10 * TIMER_MILLISECOND * 1000; /* default period is 10 seconds */
+
+/* Print out statistics on packets dropped */
+/* Repaints the terminal (ANSI clear-screen + cursor-home) with per-port
+ * and aggregate RX/TX/drop counters for every enabled port. */
+static void
+print_stats(void)
+{
+       uint64_t total_packets_dropped, total_packets_tx, total_packets_rx;
+       unsigned portid;
+
+       total_packets_dropped = 0;
+       total_packets_tx = 0;
+       total_packets_rx = 0;
+
+       /* ESC[2J = clear screen, ESC[1;1H = cursor to top-left */
+       const char clr[] = { 27, '[', '2', 'J', '\0' };
+       const char topLeft[] = { 27, '[', '1', ';', '1', 'H','\0' };
+
+               /* Clear screen and move to top left */
+       printf("%s%s", clr, topLeft);
+
+       printf("\nPort statistics ====================================");
+
+       for (portid = 0; portid < L2FWD_MAX_PORTS; portid++) {
+               /* skip disabled ports */
+               if ((l2fwd_enabled_port_mask & (1 << portid)) == 0)
+                       continue;
+               printf("\nStatistics for port %u ------------------------------"
+                          "\nPackets sent: %24"PRIu64
+                          "\nPackets received: %20"PRIu64
+                          "\nPackets dropped: %21"PRIu64,
+                          portid,
+                          port_statistics[portid].tx,
+                          port_statistics[portid].rx,
+                          port_statistics[portid].dropped);
+
+               total_packets_dropped += port_statistics[portid].dropped;
+               total_packets_tx += port_statistics[portid].tx;
+               total_packets_rx += port_statistics[portid].rx;
+       }
+       printf("\nAggregate statistics ==============================="
+                  "\nTotal packets sent: %18"PRIu64
+                  "\nTotal packets received: %14"PRIu64
+                  "\nTotal packets dropped: %15"PRIu64,
+                  total_packets_tx,
+                  total_packets_rx,
+                  total_packets_dropped);
+       printf("\n====================================================\n");
+}
+
+/* Send the packet on an output interface */
+/* Flush n staged packets from this lcore's TX table for `port`.
+ * Packets the PMD does not accept are freed and counted as dropped.
+ * Always returns 0. */
+static int
+l2fwd_send_burst(struct lcore_queue_conf *qconf, unsigned n, uint8_t port)
+{
+       struct rte_mbuf **m_table;
+       unsigned ret;
+       unsigned queueid;
+
+       queueid = (uint16_t) qconf->tx_queue_id;
+       m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table;
+
+       ret = rte_eth_tx_burst(port, (uint16_t) queueid, m_table, (uint16_t) n);
+       port_statistics[port].tx += ret;
+       if (unlikely(ret < n)) {
+               port_statistics[port].dropped += (n - ret);
+               /* free the unsent tail of the burst */
+               do {
+                       rte_pktmbuf_free(m_table[ret]);
+               } while (++ret < n);
+       }
+
+       return 0;
+}
+
+/* Send the packet on an output interface */
+/* Stage one packet in this lcore's TX table for `port`; a full table
+ * (MAX_PKT_BURST) triggers an immediate burst send.  Partially filled
+ * tables are drained by the periodic flush in the main loop.
+ * Always returns 0. */
+static int
+l2fwd_send_packet(struct rte_mbuf *m, uint8_t port)
+{
+       unsigned lcore_id, len;
+       struct lcore_queue_conf *qconf;
+
+       lcore_id = rte_lcore_id();
+
+       qconf = &lcore_queue_conf[lcore_id];
+       len = qconf->tx_mbufs[port].len;
+       qconf->tx_mbufs[port].m_table[len] = m;
+       len++;
+
+       /* enough pkts to be sent */
+       if (unlikely(len == MAX_PKT_BURST)) {
+               l2fwd_send_burst(qconf, MAX_PKT_BURST, port);
+               len = 0;
+       }
+
+       qconf->tx_mbufs[port].len = len;
+       return 0;
+}
+
+/*
+ * Rewrite the Ethernet header and forward the packet to the paired
+ * destination port.
+ * NOTE(review): the 64-bit store below writes 8 bytes into the 6-byte
+ * d_addr, clobbering the first 2 bytes of s_addr; this only works
+ * because s_addr is fully overwritten immediately afterwards, and it
+ * assumes a little-endian host (0x000000c00900 is the LE encoding of
+ * 00:09:c0:...). Also, (dst_port << 24) lands in addr byte 3, i.e. the
+ * MAC becomes 00:09:c0:xx:00:00 -- not :xx at the end as the inherited
+ * comment suggests. Verify intent before changing.
+ */
+static void
+l2fwd_simple_forward(struct rte_mbuf *m, unsigned portid)
+{
+       struct ether_hdr *eth;
+       void *tmp;
+       unsigned dst_port;
+
+       dst_port = l2fwd_dst_ports[portid];
+       eth = rte_pktmbuf_mtod(m, struct ether_hdr *);
+
+       /* 00:09:c0:00:00:xx */
+       tmp = &eth->d_addr.addr_bytes[0];
+       *((uint64_t *)tmp) = 0x000000c00900 + (dst_port << 24);
+
+       /* src addr */
+       ether_addr_copy(&l2fwd_ports_eth_addr[dst_port], &eth->s_addr);
+
+       l2fwd_send_packet(m, (uint8_t) dst_port);
+}
+
+/* main processing loop */
+/* Per-lcore forever loop: periodically drains staged TX bursts and the
+ * statistics timer, then polls each assigned RX port and forwards every
+ * received packet. Never returns. */
+static void
+l2fwd_main_loop(void)
+{
+       struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
+       struct rte_mbuf *m;
+       unsigned lcore_id;
+       uint64_t prev_tsc = 0;
+       uint64_t diff_tsc, cur_tsc, timer_tsc;
+       unsigned i, j, portid, nb_rx;
+       struct lcore_queue_conf *qconf;
+
+       timer_tsc = 0;
+
+       lcore_id = rte_lcore_id();
+       qconf = &lcore_queue_conf[lcore_id];
+
+       if (qconf->n_rx_queue == 0) {
+               RTE_LOG(INFO, L2FWD, "lcore %u has nothing to do\n", lcore_id);
+               /* NOTE(review): idle lcores busy-spin at 100% CPU here */
+               while(1);
+       }
+
+       RTE_LOG(INFO, L2FWD, "entering main loop on lcore %u\n", lcore_id);
+
+       for (i = 0; i < qconf->n_rx_queue; i++) {
+
+               portid = qconf->rx_queue_list[i];
+               RTE_LOG(INFO, L2FWD, " -- lcoreid=%u portid=%u\n", lcore_id,
+                       portid);
+       }
+
+       while (1) {
+
+               cur_tsc = rte_rdtsc();
+
+               /*
+                * TX burst queue drain
+                */
+               diff_tsc = cur_tsc - prev_tsc;
+               if (unlikely(diff_tsc > BURST_TX_DRAIN)) {
+
+                       /* this could be optimized (use queueid instead of
+                        * portid), but it is not called so often */
+                       for (portid = 0; portid < L2FWD_MAX_PORTS; portid++) {
+                               if (qconf->tx_mbufs[portid].len == 0)
+                                       continue;
+                               l2fwd_send_burst(&lcore_queue_conf[lcore_id],
+                                                qconf->tx_mbufs[portid].len,
+                                                (uint8_t) portid);
+                               qconf->tx_mbufs[portid].len = 0;
+                       }
+
+                       /* if timer is enabled */
+                       if (timer_period > 0) {
+
+                               /* advance the timer */
+                               timer_tsc += diff_tsc;
+
+                               /* if timer has reached its timeout */
+                               if (unlikely(timer_tsc >= (uint64_t) timer_period)) {
+
+                                       /* do this only on master core */
+                                       if (lcore_id == rte_get_master_lcore()) {
+                                               print_stats();
+                                               /* reset the timer */
+                                               timer_tsc = 0;
+                                       }
+                               }
+                       }
+
+                       prev_tsc = cur_tsc;
+               }
+
+               /*
+                * Read packet from RX queues
+                */
+               for (i = 0; i < qconf->n_rx_queue; i++) {
+
+                       portid = qconf->rx_queue_list[i];
+                       nb_rx = rte_eth_rx_burst((uint8_t) portid, 0,
+                                                pkts_burst, MAX_PKT_BURST);
+
+                       port_statistics[portid].rx += nb_rx;
+
+                       for (j = 0; j < nb_rx; j++) {
+                               m = pkts_burst[j];
+                               /* warm the header cache line before the
+                                * forward path touches it */
+                               rte_prefetch0(rte_pktmbuf_mtod(m, void *));
+                               l2fwd_simple_forward(m, portid);
+                       }
+               }
+       }
+}
+
+/* EAL remote-launch entry point: run the L2 forwarding loop on this lcore. */
+static int
+l2fwd_launch_one_lcore(__attribute__((unused)) void *dummy)
+{
+       l2fwd_main_loop();
+       return 0;
+}
+
+/* display usage */
+/* NOTE(review): the synopsis line omits the -T option described below. */
+static void
+l2fwd_usage(const char *prgname)
+{
+       printf("%s [EAL options] -- -p PORTMASK [-q NQ]\n"
+              "  -p PORTMASK: hexadecimal bitmask of ports to configure\n"
+              "  -q NQ: number of queue (=ports) per lcore (default is 1)\n"
+                  "  -T PERIOD: statistics will be refreshed each PERIOD seconds (0 to disable, 10 default, 86400 maximum)\n",
+              prgname);
+}
+
+/*
+ * Parse the hexadecimal port mask argument.
+ * Returns the mask (non-zero) on success, -1 on empty, malformed or
+ * all-zero input.
+ * NOTE(review): the int return truncates/sign-flips masks with bit 31
+ * set -- confirm callers only ever test the result against 0.
+ */
+static int
+l2fwd_parse_portmask(const char *portmask)
+{
+       char *end = NULL;
+       unsigned long pm;
+
+       /* parse hexadecimal string */
+       pm = strtoul(portmask, &end, 16);
+       if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
+               return -1;
+
+       if (pm == 0)
+               return -1;
+
+       return pm;
+}
+
+/*
+ * Parse the per-lcore RX queue count.
+ * Returns a value in [1, MAX_RX_QUEUE_PER_LCORE - 1], or 0 on any error
+ * (empty, malformed, zero, or out of range).
+ */
+static unsigned int
+l2fwd_parse_nqueue(const char *q_arg)
+{
+       char *end = NULL;
+       unsigned long n;
+
+       /* parse decimal string (base 10 -- the original comment said hex) */
+       n = strtoul(q_arg, &end, 10);
+       if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
+               return 0;
+       if (n == 0)
+               return 0;
+       if (n >= MAX_RX_QUEUE_PER_LCORE)
+               return 0;
+
+       return n;
+}
+
+/*
+ * Parse the statistics refresh period in seconds.
+ * Returns the parsed value, or -1 on empty/malformed input or a value
+ * >= MAX_TIMER_PERIOD.  Negative inputs are returned unchanged; the
+ * caller's (timer_period < 0) check rejects them after scaling.
+ */
+static int
+l2fwd_parse_timer_period(const char *q_arg)
+{
+       char *end = NULL;
+       int n;
+
+       /* parse number string */
+       n = strtol(q_arg, &end, 10);
+       if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
+               return -1;
+       if (n >= MAX_TIMER_PERIOD)
+               return -1;
+
+       return n;
+}
+
+/* Parse the argument given in the command line of the application */
+/* Returns the index of the last consumed argument on success (so the
+ * caller can treat argv[ret..] as a fresh argv), or -1 on error. */
+static int
+l2fwd_parse_args(int argc, char **argv)
+{
+       int opt, ret;
+       char **argvopt;
+       int option_index;
+       char *prgname = argv[0];
+       static struct option lgopts[] = {
+               {NULL, 0, 0, 0}
+       };
+
+       argvopt = argv;
+
+       while ((opt = getopt_long(argc, argvopt, "p:q:T:",
+                                 lgopts, &option_index)) != EOF) {
+
+               switch (opt) {
+               /* portmask */
+               case 'p':
+                       l2fwd_enabled_port_mask = l2fwd_parse_portmask(optarg);
+                       if (l2fwd_enabled_port_mask == 0) {
+                               printf("invalid portmask\n");
+                               l2fwd_usage(prgname);
+                               return -1;
+                       }
+                       break;
+
+               /* nqueue */
+               case 'q':
+                       l2fwd_rx_queue_per_lcore = l2fwd_parse_nqueue(optarg);
+                       if (l2fwd_rx_queue_per_lcore == 0) {
+                               printf("invalid queue number\n");
+                               l2fwd_usage(prgname);
+                               return -1;
+                       }
+                       break;
+
+               /* timer period */
+               case 'T':
+                       timer_period = l2fwd_parse_timer_period(optarg) * 1000 * TIMER_MILLISECOND;
+                       if (timer_period < 0) {
+                               printf("invalid timer period\n");
+                               l2fwd_usage(prgname);
+                               return -1;
+                       }
+                       break;
+
+               /* long options */
+               case 0:
+                       l2fwd_usage(prgname);
+                       return -1;
+
+               default:
+                       l2fwd_usage(prgname);
+                       return -1;
+               }
+       }
+
+       /* put program name back so argv[ret] looks like a fresh argv[0] */
+       if (optind >= 0)
+               argv[optind-1] = prgname;
+
+       ret = optind-1;
+       optind = 0; /* reset getopt lib */
+       return ret;
+}
+
+int
+MAIN(int argc, char **argv)
+{
+       struct lcore_queue_conf *qconf;
+       struct rte_eth_dev_info dev_info;
+       struct rte_eth_link link;
+       int ret;
+       unsigned int nb_ports, nb_lcores;
+       unsigned portid, last_port, queueid = 0;
+       unsigned lcore_id, rx_lcore_id;
+       unsigned n_tx_queue, max_tx_queues;
+       unsigned nb_ports_in_mask = 0;
+
+       /* init EAL */
+       ret = rte_eal_init(argc, argv);
+       if (ret < 0)
+               rte_exit(EXIT_FAILURE, "Invalid EAL arguments\n");
+       argc -= ret;
+       argv += ret;
+
+       /* parse application arguments (after the EAL ones) */
+       ret = l2fwd_parse_args(argc, argv);
+       if (ret < 0)
+               rte_exit(EXIT_FAILURE, "Invalid L2FWD arguments\n");
+
+       /* create the mbuf pool */
+       l2fwd_pktmbuf_pool =
+               rte_mempool_create("mbuf_pool", NB_MBUF,
+                                  MBUF_SIZE, 32,
+                                  sizeof(struct rte_pktmbuf_pool_private),
+                                  rte_pktmbuf_pool_init, NULL,
+                                  rte_pktmbuf_init, NULL,
+                                  SOCKET0, 0);
+       if (l2fwd_pktmbuf_pool == NULL)
+               rte_exit(EXIT_FAILURE, "Cannot init mbuf pool\n");
+
+       /* init driver(s) */
+#ifdef RTE_LIBRTE_IGB_PMD
+       if (rte_igb_pmd_init() < 0)
+               rte_exit(EXIT_FAILURE, "Cannot init igb pmd\n");
+#endif
+#ifdef RTE_LIBRTE_IXGBE_PMD
+       if (rte_ixgbe_pmd_init() < 0)
+               rte_exit(EXIT_FAILURE, "Cannot init ixgbe pmd\n");
+#endif
+
+       if (rte_eal_pci_probe() < 0)
+               rte_exit(EXIT_FAILURE, "Cannot probe PCI\n");
+
+       nb_ports = rte_eth_dev_count();
+       if (nb_ports == 0)
+               rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n");
+
+       if (nb_ports > L2FWD_MAX_PORTS)
+               nb_ports = L2FWD_MAX_PORTS;
+
+       nb_lcores = rte_lcore_count();
+
+       /* reset l2fwd_dst_ports */
+       for (portid = 0; portid < L2FWD_MAX_PORTS; portid++)
+               l2fwd_dst_ports[portid] = 0;
+       last_port = 0;
+
+       /*
+        * Each logical core is assigned a dedicated TX queue on each port.
+        * Compute the maximum number of TX queues that can be used.
+        */
+       max_tx_queues = nb_lcores;
+       for (portid = 0; portid < nb_ports; portid++) {
+               /* skip ports that are not enabled */
+               if ((l2fwd_enabled_port_mask & (1 << portid)) == 0)
+                       continue;
+
+               if (nb_ports_in_mask % 2) {
+                       l2fwd_dst_ports[portid] = last_port;
+                       l2fwd_dst_ports[last_port] = portid;
+               }
+               else
+                       last_port = portid;
+
+               nb_ports_in_mask++;
+
+               rte_eth_dev_info_get((uint8_t) portid, &dev_info);
+               if (max_tx_queues > dev_info.max_tx_queues)
+                       max_tx_queues = dev_info.max_tx_queues;
+       }
+
+       if (nb_ports_in_mask < 2 || nb_ports_in_mask % 2) {
+               rte_exit(EXIT_FAILURE, "invalid number of ports in portmask. "
+                       "Should be an even number.\n");
+       }
+
+       rx_lcore_id = 0;
+       n_tx_queue = 0;
+       qconf = NULL;
+
+       /* Initialize the port/queue configuration of each logical core */
+       for (portid = 0; portid < nb_ports; portid++) {
+               /* skip ports that are not enabled */
+               if ((l2fwd_enabled_port_mask & (1 << portid)) == 0)
+                       continue;
+
+               /* get the lcore_id for this port */
+               while (rte_lcore_is_enabled(rx_lcore_id) == 0 ||
+                      lcore_queue_conf[rx_lcore_id].n_rx_queue ==
+                      l2fwd_rx_queue_per_lcore) {
+
+                       rx_lcore_id++;
+                       if (rx_lcore_id >= RTE_MAX_LCORE)
+                               rte_exit(EXIT_FAILURE, "Not enough cores\n");
+               }
+               if (qconf != &lcore_queue_conf[rx_lcore_id]) {
+                       if (n_tx_queue == max_tx_queues)
+                               rte_exit(EXIT_FAILURE,
+                                       "Not enough TX queues\n");
+                       /* Assigned a new logical core in the loop above. */
+                       qconf = &lcore_queue_conf[rx_lcore_id];
+                       qconf->tx_queue_id = n_tx_queue;
+                       n_tx_queue++;
+               }
+               qconf->rx_queue_list[qconf->n_rx_queue] = portid;
+               qconf->n_rx_queue++;
+               printf("Lcore %u: RX port %u TX queue %u\n",
+                      rx_lcore_id, portid, qconf->tx_queue_id);
+       }
+
+       /* Initialise each port */
+       for (portid = 0; portid < nb_ports; portid++) {
+
+               /* skip ports that are not enabled */
+               if ((l2fwd_enabled_port_mask & (1 << portid)) == 0) {
+                       printf("Skipping disabled port %u\n", portid);
+                       continue;
+               }
+               /* init port */
+               printf("Initializing port %u... ", portid);
+               fflush(stdout);
+               ret = rte_eth_dev_configure((uint8_t) portid, 1,
+                                           (uint16_t) n_tx_queue, &port_conf);
+               if (ret < 0)
+                       rte_exit(EXIT_FAILURE, "Cannot configure device: "
+                                       "err=%d, port=%u\n",
+                                 ret, portid);
+
+               rte_eth_macaddr_get((uint8_t) portid,
+                                   &l2fwd_ports_eth_addr[portid]);
+
+               /* init one RX queue */
+               fflush(stdout);
+               ret = rte_eth_rx_queue_setup((uint8_t) portid, 0, nb_rxd,
+                                            SOCKET0, &rx_conf,
+                                            l2fwd_pktmbuf_pool);
+               if (ret < 0)
+                       rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: "
+                                       "err=%d, port=%u\n",
+                                 ret, portid);
+
+               /* init one TX queue logical core on each port */
+               for (queueid = 0; queueid < n_tx_queue; queueid++) {
+                       fflush(stdout);
+                       ret = rte_eth_tx_queue_setup((uint8_t) portid,
+                                                    (uint16_t) queueid, nb_txd,
+                                                    SOCKET0, &tx_conf);
+                       if (ret < 0)
+                               rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: "
+                                               "err=%d, port=%u queue=%u\n",
+                                         ret, portid, queueid);
+               }
+
+               /* Start device */
+               ret = rte_eth_dev_start((uint8_t) portid);
+               if (ret < 0)
+                       rte_exit(EXIT_FAILURE, "rte_eth_dev_start: "
+                                       "err=%d, port=%u\n",
+                                 ret, portid);
+
+               printf("done: ");
+
+               /* get link status */
+               rte_eth_link_get((uint8_t) portid, &link);
+               if (link.link_status) {
+                       printf(" Link Up - speed %u Mbps - %s\n",
+                              (unsigned) link.link_speed,
+                              (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
+                              ("full-duplex") : ("half-duplex\n"));
+               } else {
+                       printf(" Link Down\n");
+               }
+
+               rte_eth_promiscuous_enable((uint8_t)portid);
+
+               printf("Port %u, MAC address: %02X:%02X:%02X:%02X:%02X:%02X\n\n",
+                               portid,
+                               l2fwd_ports_eth_addr[portid].addr_bytes[0],
+                               l2fwd_ports_eth_addr[portid].addr_bytes[1],
+                               l2fwd_ports_eth_addr[portid].addr_bytes[2],
+                               l2fwd_ports_eth_addr[portid].addr_bytes[3],
+                               l2fwd_ports_eth_addr[portid].addr_bytes[4],
+                               l2fwd_ports_eth_addr[portid].addr_bytes[5]);
+
+               /* initialize port stats */
+               memset(&port_statistics, 0, sizeof(port_statistics));
+       }
+
+       /* launch per-lcore init on every lcore */
+       rte_eal_mp_remote_launch(l2fwd_launch_one_lcore, NULL, CALL_MASTER);
+       RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+               if (rte_eal_wait_lcore(lcore_id) < 0)
+                       return -1;
+       }
+
+       return 0;
+}
diff --git a/examples/l2fwd/main.h b/examples/l2fwd/main.h
new file mode 100644 (file)
index 0000000..6027cb5
--- /dev/null
@@ -0,0 +1,47 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#ifndef _MAIN_H_
+#define _MAIN_H_
+
+/*
+ * On bare-metal builds the C runtime provides its own main, so the
+ * application entry point is renamed to _main; on hosted (Linux) builds
+ * MAIN is simply main.
+ */
+#ifdef RTE_EXEC_ENV_BAREMETAL
+#define MAIN _main
+#else
+#define MAIN main
+#endif
+
+/* Application entry point, defined in main.c. */
+int MAIN(int argc, char **argv);
+
+#endif /* _MAIN_H_ */
diff --git a/examples/l3fwd-vf/496040_L3Forwarding_VirtEnv_Sample_App_Guide_Rev1.0.pdf b/examples/l3fwd-vf/496040_L3Forwarding_VirtEnv_Sample_App_Guide_Rev1.0.pdf
new file mode 100644 (file)
index 0000000..4cea125
Binary files /dev/null and b/examples/l3fwd-vf/496040_L3Forwarding_VirtEnv_Sample_App_Guide_Rev1.0.pdf differ
diff --git a/examples/l3fwd-vf/Makefile b/examples/l3fwd-vf/Makefile
new file mode 100644 (file)
index 0000000..11e2b7c
--- /dev/null
@@ -0,0 +1,58 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+# RTE_SDK must point at the DPDK installation root
+ifeq ($(RTE_SDK),)
+$(error "Please define RTE_SDK environment variable")
+endif
+
+# Default target, can be overridden by command line or environment
+RTE_TARGET ?= x86_64-default-linuxapp-gcc
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# binary name
+APP = l3fwd-vf
+
+# all sources are stored in SRCS-y
+SRCS-y := main.c
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+# workaround for a gcc bug with noreturn attribute
+# http://gcc.gnu.org/bugzilla/show_bug.cgi?id=12603
+ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
+CFLAGS_main.o += -Wno-return-type
+endif
+
+include $(RTE_SDK)/mk/rte.extapp.mk
diff --git a/examples/l3fwd-vf/main.c b/examples/l3fwd-vf/main.c
new file mode 100644 (file)
index 0000000..a7f4cce
--- /dev/null
@@ -0,0 +1,1079 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <sys/types.h>
+#include <string.h>
+#include <sys/queue.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <getopt.h>
+
+#include <rte_common.h>
+#include <rte_byteorder.h>
+#include <rte_log.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_memzone.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_launch.h>
+#include <rte_atomic.h>
+#include <rte_cycles.h>
+#include <rte_prefetch.h>
+#include <rte_lcore.h>
+#include <rte_per_lcore.h>
+#include <rte_branch_prediction.h>
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_random.h>
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_ring.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_ip.h>
+#include <rte_tcp.h>
+#include <rte_udp.h>
+#include <rte_string_fns.h>
+
+#include "main.h"
+
+/*
+ * Compile-time selection of the route lookup method: exact match (hash on
+ * the IPv4 5-tuple) or longest-prefix match on the destination address.
+ */
+#define APP_LOOKUP_EXACT_MATCH          0
+#define APP_LOOKUP_LPM                  1
+/* Enable IPv4 header sanity checks (RFC1812 section 5.2.2) on RX */
+#define DO_RFC_1812_CHECKS
+
+//#define APP_LOOKUP_METHOD             APP_LOOKUP_EXACT_MATCH
+#ifndef APP_LOOKUP_METHOD
+#define APP_LOOKUP_METHOD             APP_LOOKUP_LPM
+#endif
+
+/* Pull in only the headers needed for the selected lookup method */
+#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
+#include <rte_hash.h>
+#include <rte_hash_crc.h>
+#include <rte_jhash.h>
+#elif (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
+#include <rte_lpm.h>
+#else
+#error "APP_LOOKUP_METHOD set to incorrect value"
+#endif
+
+/* All application logs go to the USER1 log type */
+#define RTE_LOGTYPE_L3FWD RTE_LOGTYPE_USER1
+
+#define MAX_PORTS 32
+
+/* mbuf size: one 2 KB data segment plus mbuf metadata and headroom */
+#define MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
+#define NB_MBUF   8192
+
+/*
+ * RX and TX Prefetch, Host, and Write-back threshold values should be
+ * carefully set for optimal performance. Consult the network
+ * controller's datasheet and supporting DPDK documentation for guidance
+ * on how these parameters should be set.
+ */
+#define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */
+#define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */
+#define RX_WTHRESH 4 /**< Default values of RX write-back threshold reg. */
+
+/*
+ * These default values are optimized for use with the Intel(R) 82599 10 GbE
+ * Controller and the DPDK ixgbe PMD. Consider using other values for other
+ * network controllers and/or network drivers.
+ */
+#define TX_PTHRESH 36 /**< Default values of TX prefetch threshold reg. */
+#define TX_HTHRESH 0  /**< Default values of TX host threshold reg. */
+#define TX_WTHRESH 0  /**< Default values of TX write-back threshold reg. */
+
+#define MAX_PKT_BURST 32
+/* TX drain period, in TSC cycles */
+#define BURST_TX_DRAIN 200000ULL /* around 100us at 2 Ghz */
+
+#define NB_SOCKETS 8
+
+#define SOCKET0 0
+
+/* Configure how many packets ahead to prefetch, when reading packets */
+#define PREFETCH_OFFSET        3
+
+/*
+ * Configurable number of RX/TX ring descriptors
+ */
+#define RTE_TEST_RX_DESC_DEFAULT 128
+#define RTE_TEST_TX_DESC_DEFAULT 512
+static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
+static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
+
+/* ethernet addresses of ports */
+static struct ether_addr ports_eth_addr[MAX_PORTS];
+
+/* mask of enabled ports */
+static uint32_t enabled_port_mask = 0;
+static int numa_on = 1; /**< NUMA is enabled by default. */
+
+/* Per-port staging buffer holding up to one TX burst of mbufs */
+struct mbuf_table {
+       uint16_t len;
+       struct rte_mbuf *m_table[MAX_PKT_BURST];
+};
+
+/* One RX (port, queue) pair polled by an lcore */
+struct lcore_rx_queue {
+       uint8_t port_id;
+       uint8_t queue_id;
+} __rte_cache_aligned;
+
+#define MAX_RX_QUEUE_PER_LCORE 16
+#define MAX_TX_QUEUE_PER_PORT 1
+#define MAX_RX_QUEUE_PER_PORT 1
+
+#define MAX_LCORE_PARAMS 1024
+/* Static (port, queue) -> lcore mapping entry */
+struct lcore_params {
+       uint8_t port_id;
+       uint8_t queue_id;
+       uint8_t lcore_id;
+} __rte_cache_aligned;
+
+static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
+/* Default mapping used when no --config is given on the command line */
+static struct lcore_params lcore_params_array_default[] = {
+       {0, 0, 2},
+       {0, 1, 2},
+       {0, 2, 2},
+       {1, 0, 2},
+       {1, 1, 2},
+       {1, 2, 2},
+       {2, 0, 2},
+       {3, 0, 3},
+       {3, 1, 3},
+};
+
+static struct lcore_params * lcore_params = lcore_params_array_default;
+static uint16_t nb_lcore_params = sizeof(lcore_params_array_default) /
+                               sizeof(lcore_params_array_default[0]);
+
+/* Device configuration: IP checksum offload and CRC strip on RX,
+ * RSS over IPv4 to spread flows across RX queues */
+static struct rte_eth_conf port_conf = {
+       .rxmode = {
+               .split_hdr_size = 0,
+               .header_split   = 0, /**< Header Split disabled */
+               .hw_ip_checksum = 1, /**< IP checksum offload enabled */
+               .hw_vlan_filter = 0, /**< VLAN filtering disabled */
+               .jumbo_frame    = 0, /**< Jumbo Frame Support disabled */
+               .hw_strip_crc   = 1, /**< CRC stripped by hardware */
+       },
+       .rx_adv_conf = {
+               .rss_conf = {
+                       .rss_key = NULL,
+                       .rss_hf = ETH_RSS_IPV4,
+               },
+       },
+       .txmode = {
+       },
+};
+
+/* RX queue thresholds (see RX_*THRESH notes above) */
+static const struct rte_eth_rxconf rx_conf = {
+       .rx_thresh = {
+               .pthresh = RX_PTHRESH,
+               .hthresh = RX_HTHRESH,
+               .wthresh = RX_WTHRESH,
+       },
+};
+
+/* TX queue thresholds (see TX_*THRESH notes above) */
+static const struct rte_eth_txconf tx_conf = {
+       .tx_thresh = {
+               .pthresh = TX_PTHRESH,
+               .hthresh = TX_HTHRESH,
+               .wthresh = TX_WTHRESH,
+       },
+       .tx_free_thresh = 0, /* Use PMD default values */
+       .tx_rs_thresh = 0, /* Use PMD default values */
+};
+
+/* One pktmbuf pool per NUMA socket */
+static struct rte_mempool * pktmbuf_pool[NB_SOCKETS];
+
+
+#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
+/* Hash key: IPv4 5-tuple. Packed so the key contains no padding bytes
+ * that would perturb the hash value. */
+struct ipv4_5tuple {
+       uint32_t ip_dst;
+       uint32_t ip_src;
+       uint16_t port_dst;
+       uint16_t port_src;
+       uint8_t proto;
+} __attribute__((__packed__));
+
+/* Exact-match route: 5-tuple key -> output interface */
+struct l3fwd_route {
+       struct ipv4_5tuple key;
+       uint8_t if_out;
+};
+
+/* Static exact-match routes installed at startup */
+static struct l3fwd_route l3fwd_route_array[] = {
+       {{IPv4(100,10,0,1), IPv4(200,10,0,1), 101, 11, IPPROTO_TCP}, 0},
+       {{IPv4(100,20,0,2), IPv4(200,20,0,2), 102, 12, IPPROTO_TCP}, 1},
+       {{IPv4(100,30,0,3), IPv4(200,30,0,3), 103, 13, IPPROTO_TCP}, 2},
+       {{IPv4(100,40,0,4), IPv4(200,40,0,4), 104, 14, IPPROTO_TCP}, 3},
+};
+
+typedef struct rte_hash lookup_struct_t;
+static lookup_struct_t *l3fwd_lookup_struct[NB_SOCKETS];
+
+#define L3FWD_HASH_ENTRIES     1024
+struct rte_hash_parameters l3fwd_hash_params = {
+       .name = "l3fwd_hash_0",
+       .entries = L3FWD_HASH_ENTRIES,
+       .bucket_entries = 4,
+       .key_len = sizeof(struct ipv4_5tuple),
+       .hash_func = rte_hash_crc,
+       .hash_func_init_val = 0,
+       .socket_id = SOCKET0,
+};
+
+#define L3FWD_NUM_ROUTES \
+       (sizeof(l3fwd_route_array) / sizeof(l3fwd_route_array[0]))
+
+/* Maps a hash table entry index to its output interface */
+static uint8_t l3fwd_out_if[L3FWD_HASH_ENTRIES] __rte_cache_aligned;
+#endif
+
+#if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
+/* LPM rule: route ip/depth to output interface if_out */
+struct l3fwd_route {
+       uint32_t ip;
+       uint8_t  depth;
+       uint8_t  if_out;
+};
+
+/* Static LPM routes installed at startup: N.1.1.0/24 -> port N-1 */
+static struct l3fwd_route l3fwd_route_array[] = {
+       {IPv4(1,1,1,0), 24, 0},
+       {IPv4(2,1,1,0), 24, 1},
+       {IPv4(3,1,1,0), 24, 2},
+       {IPv4(4,1,1,0), 24, 3},
+       {IPv4(5,1,1,0), 24, 4},
+       {IPv4(6,1,1,0), 24, 5},
+       {IPv4(7,1,1,0), 24, 6},
+       {IPv4(8,1,1,0), 24, 7},
+};
+
+#define L3FWD_NUM_ROUTES \
+       (sizeof(l3fwd_route_array) / sizeof(l3fwd_route_array[0]))
+
+#define L3FWD_LPM_MAX_RULES     1024
+
+typedef struct rte_lpm lookup_struct_t;
+static lookup_struct_t *l3fwd_lookup_struct[NB_SOCKETS];
+#endif
+
+/* Per-lcore run-time state: the RX queues it polls, its TX queue id,
+ * per-port TX staging buffers, and the lookup structure for its socket */
+struct lcore_conf {
+       uint16_t n_rx_queue;
+       struct lcore_rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
+       uint16_t tx_queue_id;
+       struct mbuf_table tx_mbufs[MAX_PORTS];
+       lookup_struct_t * lookup_struct;
+} __rte_cache_aligned;
+
+static struct lcore_conf lcore_conf[RTE_MAX_LCORE];
+
+/* Flush the staged TX buffer for @port on the caller's TX queue.
+ * Any mbufs the PMD does not accept are freed (dropped). */
+static inline int
+send_burst(struct lcore_conf *qconf, uint16_t n, uint8_t port)
+{
+       struct rte_mbuf **pkts;
+       uint16_t sent;
+
+       pkts = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table;
+       sent = rte_eth_tx_burst(port, qconf->tx_queue_id, pkts, n);
+
+       /* drop whatever the device queue could not take */
+       while (unlikely(sent < n)) {
+               rte_pktmbuf_free(pkts[sent]);
+               sent++;
+       }
+
+       return 0;
+}
+
+/* Stage one mbuf for transmission on @port; when the staging buffer
+ * reaches MAX_PKT_BURST, flush it with send_burst(). */
+static inline int
+send_single_packet(struct rte_mbuf *m, uint8_t port)
+{
+       struct lcore_conf *qconf;
+       uint16_t slot;
+
+       qconf = &lcore_conf[rte_lcore_id()];
+
+       slot = qconf->tx_mbufs[port].len;
+       qconf->tx_mbufs[port].m_table[slot] = m;
+       slot++;
+
+       /* enough pkts to be sent */
+       if (unlikely(slot == MAX_PKT_BURST)) {
+               send_burst(qconf, MAX_PKT_BURST, port);
+               slot = 0;
+       }
+
+       qconf->tx_mbufs[port].len = slot;
+       return 0;
+}
+
+#ifdef DO_RFC_1812_CHECKS
+/*
+ * Validate an IPv4 header per RFC1812 section 5.2.2.
+ *
+ * @param pkt      pointer to the IPv4 header (fields in network byte order)
+ * @param link_len packet length reported by the link layer, in bytes
+ * @return 0 if the header is acceptable, the negated check number otherwise
+ */
+static inline int
+is_valid_ipv4_pkt(struct ipv4_hdr *pkt, uint32_t link_len)
+{
+       /* From http://www.rfc-editor.org/rfc/rfc1812.txt section 5.2.2 */
+       /*
+        * 1. The packet length reported by the Link Layer must be large
+        * enough to hold the minimum length legal IP datagram (20 bytes).
+        */
+       if (link_len < sizeof(struct ipv4_hdr))
+               return -1;
+
+       /* 2. The IP checksum must be correct. */
+       /* this is checked in H/W */
+
+       /*
+        * 3. The IP version number must be 4. If the version number is not 4
+        * then the packet may be another version of IP, such as IPng or
+        * ST-II.
+        */
+       if (((pkt->version_ihl) >> 4) != 4)
+               return -3;
+       /*
+        * 4. The IP header length field must be large enough to hold the
+        * minimum length legal IP datagram (20 bytes = 5 words).
+        */
+       if ((pkt->version_ihl & 0xf) < 5)
+               return -4;
+
+       /*
+        * 5. The IP total length field must be large enough to hold the IP
+        * datagram header, whose length is specified in the IP header length
+        * field.
+        *
+        * total_length is big-endian on the wire, so convert it to host
+        * order with rte_be_to_cpu_16 (the original rte_cpu_to_be_16
+        * performed the identical byte swap but stated the opposite
+        * intent).
+        */
+       if (rte_be_to_cpu_16(pkt->total_length) < sizeof(struct ipv4_hdr))
+               return -5;
+
+       return 0;
+}
+#endif
+
+#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
+/* Debug helper: dump an exact-match 5-tuple key to stdout. Addresses are
+ * printed as raw host-order hex words. */
+static void
+print_key(struct ipv4_5tuple key)
+{
+       printf("IP dst = %08x, IP src = %08x, port dst = %d, port src = %d, proto = %d\n",
+              (unsigned)key.ip_dst, (unsigned)key.ip_src, key.port_dst, key.port_src, key.proto);
+}
+
+/*
+ * Exact-match lookup: build the IPv4 5-tuple key from the packet headers
+ * and look it up in the per-socket hash.
+ *
+ * Returns the matched route's output interface, or the input @portid when
+ * no route matches (lookup returns a negative index).
+ */
+static inline uint8_t
+get_dst_port(struct ipv4_hdr *ipv4_hdr,  uint8_t portid, lookup_struct_t * l3fwd_lookup_struct)
+{
+       struct ipv4_5tuple key;
+       struct tcp_hdr *tcp;
+       struct udp_hdr *udp;
+       int ret = 0;
+
+       /* key fields are stored in host byte order */
+       key.ip_dst = rte_be_to_cpu_32(ipv4_hdr->dst_addr);
+       key.ip_src = rte_be_to_cpu_32(ipv4_hdr->src_addr);
+       key.proto = ipv4_hdr->next_proto_id;
+
+       /* L4 header is assumed to start right after the 20-byte IPv4
+        * header — NOTE(review): ignores IP options (ihl > 5); confirm
+        * acceptable for this sample app */
+       switch (ipv4_hdr->next_proto_id) {
+       case IPPROTO_TCP:
+               tcp = (struct tcp_hdr *)((unsigned char *) ipv4_hdr +
+                                       sizeof(struct ipv4_hdr));
+               key.port_dst = rte_be_to_cpu_16(tcp->dst_port);
+               key.port_src = rte_be_to_cpu_16(tcp->src_port);
+               break;
+
+       case IPPROTO_UDP:
+               udp = (struct udp_hdr *)((unsigned char *) ipv4_hdr +
+                                       sizeof(struct ipv4_hdr));
+               key.port_dst = rte_be_to_cpu_16(udp->dst_port);
+               key.port_src = rte_be_to_cpu_16(udp->src_port);
+               break;
+
+       default:
+               /* non-TCP/UDP traffic hashes with zeroed ports */
+               key.port_dst = 0;
+               key.port_src = 0;
+       }
+
+       /* Find destination port */
+       ret = rte_hash_lookup(l3fwd_lookup_struct, (const void *)&key);
+       return (uint8_t)((ret < 0)? portid : l3fwd_out_if[ret]);
+}
+#endif
+
+#if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
+/* LPM lookup: return the route's next hop for the destination address,
+ * or fall back to the input @portid when no prefix matches. */
+static inline uint8_t
+get_dst_port(struct ipv4_hdr *ipv4_hdr,  uint8_t portid, lookup_struct_t * l3fwd_lookup_struct)
+{
+       uint8_t next_hop;
+       int miss;
+
+       miss = rte_lpm_lookup(l3fwd_lookup_struct,
+                       rte_be_to_cpu_32(ipv4_hdr->dst_addr), &next_hop);
+       if (miss == 0)
+               return next_hop;
+       return (uint8_t) portid;
+}
+#endif
+
+/*
+ * Forward one IPv4 packet: validate it (RFC1812), look up the output
+ * port, rewrite the Ethernet header, and stage it on the output TX
+ * buffer. Invalid packets are dropped (mbuf freed).
+ */
+static inline void
+l3fwd_simple_forward(struct rte_mbuf *m, uint8_t portid, lookup_struct_t * l3fwd_lookup_struct)
+{
+       struct ether_hdr *eth_hdr;
+       struct ipv4_hdr *ipv4_hdr;
+       void *tmp;
+       uint8_t dst_port;
+
+       eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
+
+       /* NOTE(review): assumes untagged IPv4 frames — the IP header is
+        * taken immediately after the Ethernet header; confirm no VLAN */
+       ipv4_hdr = (struct ipv4_hdr *)(rte_pktmbuf_mtod(m, unsigned char *) +
+                               sizeof(struct ether_hdr));
+
+#ifdef DO_RFC_1812_CHECKS
+       /* Check to make sure the packet is valid (RFC1812) */
+       if (is_valid_ipv4_pkt(ipv4_hdr, m->pkt.pkt_len) < 0) {
+               rte_pktmbuf_free(m);
+               return;
+       }
+#endif
+
+       /* send back out the input port when the route points to a port
+        * outside the enabled port mask */
+       dst_port = get_dst_port(ipv4_hdr, portid, l3fwd_lookup_struct);
+       if (dst_port >= MAX_PORTS || (enabled_port_mask & 1 << dst_port) == 0)
+               dst_port = portid;
+
+       /* 00:09:c0:00:00:xx */
+       /* NOTE(review): this 64-bit store writes 8 bytes from d_addr, i.e.
+        * it also clobbers the first two bytes of s_addr — harmless only
+        * because s_addr is rewritten below; also an unaligned store.
+        * Confirm acceptable on the target architecture. */
+       tmp = &eth_hdr->d_addr.addr_bytes[0];
+       *((uint64_t *)tmp) = 0x000000c00900 + (dst_port << 24);
+
+#ifdef DO_RFC_1812_CHECKS
+       /* Update time to live and header checksum */
+       /* NOTE(review): ++checksum is the customary incremental adjustment
+        * matching a TTL decrement on little-endian hosts; it skips the
+        * ones-complement carry case — presumably a deliberate speed
+        * trade-off in this sample, verify before reuse. */
+       --(ipv4_hdr->time_to_live);
+       ++(ipv4_hdr->hdr_checksum);
+#endif
+
+       /* src addr */
+       ether_addr_copy(&ports_eth_addr[dst_port], &eth_hdr->s_addr);
+
+       send_single_packet(m, dst_port);
+
+}
+
+/* main processing loop */
+/*
+ * Per-lcore forwarding loop (never returns). Repeatedly:
+ *  - drains stale TX staging buffers once per BURST_TX_DRAIN TSC cycles;
+ *  - polls every RX queue assigned to this lcore, prefetching packet data
+ *    PREFETCH_OFFSET mbufs ahead while forwarding.
+ */
+static __attribute__((noreturn)) int
+main_loop(__attribute__((unused)) void *dummy)
+{
+       struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
+       unsigned lcore_id;
+       uint64_t prev_tsc = 0;
+       uint64_t diff_tsc, cur_tsc;
+       int i, j, nb_rx;
+       uint8_t portid, queueid;
+       struct lcore_conf *qconf;
+
+       lcore_id = rte_lcore_id();
+       qconf = &lcore_conf[lcore_id];
+
+       if (qconf->n_rx_queue == 0) {
+               RTE_LOG(INFO, L3FWD, "lcore %u has nothing to do\n", lcore_id);
+               /* park an idle lcore forever (busy-wait, never returns) */
+               while(1);
+       }
+
+       RTE_LOG(INFO, L3FWD, "entering main loop on lcore %u\n", lcore_id);
+
+       for (i = 0; i < qconf->n_rx_queue; i++) {
+
+               portid = qconf->rx_queue_list[i].port_id;
+               queueid = qconf->rx_queue_list[i].queue_id;
+               RTE_LOG(INFO, L3FWD, " -- lcoreid=%u portid=%hhu rxqueueid=%hhu\n", lcore_id,
+                       portid, queueid);
+       }
+
+       while (1) {
+
+               cur_tsc = rte_rdtsc();
+
+               /*
+                * TX burst queue drain
+                */
+               diff_tsc = cur_tsc - prev_tsc;
+               if (unlikely(diff_tsc > BURST_TX_DRAIN)) {
+
+                       /*
+                        * This could be optimized (use queueid instead of
+                        * portid), but it is not called so often
+                        */
+                       for (portid = 0; portid < MAX_PORTS; portid++) {
+                               if (qconf->tx_mbufs[portid].len == 0)
+                                       continue;
+                               send_burst(&lcore_conf[lcore_id],
+                                       qconf->tx_mbufs[portid].len,
+                                       portid);
+                               qconf->tx_mbufs[portid].len = 0;
+                       }
+
+                       prev_tsc = cur_tsc;
+               }
+
+               /*
+                * Read packet from RX queues
+                */
+               for (i = 0; i < qconf->n_rx_queue; ++i) {
+
+                       portid = qconf->rx_queue_list[i].port_id;
+                       queueid = qconf->rx_queue_list[i].queue_id;
+                       nb_rx = rte_eth_rx_burst(portid, queueid, pkts_burst, MAX_PKT_BURST);
+
+                       /* Prefetch first packets */
+                       for (j = 0; j < PREFETCH_OFFSET && j < nb_rx; j++) {
+                               rte_prefetch0(rte_pktmbuf_mtod(
+                                               pkts_burst[j], void *));
+                       }
+
+                       /* Prefetch and forward already prefetched packets */
+                       for (j = 0; j < (nb_rx - PREFETCH_OFFSET); j++) {
+                               rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[
+                                               j + PREFETCH_OFFSET], void *));
+                               l3fwd_simple_forward(pkts_burst[j], portid, qconf->lookup_struct);
+                       }
+
+                       /* Forward remaining prefetched packets */
+                       for (; j < nb_rx; j++) {
+                               l3fwd_simple_forward(pkts_burst[j], portid, qconf->lookup_struct);
+                       }
+               }
+       }
+}
+
+static int
+check_lcore_params(void)
+{
+       uint8_t queue, lcore;
+       uint16_t i;
+       int socketid;
+
+       /*
+        * Validate every (port, queue, lcore) tuple: the queue index must
+        * be in range and the lcore must be enabled in the coremask.
+        */
+       for (i = 0; i < nb_lcore_params; ++i) {
+               queue = lcore_params[i].queue_id;
+               if (queue >= MAX_RX_QUEUE_PER_PORT) {
+                       printf("invalid queue number: %hhu\n", queue);
+                       return -1;
+               }
+               lcore = lcore_params[i].lcore_id;
+               if (!rte_lcore_is_enabled(lcore)) {
+                       printf("error: lcore %hhu is not enabled in lcore mask\n", lcore);
+                       return -1;
+               }
+               /*
+                * Fixed operator precedence: the previous code evaluated
+                * "socketid = (rte_lcore_to_socket_id(lcore) != 0)", which
+                * stored 0/1 in socketid instead of the actual socket id,
+                * so the warning below printed a bogus socket number.
+                */
+               socketid = rte_lcore_to_socket_id(lcore);
+               if ((socketid != 0) && (numa_on == 0)) {
+                       printf("warning: lcore %hhu is on socket %d with numa off \n",
+                               lcore, socketid);
+               }
+       }
+       return 0;
+}
+
+static int
+check_port_config(const unsigned nb_ports)
+{
+       unsigned portid;
+       uint16_t i;
+
+       /*
+        * Verify every port referenced by the lcore/queue configuration
+        * exists on the board and is selected in the port mask.
+        */
+       for (i = 0; i < nb_lcore_params; ++i) {
+               portid = lcore_params[i].port_id;
+               /*
+                * Range-check first: portid is user-supplied (up to 255)
+                * and shifting a 32-bit mask by >= 32 bits is undefined
+                * behaviour, so the mask test must come second.
+                */
+               if (portid >= nb_ports) {
+                       printf("port %u is not present on the board\n", portid);
+                       return -1;
+               }
+               if ((enabled_port_mask & (1 << portid)) == 0) {
+                       printf("port %u is not enabled in port mask\n", portid);
+                       return -1;
+               }
+       }
+       return 0;
+}
+
+/*
+ * Return the number of RX queues configured on @port: one more than the
+ * highest queue_id referenced for that port in lcore_params, or 0 when
+ * the port is not referenced at all.
+ */
+static uint8_t
+get_port_n_rx_queues(const uint8_t port)
+{
+       int queue = -1;
+       uint16_t i;
+
+       for (i = 0; i < nb_lcore_params; ++i) {
+               if (lcore_params[i].port_id == port && lcore_params[i].queue_id > queue)
+                       queue = lcore_params[i].queue_id;
+       }
+       /* the -1 sentinel becomes 0 when no queue references this port */
+       return (uint8_t)(++queue);
+}
+
+/*
+ * Distribute the (port, queue) pairs from lcore_params onto each lcore's
+ * rx_queue_list.  Returns -1 if any lcore would exceed
+ * MAX_RX_QUEUE_PER_LCORE, 0 on success.
+ */
+static int
+init_lcore_rx_queues(void)
+{
+       uint16_t i, nb_rx_queue;
+       uint8_t lcore;
+
+       for (i = 0; i < nb_lcore_params; ++i) {
+               lcore = lcore_params[i].lcore_id;
+               nb_rx_queue = lcore_conf[lcore].n_rx_queue;
+               if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) {
+                       printf("error: too many queues (%u) for lcore: %u\n",
+                               (unsigned)nb_rx_queue + 1, (unsigned)lcore);
+                       return -1;
+               } else {
+                       /* append this (port, queue) pair to the lcore's list */
+                       lcore_conf[lcore].rx_queue_list[nb_rx_queue].port_id =
+                               lcore_params[i].port_id;
+                       lcore_conf[lcore].rx_queue_list[nb_rx_queue].queue_id =
+                               lcore_params[i].queue_id;
+                       lcore_conf[lcore].n_rx_queue++;
+               }
+       }
+       return 0;
+}
+
+/* display usage */
+static void
+print_usage(const char *prgname)
+{
+       /* fixed the unbalanced parenthesis in the --config usage string */
+       printf ("%s [EAL options] -- -p PORTMASK"
+               "  [--config (port,queue,lcore)[,(port,queue,lcore)]]\n"
+               "  -p PORTMASK: hexadecimal bitmask of ports to configure\n"
+               "  --config (port,queue,lcore): rx queues configuration\n"
+               "  --no-numa: optional, disable numa awareness\n",
+               prgname);
+}
+
+/*
+ * Parse the hexadecimal -p portmask argument.
+ * Returns the mask, or -1 on a malformed string or an empty (zero) mask.
+ */
+static int
+parse_portmask(const char *portmask)
+{
+       char *end = NULL;
+       unsigned long pm;
+
+       /* parse hexadecimal string */
+       pm = strtoul(portmask, &end, 16);
+       /* reject empty input or trailing garbage after the number */
+       if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
+               return -1;
+
+       /* a zero mask would select no ports at all */
+       if (pm == 0)
+               return -1;
+
+       return pm;
+}
+
+/*
+ * Parse the --config option: a list of "(port,queue,lcore)" tuples
+ * describing which lcore polls which RX queue.  On success the tuples
+ * are stored in lcore_params_array, lcore_params is pointed at it, and
+ * 0 is returned; any syntax error or overflow returns -1.
+ */
+static int
+parse_config(const char *q_arg)
+{
+       char s[256];
+       const char *p, *p0 = q_arg;
+       char *end;
+       enum fieldnames {
+               FLD_PORT = 0,
+               FLD_QUEUE,
+               FLD_LCORE,
+               _NUM_FLD
+       };
+       unsigned long int_fld[_NUM_FLD];
+       char *str_fld[_NUM_FLD];
+       int i;
+       unsigned size;
+
+       /* discard any previously parsed configuration */
+       nb_lcore_params = 0;
+
+       while ((p = strchr(p0,'(')) != NULL) {
+               ++p;
+               /* locate the matching closing parenthesis */
+               if((p0 = strchr(p,')')) == NULL)
+                       return -1;
+
+               size = p0 - p;
+               if(size >= sizeof(s))
+                       return -1;
+
+               /* copy the tuple body into a bounded scratch buffer */
+               rte_snprintf(s, sizeof(s), "%.*s", size, p);
+               /* split on ',' -- exactly three fields are required */
+               if (rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',') != _NUM_FLD)
+                       return -1;
+               for (i = 0; i < _NUM_FLD; i++){
+                       errno = 0;
+                       /* each field must be an integer in [0, 255] */
+                       int_fld[i] = strtoul(str_fld[i], &end, 0);
+                       if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
+                               return -1;
+               }
+               if (nb_lcore_params >= MAX_LCORE_PARAMS) {
+                       printf("exceeded max number of lcore params: %hu\n",
+                               nb_lcore_params);
+                       return -1;
+               }
+               lcore_params_array[nb_lcore_params].port_id = (uint8_t)int_fld[FLD_PORT];
+               lcore_params_array[nb_lcore_params].queue_id = (uint8_t)int_fld[FLD_QUEUE];
+               lcore_params_array[nb_lcore_params].lcore_id = (uint8_t)int_fld[FLD_LCORE];
+               ++nb_lcore_params;
+       }
+       /* switch from the default table to the user-supplied one */
+       lcore_params = lcore_params_array;
+       return 0;
+}
+
+/* Parse the argument given in the command line of the application */
+static int
+parse_args(int argc, char **argv)
+{
+       int opt, ret;
+       char **argvopt;
+       int option_index;
+       char *prgname = argv[0];
+       /* long options: --config takes a value, --no-numa is a flag */
+       static struct option lgopts[] = {
+               {"config", 1, 0, 0},
+               {"no-numa", 0, 0, 0},
+               {NULL, 0, 0, 0}
+       };
+
+       argvopt = argv;
+
+       while ((opt = getopt_long(argc, argvopt, "p:",
+                               lgopts, &option_index)) != EOF) {
+
+               switch (opt) {
+               /* portmask */
+               case 'p':
+                       enabled_port_mask = parse_portmask(optarg);
+                       /*
+                        * NOTE(review): parse_portmask() returns -1 on error,
+                        * which is stored in the unsigned mask as 0xffffffff
+                        * and is NOT caught by this == 0 check -- confirm
+                        * whether that is intended.
+                        */
+                       if (enabled_port_mask == 0) {
+                               printf("invalid portmask\n");
+                               print_usage(prgname);
+                               return -1;
+                       }
+                       break;
+
+               /* long options */
+               case 0:
+                       if (!strcmp(lgopts[option_index].name, "config")) {
+                               ret = parse_config(optarg);
+                               if (ret) {
+                                       printf("invalid config\n");
+                                       print_usage(prgname);
+                                       return -1;
+                               }
+                       }
+
+                       if (!strcmp(lgopts[option_index].name, "no-numa")) {
+                               printf("numa is disabled \n");
+                               numa_on = 0;
+                       }
+                       break;
+
+               default:
+                       print_usage(prgname);
+                       return -1;
+               }
+       }
+
+       /* put the program name back so the remaining argv looks like a fresh argv[] */
+       if (optind >= 0)
+               argv[optind-1] = prgname;
+
+       ret = optind-1;
+       optind = 0; /* reset getopt lib */
+       return ret;
+}
+
+/* Print "<name>XX:XX:XX:XX:XX:XX" for an Ethernet address (no newline). */
+static void
+print_ethaddr(const char *name, const struct ether_addr *eth_addr)
+{
+       printf ("%s%02X:%02X:%02X:%02X:%02X:%02X", name,
+               eth_addr->addr_bytes[0],
+               eth_addr->addr_bytes[1],
+               eth_addr->addr_bytes[2],
+               eth_addr->addr_bytes[3],
+               eth_addr->addr_bytes[4],
+               eth_addr->addr_bytes[5]);
+}
+
+#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
+/*
+ * Create the exact-match hash table for @socketid and load the static
+ * route array into it.  Exits the application on any failure.
+ */
+static void
+setup_hash(int socketid)
+{
+       unsigned i;
+       int ret;
+       char s[64];
+
+       /* create  hashes */
+       rte_snprintf(s, sizeof(s), "l3fwd_hash_%d", socketid);
+       /*
+        * NOTE(review): 's' is a stack buffer; this assumes
+        * rte_hash_create() copies the name string rather than keeping
+        * the pointer -- confirm against the rte_hash API.
+        */
+       l3fwd_hash_params.name = s;
+       l3fwd_hash_params.socket_id = socketid;
+       l3fwd_lookup_struct[socketid] = rte_hash_create(&l3fwd_hash_params);
+       if (l3fwd_lookup_struct[socketid] == NULL)
+               rte_exit(EXIT_FAILURE, "Unable to create the l3fwd hash on "
+                               "socket %d\n", socketid);
+
+       /* populate the hash */
+       for (i = 0; i < L3FWD_NUM_ROUTES; i++) {
+               ret = rte_hash_add_key (l3fwd_lookup_struct[socketid],
+                               (void *) &l3fwd_route_array[i].key);
+               if (ret < 0) {
+                       rte_exit(EXIT_FAILURE, "Unable to add entry %u to the"
+                               "l3fwd hash on socket %d\n", i, socketid);
+               }
+               /* ret is the key's index: map it to the output interface */
+               l3fwd_out_if[ret] = l3fwd_route_array[i].if_out;
+               printf("Hash: Adding key\n");
+               print_key(l3fwd_route_array[i].key);
+       }
+}
+#endif
+
+#if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
+/*
+ * Create the longest-prefix-match table for @socketid and load the
+ * static route array into it.  Exits the application on any failure.
+ */
+static void
+setup_lpm(int socketid)
+{
+       unsigned i;
+       int ret;
+       char s[64];
+
+       /* create the LPM table */
+       rte_snprintf(s, sizeof(s), "L3FWD_LPM_%d", socketid);
+       l3fwd_lookup_struct[socketid] = rte_lpm_create(s, socketid,
+                               L3FWD_LPM_MAX_RULES, RTE_LPM_MEMZONE);
+       if (l3fwd_lookup_struct[socketid] == NULL)
+               rte_exit(EXIT_FAILURE, "Unable to create the l3fwd LPM table"
+                               " on socket %d\n", socketid);
+
+       /* populate the LPM table */
+       for (i = 0; i < L3FWD_NUM_ROUTES; i++) {
+               /* each route maps an ip/depth prefix to an output port */
+               ret = rte_lpm_add(l3fwd_lookup_struct[socketid],
+                       l3fwd_route_array[i].ip,
+                       l3fwd_route_array[i].depth,
+                       l3fwd_route_array[i].if_out);
+
+               if (ret < 0) {
+                       rte_exit(EXIT_FAILURE, "Unable to add entry %u to the "
+                               "l3fwd LPM table on socket %d\n",
+                               i, socketid);
+               }
+
+               printf("LPM: Adding route 0x%08x / %d (%d)\n",
+                       (unsigned)l3fwd_route_array[i].ip,
+                       l3fwd_route_array[i].depth,
+                       l3fwd_route_array[i].if_out);
+       }
+}
+#endif
+
+/*
+ * One-time memory setup: for every enabled lcore, make sure its socket
+ * (socket 0 when NUMA is off) has an mbuf pool and a lookup table, then
+ * point the lcore's conf at that socket's lookup table.
+ */
+static int
+init_mem(void)
+{
+       struct lcore_conf *qconf;
+       int socketid;
+       unsigned lcore_id;
+       char s[64];
+
+       for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+               if (rte_lcore_is_enabled(lcore_id) == 0)
+                       continue;
+
+               if (numa_on)
+                       socketid = rte_lcore_to_socket_id(lcore_id);
+               else
+                       socketid = 0;
+
+               if (socketid >= NB_SOCKETS) {
+                       rte_exit(EXIT_FAILURE, "Socket %d of lcore %u is out of range %d\n",
+                               socketid, lcore_id, NB_SOCKETS);
+               }
+               /* first lcore seen on a socket allocates the shared pool */
+               if (pktmbuf_pool[socketid] == NULL) {
+                       rte_snprintf(s, sizeof(s), "mbuf_pool_%d", socketid);
+                       pktmbuf_pool[socketid] =
+                               rte_mempool_create(s, NB_MBUF, MBUF_SIZE, 32,
+                                       sizeof(struct rte_pktmbuf_pool_private),
+                                       rte_pktmbuf_pool_init, NULL,
+                                       rte_pktmbuf_init, NULL,
+                                       socketid, 0);
+                       if (pktmbuf_pool[socketid] == NULL)
+                               rte_exit(EXIT_FAILURE, "Cannot init mbuf pool on socket %d\n", socketid);
+                       else
+                               printf("Allocated mbuf pool on socket %d\n", socketid);
+
+                       /* build this socket's lookup table alongside its pool */
+#if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
+                       setup_lpm(socketid);
+#else
+                       setup_hash(socketid);
+#endif
+               }
+               qconf = &lcore_conf[lcore_id];
+               qconf->lookup_struct = l3fwd_lookup_struct[socketid];
+       }
+       return 0;
+}
+
+int
+MAIN(int argc, char **argv)
+{
+       struct lcore_conf *qconf;
+       int ret;
+       unsigned nb_ports;
+       uint16_t queueid;
+       unsigned lcore_id;
+       uint8_t portid, nb_rx_queue, queue, socketid;
+
+       /* init EAL */
+       ret = rte_eal_init(argc, argv);
+       if (ret < 0)
+               rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n");
+       argc -= ret;
+       argv += ret;
+
+       /* parse application arguments (after the EAL ones) */
+       ret = parse_args(argc, argv);
+       if (ret < 0)
+               rte_exit(EXIT_FAILURE, "Invalid L3FWD-VF parameters\n");
+
+       /* validate the (port,queue,lcore) configuration */
+       if (check_lcore_params() < 0)
+               rte_exit(EXIT_FAILURE, "check_lcore_params failed\n");
+
+       ret = init_lcore_rx_queues();
+       if (ret < 0)
+               rte_exit(EXIT_FAILURE, "init_lcore_rx_queues failed\n");
+
+       /* create per-socket mbuf pools and lookup tables */
+       ret = init_mem();
+       if (ret < 0)
+               rte_exit(EXIT_FAILURE, "init_mem failed\n");
+
+       /* init driver */
+#ifdef RTE_LIBRTE_IGB_PMD
+       if (rte_igb_pmd_init() < 0)
+               rte_exit(EXIT_FAILURE, "Cannot init igb pmd\n");
+#endif
+#ifdef RTE_LIBRTE_IXGBE_PMD
+       if (rte_ixgbe_pmd_init() < 0)
+               rte_exit(EXIT_FAILURE, "Cannot init ixgbe pmd\n");
+
+       if (rte_ixgbevf_pmd_init() < 0)
+               rte_exit(EXIT_FAILURE, "Cannot init ixgbevf pmd\n");
+#endif
+
+       if (rte_eal_pci_probe() < 0)
+               rte_exit(EXIT_FAILURE, "Cannot probe PCI\n");
+
+       /* cap the detected port count at what the app supports */
+       nb_ports = rte_eth_dev_count();
+       if (nb_ports > MAX_PORTS)
+               nb_ports = MAX_PORTS;
+
+       if (check_port_config(nb_ports) < 0)
+               rte_exit(EXIT_FAILURE, "check_port_config failed\n");
+
+       /* initialize all ports */
+       for (portid = 0; portid < nb_ports; portid++) {
+               /* skip ports that are not enabled */
+               if ((enabled_port_mask & (1 << portid)) == 0) {
+                       printf("\nSkipping disabled port %d\n", portid);
+                       continue;
+               }
+
+               /* init port */
+               printf("Initializing port %d ... ", portid );
+               fflush(stdout);
+
+               /* must always equal(=1) */
+               nb_rx_queue = get_port_n_rx_queues(portid);
+
+               printf("Creating queues: nb_rxq=%d nb_txq=%u... ",
+                       nb_rx_queue, (unsigned)1 );
+               ret = rte_eth_dev_configure(portid, nb_rx_queue, 1, &port_conf);
+               if (ret < 0)
+                       rte_exit(EXIT_FAILURE, "Cannot configure device: err=%d, port=%d\n",
+                               ret, portid);
+
+               rte_eth_macaddr_get(portid, &ports_eth_addr[portid]);
+               print_ethaddr(" Address:", &ports_eth_addr[portid]);
+               printf(", ");
+
+               /* init one TX queue */
+               /* the TX ring is always allocated on socket 0 here */
+               socketid = 0;
+
+               printf("txq=%d,%d ", 0, socketid);
+               fflush(stdout);
+               ret = rte_eth_tx_queue_setup(portid, 0, nb_txd,
+                                                socketid, &tx_conf);
+               if (ret < 0)
+                       rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: err=%d, "
+                               "port=%d\n", ret, portid);
+
+               printf("\n");
+       }
+
+       /* set up the RX queues each enabled lcore will poll */
+       for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+               if (rte_lcore_is_enabled(lcore_id) == 0)
+                       continue;
+               qconf = &lcore_conf[lcore_id];
+               qconf->tx_queue_id = 0;
+
+               printf("\nInitializing rx queues on lcore %u ... ", lcore_id );
+               fflush(stdout);
+               /* init RX queues */
+               for(queue = 0; queue < qconf->n_rx_queue; ++queue) {
+                       portid = qconf->rx_queue_list[queue].port_id;
+                       queueid = qconf->rx_queue_list[queue].queue_id;
+
+                       /* RX ring memory comes from the lcore's own socket (NUMA) */
+                       if (numa_on)
+                               socketid = (uint8_t)rte_lcore_to_socket_id(lcore_id);
+                       else
+                               socketid = 0;
+
+                       printf("rxq=%d,%d,%d ", portid, queueid, socketid);
+                       fflush(stdout);
+
+                       ret = rte_eth_rx_queue_setup(portid, queueid, nb_rxd,
+                                               socketid, &rx_conf, pktmbuf_pool[socketid]);
+                       if (ret < 0)
+                               rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup: err=%d,"
+                                               "port=%d\n", ret, portid);
+               }
+       }
+       printf("\n");
+
+       /* start ports */
+       for (portid = 0; portid < nb_ports; portid++) {
+               if ((enabled_port_mask & (1 << portid)) == 0) {
+                       continue;
+               }
+               /* Start device */
+               ret = rte_eth_dev_start(portid);
+               if (ret < 0)
+                       rte_exit(EXIT_FAILURE, "rte_eth_dev_start: err=%d, port=%d\n",
+                               ret, portid);
+
+               printf("done: Port %d\n", portid);
+
+       }
+
+       /* launch per-lcore init on every lcore */
+       rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
+       /* wait for all worker lcores; a non-zero return aborts */
+       RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+               if (rte_eal_wait_lcore(lcore_id) < 0)
+                       return -1;
+       }
+
+       return 0;
+}
diff --git a/examples/l3fwd-vf/main.h b/examples/l3fwd-vf/main.h
new file mode 100644 (file)
index 0000000..6027cb5
--- /dev/null
@@ -0,0 +1,47 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#ifndef _MAIN_H_
+#define _MAIN_H_
+
+#ifdef RTE_EXEC_ENV_BAREMETAL
+#define MAIN _main
+#else
+#define MAIN main
+#endif
+
+int MAIN(int argc, char **argv);
+
+#endif /* _MAIN_H_ */
diff --git a/examples/l3fwd/482251_L3Forwarding_Sample_App_Guide_Rev1.2.pdf b/examples/l3fwd/482251_L3Forwarding_Sample_App_Guide_Rev1.2.pdf
new file mode 100644 (file)
index 0000000..304b436
Binary files /dev/null and b/examples/l3fwd/482251_L3Forwarding_Sample_App_Guide_Rev1.2.pdf differ
diff --git a/examples/l3fwd/Makefile b/examples/l3fwd/Makefile
new file mode 100644 (file)
index 0000000..de46278
--- /dev/null
@@ -0,0 +1,58 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+ifeq ($(RTE_SDK),)
+$(error "Please define RTE_SDK environment variable")
+endif
+
+# Default target, can be overridden by command line or environment
+RTE_TARGET ?= x86_64-default-linuxapp-gcc
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# binary name
+APP = l3fwd
+
+# all source are stored in SRCS-y
+SRCS-y := main.c
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+# workaround for a gcc bug with noreturn attribute
+# http://gcc.gnu.org/bugzilla/show_bug.cgi?id=12603
+ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
+CFLAGS_main.o += -Wno-return-type
+endif
+
+include $(RTE_SDK)/mk/rte.extapp.mk
diff --git a/examples/l3fwd/main.c b/examples/l3fwd/main.c
new file mode 100644 (file)
index 0000000..3b6e5b6
--- /dev/null
@@ -0,0 +1,1118 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <sys/types.h>
+#include <string.h>
+#include <sys/queue.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <getopt.h>
+
+#include <rte_common.h>
+#include <rte_byteorder.h>
+#include <rte_log.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_memzone.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_launch.h>
+#include <rte_atomic.h>
+#include <rte_cycles.h>
+#include <rte_prefetch.h>
+#include <rte_lcore.h>
+#include <rte_per_lcore.h>
+#include <rte_branch_prediction.h>
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_random.h>
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_ring.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_ip.h>
+#include <rte_tcp.h>
+#include <rte_udp.h>
+#include <rte_string_fns.h>
+
+#include "main.h"
+
+#define APP_LOOKUP_EXACT_MATCH          0
+#define APP_LOOKUP_LPM                  1
+#define DO_RFC_1812_CHECKS
+
+//#define APP_LOOKUP_METHOD             APP_LOOKUP_EXACT_MATCH
+#ifndef APP_LOOKUP_METHOD
+#define APP_LOOKUP_METHOD             APP_LOOKUP_LPM
+#endif
+
+#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
+#include <rte_hash.h>
+#include <rte_hash_crc.h>
+#include <rte_jhash.h>
+#elif (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
+#include <rte_lpm.h>
+#else
+#error "APP_LOOKUP_METHOD set to incorrect value"
+#endif
+
+#define RTE_LOGTYPE_L3FWD RTE_LOGTYPE_USER1
+
+#define MAX_PORTS 32
+
+#define MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
+#define NB_MBUF   8192
+
+/*
+ * RX and TX Prefetch, Host, and Write-back threshold values should be
+ * carefully set for optimal performance. Consult the network
+ * controller's datasheet and supporting DPDK documentation for guidance
+ * on how these parameters should be set.
+ */
+#define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */
+#define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */
+#define RX_WTHRESH 4 /**< Default values of RX write-back threshold reg. */
+
+/*
+ * These default values are optimized for use with the Intel(R) 82599 10 GbE
+ * Controller and the DPDK ixgbe PMD. Consider using other values for other
+ * network controllers and/or network drivers.
+ */
+#define TX_PTHRESH 36 /**< Default values of TX prefetch threshold reg. */
+#define TX_HTHRESH 0  /**< Default values of TX host threshold reg. */
+#define TX_WTHRESH 0  /**< Default values of TX write-back threshold reg. */
+
+#define MAX_PKT_BURST 32
+#define BURST_TX_DRAIN 200000ULL /* around 100us at 2 Ghz */
+
+#define NB_SOCKETS 8
+
+#define SOCKET0 0
+
+/* Configure how many packets ahead to prefetch, when reading packets */
+#define PREFETCH_OFFSET        3
+
+/*
+ * Configurable number of RX/TX ring descriptors
+ */
+#define RTE_TEST_RX_DESC_DEFAULT 128
+#define RTE_TEST_TX_DESC_DEFAULT 512
+static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
+static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
+
+/* ethernet addresses of ports */
+static struct ether_addr ports_eth_addr[MAX_PORTS];
+
+/* mask of enabled ports */
+static uint32_t enabled_port_mask = 0;
+static int promiscuous_on = 0; /**< Ports set in promiscuous mode off by default. */
+static int numa_on = 1; /**< NUMA is enabled by default. */
+
+struct mbuf_table {
+       uint16_t len;
+       struct rte_mbuf *m_table[MAX_PKT_BURST];
+};
+
+struct lcore_rx_queue {
+       uint8_t port_id;
+       uint8_t queue_id;
+} __rte_cache_aligned;
+
+#define MAX_RX_QUEUE_PER_LCORE 16
+#define MAX_TX_QUEUE_PER_PORT MAX_PORTS
+#define MAX_RX_QUEUE_PER_PORT 128
+
+#define MAX_LCORE_PARAMS 1024
+struct lcore_params {
+       uint8_t port_id;
+       uint8_t queue_id;
+       uint8_t lcore_id;
+} __rte_cache_aligned;
+
+static struct lcore_params lcore_params_array[MAX_LCORE_PARAMS];
+static struct lcore_params lcore_params_array_default[] = {
+       {0, 0, 2},
+       {0, 1, 2},
+       {0, 2, 2},
+       {1, 0, 2},
+       {1, 1, 2},
+       {1, 2, 2},
+       {2, 0, 2},
+       {3, 0, 3},
+       {3, 1, 3},
+};
+
+static struct lcore_params * lcore_params = lcore_params_array_default;
+static uint16_t nb_lcore_params = sizeof(lcore_params_array_default) /
+                               sizeof(lcore_params_array_default[0]);
+
+static struct rte_eth_conf port_conf = {
+       .rxmode = {
+               .split_hdr_size = 0,
+               .header_split   = 0, /**< Header Split disabled */
+               .hw_ip_checksum = 1, /**< IP checksum offload enabled */
+               .hw_vlan_filter = 0, /**< VLAN filtering disabled */
+               .jumbo_frame    = 0, /**< Jumbo Frame Support disabled */
+               .hw_strip_crc   = 0, /**< CRC stripping by hardware disabled */
+       },
+       .rx_adv_conf = {
+               .rss_conf = {
+                       .rss_key = NULL, /**< use the default RSS hash key */
+                       .rss_hf = ETH_RSS_IPV4, /**< hash on IPv4 fields only */
+               },
+       },
+       .txmode = {
+       },
+};
+
+static const struct rte_eth_rxconf rx_conf = {
+       .rx_thresh = {
+               .pthresh = RX_PTHRESH,
+               .hthresh = RX_HTHRESH,
+               .wthresh = RX_WTHRESH,
+       },
+};
+
+static const struct rte_eth_txconf tx_conf = {
+       .tx_thresh = {
+               .pthresh = TX_PTHRESH,
+               .hthresh = TX_HTHRESH,
+               .wthresh = TX_WTHRESH,
+       },
+       .tx_free_thresh = 0, /* Use PMD default values */
+       .tx_rs_thresh = 0, /* Use PMD default values */
+};
+
+static struct rte_mempool * pktmbuf_pool[NB_SOCKETS];
+
+
+#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
+struct ipv4_5tuple {
+       uint32_t ip_dst;
+       uint32_t ip_src;
+       uint16_t port_dst;
+       uint16_t port_src;
+       uint8_t proto;
+} __attribute__((__packed__));
+
+struct l3fwd_route {
+       struct ipv4_5tuple key;
+       uint8_t if_out;
+};
+
+static struct l3fwd_route l3fwd_route_array[] = {
+       {{IPv4(100,10,0,1), IPv4(200,10,0,1), 101, 11, IPPROTO_TCP}, 0},
+       {{IPv4(100,20,0,2), IPv4(200,20,0,2), 102, 12, IPPROTO_TCP}, 1},
+       {{IPv4(100,30,0,3), IPv4(200,30,0,3), 103, 13, IPPROTO_TCP}, 2},
+       {{IPv4(100,40,0,4), IPv4(200,40,0,4), 104, 14, IPPROTO_TCP}, 3},
+};
+
+typedef struct rte_hash lookup_struct_t;
+static lookup_struct_t *l3fwd_lookup_struct[NB_SOCKETS];
+
+#define L3FWD_HASH_ENTRIES     1024
+struct rte_hash_parameters l3fwd_hash_params = {
+       .name = "l3fwd_hash_0",
+       .entries = L3FWD_HASH_ENTRIES,
+       .bucket_entries = 4,
+       .key_len = sizeof(struct ipv4_5tuple),
+       .hash_func = rte_hash_crc,
+       .hash_func_init_val = 0,
+       .socket_id = SOCKET0,
+};
+
+#define L3FWD_NUM_ROUTES \
+       (sizeof(l3fwd_route_array) / sizeof(l3fwd_route_array[0]))
+
+static uint8_t l3fwd_out_if[L3FWD_HASH_ENTRIES] __rte_cache_aligned;
+#endif
+
+#if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
+/* LPM route entry: IPv4 prefix (address + depth) -> output interface. */
+struct l3fwd_route {
+       uint32_t ip;
+       uint8_t  depth;
+       uint8_t  if_out;
+};
+
+/* Hard-coded demo routes loaded into the LPM table at startup (setup_lpm()). */
+static struct l3fwd_route l3fwd_route_array[] = {
+       {IPv4(1,1,1,0), 24, 0},
+       {IPv4(2,1,1,0), 24, 1},
+       {IPv4(3,1,1,0), 24, 2},
+       {IPv4(4,1,1,0), 24, 3},
+       {IPv4(5,1,1,0), 24, 4},
+       {IPv4(6,1,1,0), 24, 5},
+       {IPv4(7,1,1,0), 24, 6},
+       {IPv4(8,1,1,0), 24, 7},
+};
+
+#define L3FWD_NUM_ROUTES \
+       (sizeof(l3fwd_route_array) / sizeof(l3fwd_route_array[0]))
+
+#define L3FWD_LPM_MAX_RULES     1024
+
+/* In LPM mode the per-socket lookup structure is an rte_lpm table. */
+typedef struct rte_lpm lookup_struct_t;
+static lookup_struct_t *l3fwd_lookup_struct[NB_SOCKETS];
+#endif
+
+/* Per-lcore runtime state: assigned RX queues, the TX queue id to use on
+ * each port, a per-port TX staging buffer, and the socket-local lookup
+ * structure.  Cache-aligned to avoid false sharing between lcores. */
+struct lcore_conf {
+       uint16_t n_rx_queue;
+       struct lcore_rx_queue rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
+       uint16_t tx_queue_id[MAX_PORTS];
+       struct mbuf_table tx_mbufs[MAX_PORTS];
+       lookup_struct_t * lookup_struct;
+} __rte_cache_aligned;
+
+static struct lcore_conf lcore_conf[RTE_MAX_LCORE];
+
+/* Send burst of packets on an output interface.
+ *
+ * Transmits the n mbufs staged in qconf->tx_mbufs[port] on the TX queue
+ * this lcore owns for that port.  Any packets the PMD could not accept
+ * are freed (dropped) rather than retried.  Always returns 0; the caller
+ * is expected to reset the staging length itself. */
+static inline int
+send_burst(struct lcore_conf *qconf, uint16_t n, uint8_t port)
+{
+       struct rte_mbuf **m_table;
+       int ret;
+       uint16_t queueid;
+
+       queueid = qconf->tx_queue_id[port];
+       m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table;
+
+       ret = rte_eth_tx_burst(port, queueid, m_table, n);
+       if (unlikely(ret < n)) {
+               /* Drop the unsent tail of the burst. */
+               do {
+                       rte_pktmbuf_free(m_table[ret]);
+               } while (++ret < n);
+       }
+
+       return 0;
+}
+
+/* Enqueue a single packet, and send burst if queue is filled.
+ *
+ * Appends m to the calling lcore's per-port staging table; once
+ * MAX_PKT_BURST packets have accumulated they are flushed via
+ * send_burst().  Must be called from an lcore with a valid entry in
+ * lcore_conf (uses rte_lcore_id()).  Always returns 0. */
+static inline int
+send_single_packet(struct rte_mbuf *m, uint8_t port)
+{
+       uint32_t lcore_id;
+       uint16_t len;
+       struct lcore_conf *qconf;
+
+       lcore_id = rte_lcore_id();
+
+       qconf = &lcore_conf[lcore_id];
+       len = qconf->tx_mbufs[port].len;
+       qconf->tx_mbufs[port].m_table[len] = m;
+       len++;
+
+       /* enough pkts to be sent */
+       if (unlikely(len == MAX_PKT_BURST)) {
+               send_burst(qconf, MAX_PKT_BURST, port);
+               len = 0;
+       }
+
+       qconf->tx_mbufs[port].len = len;
+       return 0;
+}
+
+#ifdef DO_RFC_1812_CHECKS
+/* Validate an IPv4 header per RFC 1812 section 5.2.2.
+ *
+ * pkt      - pointer to the IPv4 header (network byte order fields)
+ * link_len - payload length reported by the link layer
+ *
+ * Returns 0 when the header passes all checks, or a negative code
+ * identifying the first failed check (-1, -3, -4, -5). */
+static inline int
+is_valid_ipv4_pkt(struct ipv4_hdr *pkt, uint32_t link_len)
+{
+       /* From http://www.rfc-editor.org/rfc/rfc1812.txt section 5.2.2 */
+       /*
+        * 1. The packet length reported by the Link Layer must be large
+        * enough to hold the minimum length legal IP datagram (20 bytes).
+        */
+       if (link_len < sizeof(struct ipv4_hdr))
+               return -1;
+
+       /* 2. The IP checksum must be correct. */
+       /* this is checked in H/W */
+
+       /*
+        * 3. The IP version number must be 4. If the version number is not 4
+        * then the packet may be another version of IP, such as IPng or
+        * ST-II.
+        */
+       if (((pkt->version_ihl) >> 4) != 4)
+               return -3;
+       /*
+        * 4. The IP header length field must be large enough to hold the
+        * minimum length legal IP datagram (20 bytes = 5 words).
+        */
+       if ((pkt->version_ihl & 0xf) < 5)
+               return -4;
+
+       /*
+        * 5. The IP total length field must be large enough to hold the IP
+        * datagram header, whose length is specified in the IP header length
+        * field.
+        *
+        * Fix: total_length is a big-endian wire field being converted to
+        * host order, so the correct conversion is rte_be_to_cpu_16() (the
+        * original used rte_cpu_to_be_16(); the two perform the identical
+        * byte swap, so behavior is unchanged, but the name documents the
+        * wrong direction).
+        */
+       if (rte_be_to_cpu_16(pkt->total_length) < sizeof(struct ipv4_hdr))
+               return -5;
+
+       return 0;
+}
+#endif
+
+#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
+/* Debug helper: print the fields of a 5-tuple key on one line. */
+static void
+print_key(struct ipv4_5tuple key)
+{
+       printf("IP dst = %08x, IP src = %08x, port dst = %d, port src = %d, proto = %d\n",
+              (unsigned)key.ip_dst, (unsigned)key.ip_src, key.port_dst, key.port_src, key.proto);
+}
+
+/* Exact-match variant: build a 5-tuple key from the IPv4 header and look
+ * it up in the per-socket hash.  TCP/UDP ports are taken from the L4
+ * header immediately after the (assumed option-less, 20-byte) IPv4
+ * header; other protocols use zero ports.  On a lookup miss the packet's
+ * input port is returned so it is echoed back. */
+static inline uint8_t
+get_dst_port(struct ipv4_hdr *ipv4_hdr,  uint8_t portid, lookup_struct_t * l3fwd_lookup_struct)
+{
+       struct ipv4_5tuple key;
+       struct tcp_hdr *tcp;
+       struct udp_hdr *udp;
+       int ret = 0;
+
+       key.ip_dst = rte_be_to_cpu_32(ipv4_hdr->dst_addr);
+       key.ip_src = rte_be_to_cpu_32(ipv4_hdr->src_addr);
+       key.proto = ipv4_hdr->next_proto_id;
+
+       switch (ipv4_hdr->next_proto_id) {
+       case IPPROTO_TCP:
+               /* NOTE(review): assumes no IPv4 options (IHL == 5) — the L4
+                * header is located at a fixed 20-byte offset. */
+               tcp = (struct tcp_hdr *)((unsigned char *) ipv4_hdr +
+                                       sizeof(struct ipv4_hdr));
+               key.port_dst = rte_be_to_cpu_16(tcp->dst_port);
+               key.port_src = rte_be_to_cpu_16(tcp->src_port);
+               break;
+
+       case IPPROTO_UDP:
+               udp = (struct udp_hdr *)((unsigned char *) ipv4_hdr +
+                                       sizeof(struct ipv4_hdr));
+               key.port_dst = rte_be_to_cpu_16(udp->dst_port);
+               key.port_src = rte_be_to_cpu_16(udp->src_port);
+               break;
+
+       default:
+               key.port_dst = 0;
+               key.port_src = 0;
+       }
+
+       /* Find destination port */
+       ret = rte_hash_lookup(l3fwd_lookup_struct, (const void *)&key);
+       return (uint8_t)((ret < 0)? portid : l3fwd_out_if[ret]);
+}
+#endif
+
+#if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
+/* LPM variant: longest-prefix-match on the destination address.
+ * Returns the matched next hop, or the input port on a miss so the
+ * packet is echoed back. */
+static inline uint8_t
+get_dst_port(struct ipv4_hdr *ipv4_hdr,  uint8_t portid, lookup_struct_t * l3fwd_lookup_struct)
+{
+       uint8_t next_hop;
+
+       return (uint8_t) ((rte_lpm_lookup(l3fwd_lookup_struct,
+                       rte_be_to_cpu_32(ipv4_hdr->dst_addr), &next_hop) == 0)?
+                       next_hop : portid);
+}
+#endif
+
+/* Forward one IPv4 packet: validate (optionally), resolve the output
+ * port via the lookup structure, rewrite the Ethernet addresses, and
+ * stage the packet for transmission.  Invalid packets are freed. */
+static inline void
+l3fwd_simple_forward(struct rte_mbuf *m, uint8_t portid, lookup_struct_t * l3fwd_lookup_struct)
+{
+       struct ether_hdr *eth_hdr;
+       struct ipv4_hdr *ipv4_hdr;
+       void *tmp;
+       uint8_t dst_port;
+
+       eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
+
+       ipv4_hdr = (struct ipv4_hdr *)(rte_pktmbuf_mtod(m, unsigned char *) +
+                               sizeof(struct ether_hdr));
+
+#ifdef DO_RFC_1812_CHECKS
+       /* Check to make sure the packet is valid (RFC1812) */
+       if (is_valid_ipv4_pkt(ipv4_hdr, m->pkt.pkt_len) < 0) {
+               rte_pktmbuf_free(m);
+               return;
+       }
+#endif
+
+       /* Fall back to the input port if the resolved port is out of range
+        * or not enabled in the port mask. */
+       dst_port = get_dst_port(ipv4_hdr, portid, l3fwd_lookup_struct);
+       if (dst_port >= MAX_PORTS || (enabled_port_mask & 1 << dst_port) == 0)
+               dst_port = portid;
+
+       /* 00:09:c0:00:00:xx */
+       /* NOTE(review): this 64-bit store writes 8 bytes starting at the
+        * 6-byte d_addr, spilling into the first 2 bytes of s_addr — safe
+        * only because s_addr is rewritten below; also assumes
+        * little-endian byte order.  Confirm struct layout if touched. */
+       tmp = &eth_hdr->d_addr.addr_bytes[0];
+       *((uint64_t *)tmp) = 0x000000c00900 + (dst_port << 24);
+
+#ifdef DO_RFC_1812_CHECKS
+       /* Update time to live and header checksum */
+       /* Incremental checksum adjustment for a TTL decrement
+        * (RFC 1141-style shortcut). */
+       --(ipv4_hdr->time_to_live);
+       ++(ipv4_hdr->hdr_checksum);
+#endif
+
+       /* src addr */
+       ether_addr_copy(&ports_eth_addr[dst_port], &eth_hdr->s_addr);
+
+       send_single_packet(m, dst_port);
+
+}
+
+/* Main per-lcore processing loop: periodically drain staged TX buffers,
+ * then poll every RX queue assigned to this lcore and forward each
+ * received packet.  Never returns; an lcore with no RX queues spins
+ * forever doing nothing. */
+static __attribute__((noreturn)) int
+main_loop(__attribute__((unused)) void *dummy)
+{
+       struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
+       unsigned lcore_id;
+       uint64_t prev_tsc = 0;
+       uint64_t diff_tsc, cur_tsc;
+       int i, j, nb_rx;
+       uint8_t portid, queueid;
+       struct lcore_conf *qconf;
+
+       lcore_id = rte_lcore_id();
+       qconf = &lcore_conf[lcore_id];
+
+       if (qconf->n_rx_queue == 0) {
+               RTE_LOG(INFO, L3FWD, "lcore %u has nothing to do\n", lcore_id);
+               while(1);
+       }
+
+       RTE_LOG(INFO, L3FWD, "entering main loop on lcore %u\n", lcore_id);
+
+       for (i = 0; i < qconf->n_rx_queue; i++) {
+
+               portid = qconf->rx_queue_list[i].port_id;
+               queueid = qconf->rx_queue_list[i].queue_id;
+               RTE_LOG(INFO, L3FWD, " -- lcoreid=%u portid=%hhu rxqueueid=%hhu\n", lcore_id,
+                       portid, queueid);
+       }
+
+       while (1) {
+
+               cur_tsc = rte_rdtsc();
+
+               /*
+                * TX burst queue drain: flush partially filled TX buffers
+                * that have been sitting longer than BURST_TX_DRAIN cycles.
+                */
+               diff_tsc = cur_tsc - prev_tsc;
+               if (unlikely(diff_tsc > BURST_TX_DRAIN)) {
+
+                       /*
+                        * This could be optimized (use queueid instead of
+                        * portid), but it is not called so often
+                        */
+                       for (portid = 0; portid < MAX_PORTS; portid++) {
+                               if (qconf->tx_mbufs[portid].len == 0)
+                                       continue;
+                               /* &lcore_conf[lcore_id] is the same as qconf */
+                               send_burst(&lcore_conf[lcore_id],
+                                       qconf->tx_mbufs[portid].len,
+                                       portid);
+                               qconf->tx_mbufs[portid].len = 0;
+                       }
+
+                       prev_tsc = cur_tsc;
+               }
+
+               /*
+                * Read packet from RX queues
+                */
+               for (i = 0; i < qconf->n_rx_queue; ++i) {
+
+                       portid = qconf->rx_queue_list[i].port_id;
+                       queueid = qconf->rx_queue_list[i].queue_id;
+                       nb_rx = rte_eth_rx_burst(portid, queueid, pkts_burst, MAX_PKT_BURST);
+
+                       /* Prefetch first packets */
+                       for (j = 0; j < PREFETCH_OFFSET && j < nb_rx; j++) {
+                               rte_prefetch0(rte_pktmbuf_mtod(
+                                               pkts_burst[j], void *));
+                       }
+
+                       /* Prefetch and forward already prefetched packets */
+                       for (j = 0; j < (nb_rx - PREFETCH_OFFSET); j++) {
+                               rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[
+                                               j + PREFETCH_OFFSET], void *));
+                               l3fwd_simple_forward(pkts_burst[j], portid, qconf->lookup_struct);
+                       }
+
+                       /* Forward remaining prefetched packets */
+                       for (; j < nb_rx; j++) {
+                               l3fwd_simple_forward(pkts_burst[j], portid, qconf->lookup_struct);
+                       }
+               }
+       }
+}
+
+/* Validate every (port, queue, lcore) tuple supplied via --config:
+ * the queue index must be in range and the lcore enabled in the
+ * coremask.  Warns (but does not fail) when an lcore sits on a
+ * non-zero socket while NUMA awareness is disabled.
+ * Returns 0 on success, -1 on the first invalid entry. */
+static int
+check_lcore_params(void)
+{
+       uint8_t queue, lcore;
+       uint16_t i;
+       int socketid;
+
+       for (i = 0; i < nb_lcore_params; ++i) {
+               queue = lcore_params[i].queue_id;
+               if (queue >= MAX_RX_QUEUE_PER_PORT) {
+                       printf("invalid queue number: %hhu\n", queue);
+                       return -1;
+               }
+               lcore = lcore_params[i].lcore_id;
+               if (!rte_lcore_is_enabled(lcore)) {
+                       printf("error: lcore %hhu is not enabled in lcore mask\n", lcore);
+                       return -1;
+               }
+               /* Fix: '!=' binds tighter than '=', so the original
+                * (socketid = rte_lcore_to_socket_id(lcore) != 0) stored the
+                * 0/1 comparison result in socketid and the warning always
+                * printed "socket 1".  Parenthesize the assignment so the
+                * real socket id is stored and reported. */
+               if (((socketid = rte_lcore_to_socket_id(lcore)) != 0) &&
+                       (numa_on == 0)) {
+                       printf("warning: lcore %hhu is on socket %d with numa off \n",
+                               lcore, socketid);
+               }
+       }
+       return 0;
+}
+
+/* Verify that every port referenced by --config is both enabled in the
+ * port mask and actually present (portid < nb_ports).
+ * Returns 0 on success, -1 on the first bad reference. */
+static int
+check_port_config(const unsigned nb_ports)
+{
+       unsigned portid;
+       uint16_t i;
+
+       for (i = 0; i < nb_lcore_params; ++i) {
+               portid = lcore_params[i].port_id;
+               if ((enabled_port_mask & (1 << portid)) == 0) {
+                       printf("port %u is not enabled in port mask\n", portid);
+                       return -1;
+               }
+               if (portid >= nb_ports) {
+                       printf("port %u is not present on the board\n", portid);
+                       return -1;
+               }
+       }
+       return 0;
+}
+
+/* Return the number of RX queues to configure on a port: one more than
+ * the highest queue id referenced for that port in lcore_params
+ * (0 when the port is unreferenced). */
+static uint8_t
+get_port_n_rx_queues(const uint8_t port)
+{
+       int queue = -1;
+       uint16_t i;
+
+       for (i = 0; i < nb_lcore_params; ++i) {
+               if (lcore_params[i].port_id == port && lcore_params[i].queue_id > queue)
+                       queue = lcore_params[i].queue_id;
+       }
+       return (uint8_t)(++queue);
+}
+
+/* Distribute the (port, queue) pairs from lcore_params into each lcore's
+ * rx_queue_list, bumping n_rx_queue as entries are added.
+ * Returns 0 on success, -1 if an lcore would exceed
+ * MAX_RX_QUEUE_PER_LCORE. */
+static int
+init_lcore_rx_queues(void)
+{
+       uint16_t i, nb_rx_queue;
+       uint8_t lcore;
+
+       for (i = 0; i < nb_lcore_params; ++i) {
+               lcore = lcore_params[i].lcore_id;
+               nb_rx_queue = lcore_conf[lcore].n_rx_queue;
+               if (nb_rx_queue >= MAX_RX_QUEUE_PER_LCORE) {
+                       printf("error: too many queues (%u) for lcore: %u\n",
+                               (unsigned)nb_rx_queue + 1, (unsigned)lcore);
+                       return -1;
+               } else {
+                       lcore_conf[lcore].rx_queue_list[nb_rx_queue].port_id =
+                               lcore_params[i].port_id;
+                       lcore_conf[lcore].rx_queue_list[nb_rx_queue].queue_id =
+                               lcore_params[i].queue_id;
+                       lcore_conf[lcore].n_rx_queue++;
+               }
+       }
+       return 0;
+}
+
+/* Display command-line usage for the application-specific options.
+ * Fixes the unbalanced bracket in the --config synopsis
+ * ("[,(port,queue,lcore]]" -> "[,(port,queue,lcore)]") and documents
+ * the previously undocumented -P flag. */
+static void
+print_usage(const char *prgname)
+{
+       printf ("%s [EAL options] -- -p PORTMASK -P"
+               "  [--config (port,queue,lcore)[,(port,queue,lcore)]]\n"
+               "  -p PORTMASK: hexadecimal bitmask of ports to configure\n"
+               "  -P: enable promiscuous mode on all configured ports\n"
+               "  --config (port,queue,lcore): rx queues configuration\n"
+               "  --no-numa: optional, disable numa awareness\n",
+               prgname);
+}
+
+/* Parse the -p hexadecimal port mask.
+ *
+ * Fix: the original returned -1 on a malformed string, but the caller
+ * only tests for == 0, so an invalid mask silently became 0xFFFFFFFF
+ * (all ports enabled).  Return 0 for every error instead — a zero mask
+ * is already meaningless, so 0 unambiguously signals failure to the
+ * existing caller check. */
+static int
+parse_portmask(const char *portmask)
+{
+       char *end = NULL;
+       unsigned long pm;
+
+       /* parse hexadecimal string */
+       pm = strtoul(portmask, &end, 16);
+       if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
+               return 0;
+
+       return pm;
+}
+
+/* Parse the --config argument: a list of "(port,queue,lcore)" tuples.
+ * Resets and refills lcore_params_array, then points lcore_params at it.
+ * Each field must be a number in [0,255].
+ * Returns 0 on success, -1 on any syntax error or overflow of
+ * MAX_LCORE_PARAMS. */
+static int
+parse_config(const char *q_arg)
+{
+       char s[256];
+       const char *p, *p0 = q_arg;
+       char *end;
+       enum fieldnames {
+               FLD_PORT = 0,
+               FLD_QUEUE,
+               FLD_LCORE,
+               _NUM_FLD
+       };
+       unsigned long int_fld[_NUM_FLD];
+       char *str_fld[_NUM_FLD];
+       int i;
+       unsigned size;
+
+       nb_lcore_params = 0;
+
+       /* Walk each "(...)" group in the argument string. */
+       while ((p = strchr(p0,'(')) != NULL) {
+               ++p;
+               if((p0 = strchr(p,')')) == NULL)
+                       return -1;
+
+               size = p0 - p;
+               if(size >= sizeof(s))
+                       return -1;
+
+               /* Copy the group body and split it on commas. */
+               rte_snprintf(s, sizeof(s), "%.*s", size, p);
+               if (rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',') != _NUM_FLD)
+                       return -1;
+               for (i = 0; i < _NUM_FLD; i++){
+                       errno = 0;
+                       int_fld[i] = strtoul(str_fld[i], &end, 0);
+                       if (errno != 0 || end == str_fld[i] || int_fld[i] > 255)
+                               return -1;
+               }
+               if (nb_lcore_params >= MAX_LCORE_PARAMS) {
+                       printf("exceeded max number of lcore params: %hu\n",
+                               nb_lcore_params);
+                       return -1;
+               }
+               lcore_params_array[nb_lcore_params].port_id = (uint8_t)int_fld[FLD_PORT];
+               lcore_params_array[nb_lcore_params].queue_id = (uint8_t)int_fld[FLD_QUEUE];
+               lcore_params_array[nb_lcore_params].lcore_id = (uint8_t)int_fld[FLD_LCORE];
+               ++nb_lcore_params;
+       }
+       lcore_params = lcore_params_array;
+       return 0;
+}
+
+/* Parse the argument given in the command line of the application
+ * (everything after the EAL "--" separator): -p, -P, --config,
+ * --no-numa.  On success returns the index of the last consumed
+ * argument (so the caller can shift argv) and resets optind for any
+ * later getopt use; returns -1 on any parse error. */
+static int
+parse_args(int argc, char **argv)
+{
+       int opt, ret;
+       char **argvopt;
+       int option_index;
+       char *prgname = argv[0];
+       static struct option lgopts[] = {
+               {"config", 1, 0, 0},
+               {"no-numa", 0, 0, 0},
+               {NULL, 0, 0, 0}
+       };
+
+       argvopt = argv;
+
+       while ((opt = getopt_long(argc, argvopt, "p:P",
+                               lgopts, &option_index)) != EOF) {
+
+               switch (opt) {
+               /* portmask */
+               case 'p':
+                       enabled_port_mask = parse_portmask(optarg);
+                       if (enabled_port_mask == 0) {
+                               printf("invalid portmask\n");
+                               print_usage(prgname);
+                               return -1;
+                       }
+                       break;
+               case 'P':
+                       printf("Promiscuous mode selected\n");
+                       promiscuous_on = 1;
+                       break;
+
+               /* long options */
+               case 0:
+                       if (!strcmp(lgopts[option_index].name, "config")) {
+                               ret = parse_config(optarg);
+                               if (ret) {
+                                       printf("invalid config\n");
+                                       print_usage(prgname);
+                                       return -1;
+                               }
+                       }
+
+                       if (!strcmp(lgopts[option_index].name, "no-numa")) {
+                               printf("numa is disabled \n");
+                               numa_on = 0;
+                       }
+                       break;
+
+               default:
+                       print_usage(prgname);
+                       return -1;
+               }
+       }
+
+       /* Make argv[optind-1] the program name so the remaining argv
+        * still looks like a normal command line to the caller. */
+       if (optind >= 0)
+               argv[optind-1] = prgname;
+
+       ret = optind-1;
+       optind = 0; /* reset getopt lib */
+       return ret;
+}
+
+/* Print a name prefix followed by a MAC address as XX:XX:XX:XX:XX:XX
+ * (no trailing newline). */
+static void
+print_ethaddr(const char *name, const struct ether_addr *eth_addr)
+{
+       printf ("%s%02X:%02X:%02X:%02X:%02X:%02X", name,
+               eth_addr->addr_bytes[0],
+               eth_addr->addr_bytes[1],
+               eth_addr->addr_bytes[2],
+               eth_addr->addr_bytes[3],
+               eth_addr->addr_bytes[4],
+               eth_addr->addr_bytes[5]);
+}
+
+#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
+/* Create the exact-match hash for one socket and populate it with the
+ * static route array; the hash slot index returned by rte_hash_add_key
+ * indexes l3fwd_out_if to recover the output interface at lookup time.
+ * Exits the process on any failure. */
+static void
+setup_hash(int socketid)
+{
+       unsigned i;
+       int ret;
+       char s[64];
+
+       /* create  hashes */
+       /* NOTE(review): s is a stack buffer assigned to
+        * l3fwd_hash_params.name — assumes rte_hash_create() copies the
+        * name before returning; confirm against the rte_hash API. */
+       rte_snprintf(s, sizeof(s), "l3fwd_hash_%d", socketid);
+       l3fwd_hash_params.name = s;
+       l3fwd_hash_params.socket_id = socketid;
+       l3fwd_lookup_struct[socketid] = rte_hash_create(&l3fwd_hash_params);
+       if (l3fwd_lookup_struct[socketid] == NULL)
+               rte_exit(EXIT_FAILURE, "Unable to create the l3fwd hash on "
+                               "socket %d\n", socketid);
+
+       /* populate the hash */
+       for (i = 0; i < L3FWD_NUM_ROUTES; i++) {
+               ret = rte_hash_add_key (l3fwd_lookup_struct[socketid],
+                               (void *) &l3fwd_route_array[i].key);
+               if (ret < 0) {
+                       rte_exit(EXIT_FAILURE, "Unable to add entry %u to the"
+                               "l3fwd hash on socket %d\n", i, socketid);
+               }
+               l3fwd_out_if[ret] = l3fwd_route_array[i].if_out;
+               printf("Hash: Adding key\n");
+               print_key(l3fwd_route_array[i].key);
+       }
+}
+#endif
+
+#if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
+/* Create the LPM table for one socket and populate it with the static
+ * route array.  Exits the process on any failure. */
+static void
+setup_lpm(int socketid)
+{
+       unsigned i;
+       int ret;
+       char s[64];
+
+       /* create the LPM table */
+       rte_snprintf(s, sizeof(s), "L3FWD_LPM_%d", socketid);
+       l3fwd_lookup_struct[socketid] = rte_lpm_create(s, socketid,
+                               L3FWD_LPM_MAX_RULES, RTE_LPM_MEMZONE);
+       if (l3fwd_lookup_struct[socketid] == NULL)
+               rte_exit(EXIT_FAILURE, "Unable to create the l3fwd LPM table"
+                               " on socket %d\n", socketid);
+
+       /* populate the LPM table */
+       for (i = 0; i < L3FWD_NUM_ROUTES; i++) {
+               ret = rte_lpm_add(l3fwd_lookup_struct[socketid],
+                       l3fwd_route_array[i].ip,
+                       l3fwd_route_array[i].depth,
+                       l3fwd_route_array[i].if_out);
+
+               if (ret < 0) {
+                       rte_exit(EXIT_FAILURE, "Unable to add entry %u to the "
+                               "l3fwd LPM table on socket %d\n",
+                               i, socketid);
+               }
+
+               printf("LPM: Adding route 0x%08x / %d (%d)\n",
+                       (unsigned)l3fwd_route_array[i].ip,
+                       l3fwd_route_array[i].depth,
+                       l3fwd_route_array[i].if_out);
+       }
+}
+#endif
+
+/* Per-socket memory initialization: for every enabled lcore, lazily
+ * create the mbuf pool and lookup structure (LPM or hash, per the
+ * compile-time APP_LOOKUP_METHOD) on that lcore's socket (socket 0 when
+ * NUMA is off), then point the lcore's conf at the socket-local lookup
+ * structure.  Exits the process on any allocation failure; otherwise
+ * returns 0. */
+static int
+init_mem(void)
+{
+       struct lcore_conf *qconf;
+       int socketid;
+       unsigned lcore_id;
+       char s[64];
+
+       for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+               if (rte_lcore_is_enabled(lcore_id) == 0)
+                       continue;
+
+               if (numa_on)
+                       socketid = rte_lcore_to_socket_id(lcore_id);
+               else
+                       socketid = 0;
+
+               if (socketid >= NB_SOCKETS) {
+                       rte_exit(EXIT_FAILURE, "Socket %d of lcore %u is out of range %d\n",
+                               socketid, lcore_id, NB_SOCKETS);
+               }
+               if (pktmbuf_pool[socketid] == NULL) {
+                       rte_snprintf(s, sizeof(s), "mbuf_pool_%d", socketid);
+                       pktmbuf_pool[socketid] =
+                               rte_mempool_create(s, NB_MBUF, MBUF_SIZE, 32,
+                                       sizeof(struct rte_pktmbuf_pool_private),
+                                       rte_pktmbuf_pool_init, NULL,
+                                       rte_pktmbuf_init, NULL,
+                                       socketid, 0);
+                       if (pktmbuf_pool[socketid] == NULL)
+                               rte_exit(EXIT_FAILURE,
+                                               "Cannot init mbuf pool on socket %d\n", socketid);
+                       else
+                               printf("Allocated mbuf pool on socket %d\n", socketid);
+
+#if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
+                       setup_lpm(socketid);
+#else
+                       setup_hash(socketid);
+#endif
+               }
+               qconf = &lcore_conf[lcore_id];
+               qconf->lookup_struct = l3fwd_lookup_struct[socketid];
+       }
+       return 0;
+}
+
+/* Application entry point: initialize the EAL, parse arguments,
+ * validate the lcore/port configuration, allocate per-socket memory,
+ * probe and configure every enabled port (one TX queue per enabled
+ * lcore, RX queues per --config), start the ports, then launch
+ * main_loop() on every lcore and wait for them.  Exits via rte_exit()
+ * on any initialization failure. */
+int
+MAIN(int argc, char **argv)
+{
+       struct lcore_conf *qconf;
+       struct rte_eth_link link;
+       int ret;
+       unsigned nb_ports;
+       uint16_t queueid;
+       unsigned lcore_id;
+       uint32_t n_tx_queue, nb_lcores;
+       uint8_t portid, nb_rx_queue, queue, socketid;
+
+       /* init EAL */
+       ret = rte_eal_init(argc, argv);
+       if (ret < 0)
+               rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n");
+       argc -= ret;
+       argv += ret;
+
+       /* parse application arguments (after the EAL ones) */
+       ret = parse_args(argc, argv);
+       if (ret < 0)
+               rte_exit(EXIT_FAILURE, "Invalid L3FWD parameters\n");
+
+       if (check_lcore_params() < 0)
+               rte_exit(EXIT_FAILURE, "check_lcore_params failed\n");
+
+       ret = init_lcore_rx_queues();
+       if (ret < 0)
+               rte_exit(EXIT_FAILURE, "init_lcore_rx_queues failed\n");
+
+       ret = init_mem();
+       if (ret < 0)
+               rte_exit(EXIT_FAILURE, "init_mem failed\n");
+
+       /* init driver */
+#ifdef RTE_LIBRTE_IGB_PMD
+       if (rte_igb_pmd_init() < 0)
+               rte_exit(EXIT_FAILURE, "Cannot init igb pmd\n");
+#endif
+#ifdef RTE_LIBRTE_IXGBE_PMD
+       if (rte_ixgbe_pmd_init() < 0)
+               rte_exit(EXIT_FAILURE, "Cannot init ixgbe pmd\n");
+#endif
+
+       if (rte_eal_pci_probe() < 0)
+               rte_exit(EXIT_FAILURE, "Cannot probe PCI\n");
+
+       /* Clamp the detected port count to what our tables can hold. */
+       nb_ports = rte_eth_dev_count();
+       if (nb_ports > MAX_PORTS)
+               nb_ports = MAX_PORTS;
+
+       if (check_port_config(nb_ports) < 0)
+               rte_exit(EXIT_FAILURE, "check_port_config failed\n");
+
+       nb_lcores = rte_lcore_count();
+
+       /* initialize all ports */
+       for (portid = 0; portid < nb_ports; portid++) {
+               /* skip ports that are not enabled */
+               if ((enabled_port_mask & (1 << portid)) == 0) {
+                       printf("\nSkipping disabled port %d\n", portid);
+                       continue;
+               }
+
+               /* init port */
+               printf("Initializing port %d ... ", portid );
+               fflush(stdout);
+
+               /* One TX queue per lcore so each lcore owns its queue
+                * lock-free, capped at the hardware limit. */
+               nb_rx_queue = get_port_n_rx_queues(portid);
+               n_tx_queue = nb_lcores;
+               if (n_tx_queue > MAX_TX_QUEUE_PER_PORT)
+                       n_tx_queue = MAX_TX_QUEUE_PER_PORT;
+               printf("Creating queues: nb_rxq=%d nb_txq=%u... ",
+                       nb_rx_queue, (unsigned)n_tx_queue );
+               ret = rte_eth_dev_configure(portid, nb_rx_queue,
+                                       (uint16_t)n_tx_queue, &port_conf);
+               if (ret < 0)
+                       rte_exit(EXIT_FAILURE, "Cannot configure device: err=%d, port=%d\n",
+                               ret, portid);
+
+               rte_eth_macaddr_get(portid, &ports_eth_addr[portid]);
+               print_ethaddr(" Address:", &ports_eth_addr[portid]);
+               printf(", ");
+
+
+               /* init one TX queue per couple (lcore,port) */
+               queueid = 0;
+               for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+                       if (rte_lcore_is_enabled(lcore_id) == 0)
+                               continue;
+
+                       if (numa_on)
+                               socketid = (uint8_t)rte_lcore_to_socket_id(lcore_id);
+                       else
+                               socketid = 0;
+
+                       printf("txq=%u,%d,%d ", lcore_id, queueid, socketid);
+                       fflush(stdout);
+                       ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
+                                                    socketid, &tx_conf);
+                       if (ret < 0)
+                               rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: err=%d, "
+                                       "port=%d\n", ret, portid);
+
+                       qconf = &lcore_conf[lcore_id];
+                       qconf->tx_queue_id[portid] = queueid;
+                       queueid++;
+               }
+               printf("\n");
+       }
+
+       for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+               if (rte_lcore_is_enabled(lcore_id) == 0)
+                       continue;
+               qconf = &lcore_conf[lcore_id];
+               printf("\nInitializing rx queues on lcore %u ... ", lcore_id );
+               fflush(stdout);
+               /* init RX queues */
+               for(queue = 0; queue < qconf->n_rx_queue; ++queue) {
+                       portid = qconf->rx_queue_list[queue].port_id;
+                       queueid = qconf->rx_queue_list[queue].queue_id;
+
+                       if (numa_on)
+                               socketid = (uint8_t)rte_lcore_to_socket_id(lcore_id);
+                       else
+                               socketid = 0;
+
+                       printf("rxq=%d,%d,%d ", portid, queueid, socketid);
+                       fflush(stdout);
+
+                       ret = rte_eth_rx_queue_setup(portid, queueid, nb_rxd,
+                                       socketid, &rx_conf, pktmbuf_pool[socketid]);
+                       if (ret < 0)
+                               rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup: err=%d,"
+                                               "port=%d\n", ret, portid);
+               }
+       }
+
+       printf("\n");
+
+       /* start ports */
+       for (portid = 0; portid < nb_ports; portid++) {
+               if ((enabled_port_mask & (1 << portid)) == 0) {
+                       continue;
+               }
+               /* Start device */
+               ret = rte_eth_dev_start(portid);
+               if (ret < 0)
+                       rte_exit(EXIT_FAILURE, "rte_eth_dev_start: err=%d, port=%d\n",
+                               ret, portid);
+
+               printf("done: Port %d ", portid);
+
+               /* get link status */
+               /* NOTE(review): "half-duplex\n" carries an extra newline —
+                * the surrounding format string already ends in \n, so the
+                * half-duplex branch prints a blank line. */
+               rte_eth_link_get(portid, &link);
+               if (link.link_status) {
+                       printf(" Link Up - speed %u Mbps - %s\n",
+                              (unsigned) link.link_speed,
+                              (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
+                              ("full-duplex") : ("half-duplex\n"));
+               } else {
+                       printf(" Link Down\n");
+               }
+               /*
+                * If enabled, put device in promiscuous mode.
+                * This allows IO forwarding mode to forward packets
+                * to itself through 2 cross-connected  ports of the
+                * target machine.
+                */
+               if (promiscuous_on)
+                       rte_eth_promiscuous_enable(portid);
+       }
+
+       /* launch per-lcore init on every lcore */
+       rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
+       RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+               if (rte_eal_wait_lcore(lcore_id) < 0)
+                       return -1;
+       }
+
+       return 0;
+}
diff --git a/examples/l3fwd/main.h b/examples/l3fwd/main.h
new file mode 100644 (file)
index 0000000..6027cb5
--- /dev/null
@@ -0,0 +1,47 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#ifndef _MAIN_H_
+#define _MAIN_H_
+
+/* In the bare-metal execution environment the runtime calls _main;
+ * under Linux the standard C entry point is used. */
+#ifdef RTE_EXEC_ENV_BAREMETAL
+#define MAIN _main
+#else
+#define MAIN main
+#endif
+
+/* Application entry point (defined in main.c). */
+int MAIN(int argc, char **argv);
+
+#endif /* _MAIN_H_ */
diff --git a/examples/link_status_interrupt/495672_Link_Status_Interrupt_Sample_App_Guide_Rev1.0.pdf b/examples/link_status_interrupt/495672_Link_Status_Interrupt_Sample_App_Guide_Rev1.0.pdf
new file mode 100644 (file)
index 0000000..db4681c
Binary files /dev/null and b/examples/link_status_interrupt/495672_Link_Status_Interrupt_Sample_App_Guide_Rev1.0.pdf differ
diff --git a/examples/link_status_interrupt/Makefile b/examples/link_status_interrupt/Makefile
new file mode 100644 (file)
index 0000000..225dfcc
--- /dev/null
@@ -0,0 +1,52 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+# The DPDK SDK location must be provided via the environment.
+ifeq ($(RTE_SDK),)
+$(error "Please define RTE_SDK environment variable")
+endif
+
+# Default target, can be overridden by command line or environment
+RTE_TARGET ?= x86_64-default-linuxapp-gcc
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# binary name
+APP = link_status_interrupt
+
+# all source are stored in SRCS-y
+SRCS-y := main.c
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+# rte.extapp.mk provides the build rules for external applications
+include $(RTE_SDK)/mk/rte.extapp.mk
diff --git a/examples/link_status_interrupt/main.c b/examples/link_status_interrupt/main.c
new file mode 100644 (file)
index 0000000..2c50461
--- /dev/null
@@ -0,0 +1,792 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <sys/types.h>
+#include <string.h>
+#include <sys/queue.h>
+#include <netinet/in.h>
+#include <setjmp.h>
+#include <stdarg.h>
+#include <ctype.h>
+#include <errno.h>
+#include <getopt.h>
+
+#include <rte_common.h>
+#include <rte_log.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_memzone.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_launch.h>
+#include <rte_atomic.h>
+#include <rte_cycles.h>
+#include <rte_prefetch.h>
+#include <rte_lcore.h>
+#include <rte_per_lcore.h>
+#include <rte_branch_prediction.h>
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_random.h>
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_ring.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+
+#include "main.h"
+
+#define RTE_LOGTYPE_LSI RTE_LOGTYPE_USER1
+
+/* Maximum number of Ethernet ports this application tracks. */
+#define LSI_MAX_PORTS 32
+
+#define MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
+#define NB_MBUF   8192
+
+/*
+ * RX and TX Prefetch, Host, and Write-back threshold values should be
+ * carefully set for optimal performance. Consult the network
+ * controller's datasheet and supporting DPDK documentation for guidance
+ * on how these parameters should be set.
+ */
+#define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */
+#define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */
+#define RX_WTHRESH 4 /**< Default values of RX write-back threshold reg. */
+
+/*
+ * These default values are optimized for use with the Intel(R) 82599 10 GbE
+ * Controller and the DPDK ixgbe PMD. Consider using other values for other
+ * network controllers and/or network drivers.
+ */
+#define TX_PTHRESH 36 /**< Default values of TX prefetch threshold reg. */
+#define TX_HTHRESH 0  /**< Default values of TX host threshold reg. */
+#define TX_WTHRESH 0  /**< Default values of TX write-back threshold reg. */
+
+#define MAX_PKT_BURST 32
+#define BURST_TX_DRAIN 200000ULL /* around 100us at 2 Ghz */
+
+/* All pool/queue allocations in this example are pinned to socket 0. */
+#define SOCKET0 0
+
+/*
+ * Configurable number of RX/TX ring descriptors
+ */
+#define RTE_TEST_RX_DESC_DEFAULT 128
+#define RTE_TEST_TX_DESC_DEFAULT 512
+static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
+static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
+
+/* ethernet addresses of ports */
+static struct ether_addr lsi_ports_eth_addr[LSI_MAX_PORTS];
+
+/* mask of enabled ports (set by the -p command-line option) */
+static uint32_t lsi_enabled_port_mask = 0;
+
+/* number of RX ports polled by each lcore (set by the -q option) */
+static unsigned int lsi_rx_queue_per_lcore = 1;
+
+/* destination port for L2 forwarding */
+static unsigned lsi_dst_ports[LSI_MAX_PORTS] = {0};
+
+/* NOTE(review): duplicate of the identical #define above (L2-fwd style
+ * copy-paste); legal in C because the replacement list is identical, but
+ * one of the two should be removed. */
+#define MAX_PKT_BURST 32
+/* Per-port buffer of packets waiting to be flushed to the PMD. */
+struct mbuf_table {
+       unsigned len;
+       struct rte_mbuf *m_table[MAX_PKT_BURST];
+};
+
+#define MAX_RX_QUEUE_PER_LCORE 16
+#define MAX_TX_QUEUE_PER_PORT 16
+/* Per-lcore configuration: list of RX ports to poll, the TX queue id this
+ * lcore owns on every port, and per-port pending TX buffers. */
+struct lcore_queue_conf {
+       unsigned n_rx_queue;
+       unsigned rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
+       unsigned tx_queue_id;
+       struct mbuf_table tx_mbufs[LSI_MAX_PORTS];
+
+} __rte_cache_aligned;
+struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
+
+/* Default port configuration: all RX offloads disabled; the link-status
+ * change (LSC) interrupt is enabled so lsi_event_callback() is invoked. */
+static const struct rte_eth_conf port_conf = {
+       .rxmode = {
+               .split_hdr_size = 0,
+               .header_split   = 0, /**< Header Split disabled */
+               .hw_ip_checksum = 0, /**< IP checksum offload disabled */
+               .hw_vlan_filter = 0, /**< VLAN filtering disabled */
+               .jumbo_frame    = 0, /**< Jumbo Frame Support disabled */
+               .hw_strip_crc   = 0, /**< CRC stripping by hardware disabled */
+       },
+       .txmode = {
+       },
+       .intr_conf = {
+               .lsc = 1, /**< lsc interrupt feature enabled */
+       },
+};
+
+/* RX queue thresholds (see the RX_*THRESH notes above). */
+static const struct rte_eth_rxconf rx_conf = {
+       .rx_thresh = {
+               .pthresh = RX_PTHRESH,
+               .hthresh = RX_HTHRESH,
+               .wthresh = RX_WTHRESH,
+       },
+};
+
+/* TX queue thresholds (see the TX_*THRESH notes above). */
+static const struct rte_eth_txconf tx_conf = {
+       .tx_thresh = {
+               .pthresh = TX_PTHRESH,
+               .hthresh = TX_HTHRESH,
+               .wthresh = TX_WTHRESH,
+       },
+       .tx_free_thresh = 0, /* Use PMD default values */
+       .tx_rs_thresh = 0, /* Use PMD default values */
+};
+
+/* Single mbuf pool shared by all ports and queues. */
+struct rte_mempool * lsi_pktmbuf_pool = NULL;
+
+/* Per-port statistics struct */
+struct lsi_port_statistics {
+       uint64_t tx;      /* packets accepted by the PMD for transmit */
+       uint64_t rx;      /* packets received */
+       uint64_t dropped; /* packets rejected by the PMD and freed */
+} __rte_cache_aligned;
+struct lsi_port_statistics port_statistics[LSI_MAX_PORTS];
+
+/* Print per-port and aggregate RX/TX/drop statistics, together with each
+ * enabled port's current link state, to stdout (clearing the screen). */
+static void
+print_stats(void)
+{
+       struct rte_eth_link link;
+       uint64_t total_packets_dropped, total_packets_tx, total_packets_rx;
+       unsigned portid;
+
+       total_packets_dropped = 0;
+       total_packets_tx = 0;
+       total_packets_rx = 0;
+
+       /* ANSI escape sequences: clear screen / move cursor to top-left */
+       const char clr[] = { 27, '[', '2', 'J', '\0' };
+       const char topLeft[] = { 27, '[', '1', ';', '1', 'H','\0' };
+
+               /* Clear screen and move to top left */
+       printf("%s%s", clr, topLeft);
+
+       printf("\nPort statistics ====================================");
+
+       for (portid = 0; portid < LSI_MAX_PORTS; portid++) {
+               /* skip ports that are not enabled */
+               if ((lsi_enabled_port_mask & (1 << portid)) == 0)
+                       continue;
+
+               /* non-blocking link query (contrast rte_eth_link_get()) */
+               memset(&link, 0, sizeof(link));
+               rte_eth_link_get_nowait((uint8_t)portid, &link);
+               printf("\nStatistics for port %u ------------------------------"
+                          "\nLink status: %25s"
+                          "\nLink speed: %26u"
+                          "\nLink duplex: %25s"
+                          "\nPackets sent: %24"PRIu64
+                          "\nPackets received: %20"PRIu64
+                          "\nPackets dropped: %21"PRIu64,
+                          portid,
+                          (link.link_status ? "Link up" : "Link down"),
+                          (unsigned)link.link_speed,
+                          (link.link_duplex == ETH_LINK_FULL_DUPLEX ? \
+                                       "full-duplex" : "half-duplex"),
+                          port_statistics[portid].tx,
+                          port_statistics[portid].rx,
+                          port_statistics[portid].dropped);
+
+               total_packets_dropped += port_statistics[portid].dropped;
+               total_packets_tx += port_statistics[portid].tx;
+               total_packets_rx += port_statistics[portid].rx;
+       }
+       printf("\nAggregate statistics ==============================="
+                  "\nTotal packets sent: %18"PRIu64
+                  "\nTotal packets received: %14"PRIu64
+                  "\nTotal packets dropped: %15"PRIu64,
+                  total_packets_tx,
+                  total_packets_rx,
+                  total_packets_dropped);
+       printf("\n====================================================\n");
+}
+
+/* Flush n buffered packets to the PMD on the given port, using this
+ * lcore's TX queue. Packets the PMD does not accept are freed and counted
+ * as dropped. Always returns 0. */
+static int
+lsi_send_burst(struct lcore_queue_conf *qconf, unsigned n, uint8_t port)
+{
+       struct rte_mbuf **m_table;
+       unsigned ret;
+       unsigned queueid;
+
+       queueid = (uint16_t) qconf->tx_queue_id;
+       m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table;
+
+       ret = rte_eth_tx_burst(port, (uint16_t) queueid, m_table, (uint16_t) n);
+       port_statistics[port].tx += ret;
+       if (unlikely(ret < n)) {
+               /* PMD took only the first 'ret' mbufs: free the remainder */
+               port_statistics[port].dropped += (n - ret);
+               do {
+                       rte_pktmbuf_free(m_table[ret]);
+               } while (++ret < n);
+       }
+
+       return 0;
+}
+
+/* Queue one packet for transmission on the given port; once MAX_PKT_BURST
+ * packets are buffered they are flushed via lsi_send_burst(). Partially
+ * filled buffers are drained periodically by lsi_main_loop(). */
+static int
+lsi_send_packet(struct rte_mbuf *m, uint8_t port)
+{
+       unsigned lcore_id, len;
+       struct lcore_queue_conf *qconf;
+
+       lcore_id = rte_lcore_id();
+
+       qconf = &lcore_queue_conf[lcore_id];
+       len = qconf->tx_mbufs[port].len;
+       qconf->tx_mbufs[port].m_table[len] = m;
+       len++;
+
+       /* enough pkts to be sent */
+       if (unlikely(len == MAX_PKT_BURST)) {
+               lsi_send_burst(qconf, MAX_PKT_BURST, port);
+               len = 0;
+       }
+
+       qconf->tx_mbufs[port].len = len;
+       return 0;
+}
+
+/* L2-forward one packet: rewrite the destination MAC to
+ * 00:09:c0:00:00:<dst_port>, set the source MAC to the output port's own
+ * address, then queue it for transmission on the paired port. */
+static void
+lsi_simple_forward(struct rte_mbuf *m, unsigned portid)
+{
+       struct ether_hdr *eth;
+       void *tmp;
+       unsigned dst_port = lsi_dst_ports[portid];
+
+       eth = rte_pktmbuf_mtod(m, struct ether_hdr *);
+
+       /* 00:09:c0:00:00:xx */
+       tmp = &eth->d_addr.addr_bytes[0];
+       /* NOTE(review): this 64-bit store writes 8 bytes into the 6-byte
+        * d_addr field, spilling into the adjacent bytes; presumably benign
+        * because s_addr is fully overwritten just below — confirm that
+        * s_addr immediately follows d_addr in struct ether_hdr. */
+       *((uint64_t *)tmp) = 0x000000c00900 + (dst_port << 24);
+
+       /* src addr */
+       ether_addr_copy(&lsi_ports_eth_addr[dst_port], &eth->s_addr);
+
+       lsi_send_packet(m, (uint8_t) dst_port);
+}
+
+/* Main per-lcore processing loop: periodically drains buffered TX bursts
+ * (every BURST_TX_DRAIN tsc cycles), triggers the statistics printout on
+ * the master lcore, and polls this lcore's assigned RX ports, forwarding
+ * each received packet with lsi_simple_forward(). Never returns. */
+static void
+lsi_main_loop(void)
+{
+       struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
+       struct rte_mbuf *m;
+       unsigned lcore_id;
+       uint64_t prev_tsc = 0;
+       uint64_t diff_tsc, cur_tsc, timer_tsc;
+       unsigned i, j, portid, nb_rx;
+       struct lcore_queue_conf *qconf;
+
+       timer_tsc = 0;
+
+       lcore_id = rte_lcore_id();
+       qconf = &lcore_queue_conf[lcore_id];
+
+       if (qconf->n_rx_queue == 0) {
+               RTE_LOG(INFO, LSI, "lcore %u has nothing to do\n", lcore_id);
+               /* NOTE(review): lcores with no RX work busy-spin forever */
+               while(1);
+       }
+
+       RTE_LOG(INFO, LSI, "entering main loop on lcore %u\n", lcore_id);
+
+       for (i = 0; i < qconf->n_rx_queue; i++) {
+
+               portid = qconf->rx_queue_list[i];
+               RTE_LOG(INFO, LSI, " -- lcoreid=%u portid=%u\n", lcore_id,
+                       portid);
+       }
+
+       while (1) {
+
+               cur_tsc = rte_rdtsc();
+
+               /*
+                * TX burst queue drain
+                */
+               diff_tsc = cur_tsc - prev_tsc;
+               if (unlikely(diff_tsc > BURST_TX_DRAIN)) {
+
+                       /* this could be optimized (use queueid instead of
+                        * portid), but it is not called so often */
+                       for (portid = 0; portid < LSI_MAX_PORTS; portid++) {
+                               if (qconf->tx_mbufs[portid].len == 0)
+                                       continue;
+                               lsi_send_burst(&lcore_queue_conf[lcore_id],
+                                                qconf->tx_mbufs[portid].len,
+                                                (uint8_t) portid);
+                               qconf->tx_mbufs[portid].len = 0;
+                       }
+
+                       /* if timer is enabled */
+                       if (timer_period > 0) {
+
+                               /* advance the timer */
+                               timer_tsc += diff_tsc;
+
+                               /* if timer has reached its timeout */
+                               if (unlikely(timer_tsc >= (uint64_t) timer_period)) {
+
+                                       /* do this only on master core */
+                                       if (lcore_id == rte_get_master_lcore()) {
+                                               print_stats();
+                                               /* reset the timer */
+                                               timer_tsc = 0;
+                                       }
+                               }
+                       }
+
+                       prev_tsc = cur_tsc;
+               }
+
+               /*
+                * Read packet from RX queues
+                */
+               for (i = 0; i < qconf->n_rx_queue; i++) {
+
+                       portid = qconf->rx_queue_list[i];
+                       nb_rx = rte_eth_rx_burst((uint8_t) portid, 0,
+                                                pkts_burst, MAX_PKT_BURST);
+
+                       port_statistics[portid].rx += nb_rx;
+
+                       for (j = 0; j < nb_rx; j++) {
+                               m = pkts_burst[j];
+                               /* prefetch packet data before the MAC rewrite */
+                               rte_prefetch0(rte_pktmbuf_mtod(m, void *));
+                               lsi_simple_forward(m, portid);
+                       }
+               }
+       }
+}
+
+/* Per-lcore entry point launched by rte_eal_mp_remote_launch(); never
+ * actually returns because lsi_main_loop() loops forever. */
+static int
+lsi_launch_one_lcore(__attribute__((unused)) void *dummy)
+{
+       lsi_main_loop();
+       return 0;
+}
+
+/* Print command-line usage help for the application-specific options. */
+static void
+lsi_usage(const char *prgname)
+{
+       printf("%s [EAL options] -- -p PORTMASK [-q NQ]\n"
+               "  -p PORTMASK: hexadecimal bitmask of ports to configure\n"
+               "  -q NQ: number of queue (=ports) per lcore (default is 1)\n"
+               "  -T PERIOD: statistics will be refreshed each PERIOD seconds (0 to disable, 10 default, 86400 maximum)\n",
+                       prgname);
+}
+
+/* Parse a hexadecimal port mask string.
+ * Returns the mask (> 0) on success, -1 on parse error or empty mask.
+ * Callers must check the signed return value before storing it into an
+ * unsigned variable (-1 would otherwise become 0xffffffff). */
+static int
+lsi_parse_portmask(const char *portmask)
+{
+       char *end = NULL;
+       unsigned long pm;
+
+       /* parse hexadecimal string */
+       pm = strtoul(portmask, &end, 16);
+       if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
+               return -1;
+
+       if (pm == 0)
+               return -1;
+
+       return pm;
+}
+
+/* Parse the per-lcore RX queue count.
+ * Returns the value on success, 0 if the string is invalid, zero, or
+ * >= MAX_RX_QUEUE_PER_LCORE. */
+static unsigned int
+lsi_parse_nqueue(const char *q_arg)
+{
+       char *end = NULL;
+       unsigned long n;
+
+       /* parse decimal string (base 10) */
+       n = strtoul(q_arg, &end, 10);
+       if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
+               return 0;
+       if (n == 0)
+               return 0;
+       if (n >= MAX_RX_QUEUE_PER_LCORE)
+               return 0;
+
+       return n;
+}
+
+/* Parse the statistics timer period in seconds (decimal).
+ * Returns the value on success, -1 if the string is invalid or the value
+ * is >= MAX_TIMER_PERIOD. A negative input propagates as a negative
+ * return, which the caller rejects via its timer_period < 0 check. */
+static int
+lsi_parse_timer_period(const char *q_arg)
+{
+       char *end = NULL;
+       int n;
+
+       /* parse number string */
+       n = strtol(q_arg, &end, 10);
+       if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
+               return -1;
+       if (n >= MAX_TIMER_PERIOD)
+               return -1;
+
+       return n;
+}
+
+/* Parse the application (non-EAL) command-line arguments: -p PORTMASK,
+ * -q NQ (RX queues per lcore) and -T PERIOD (stats refresh in seconds).
+ * Returns the number of arguments consumed on success, -1 on error. */
+static int
+lsi_parse_args(int argc, char **argv)
+{
+       int opt, ret;
+       char **argvopt;
+       int option_index;
+       char *prgname = argv[0];
+       static struct option lgopts[] = {
+               {NULL, 0, 0, 0}
+       };
+
+       argvopt = argv;
+
+       while ((opt = getopt_long(argc, argvopt, "p:q:T:",
+                                 lgopts, &option_index)) != EOF) {
+
+               switch (opt) {
+               /* portmask */
+               case 'p':
+                       /* lsi_parse_portmask() returns -1 on error; storing
+                        * that straight into the uint32_t mask would turn it
+                        * into 0xffffffff and slip past an "== 0" check, so
+                        * validate the signed result before assigning. */
+                       ret = lsi_parse_portmask(optarg);
+                       if (ret <= 0) {
+                               printf("invalid portmask\n");
+                               lsi_usage(prgname);
+                               return -1;
+                       }
+                       lsi_enabled_port_mask = (uint32_t) ret;
+                       break;
+
+               /* nqueue */
+               case 'q':
+                       lsi_rx_queue_per_lcore = lsi_parse_nqueue(optarg);
+                       if (lsi_rx_queue_per_lcore == 0) {
+                               printf("invalid queue number\n");
+                               lsi_usage(prgname);
+                               return -1;
+                       }
+                       break;
+
+               /* timer period: a parse error (-1) yields a negative period */
+               case 'T':
+                       timer_period = lsi_parse_timer_period(optarg) * 1000 * TIMER_MILLISECOND;
+                       if (timer_period < 0) {
+                               printf("invalid timer period\n");
+                               lsi_usage(prgname);
+                               return -1;
+                       }
+                       break;
+
+               /* long options */
+               case 0:
+                       lsi_usage(prgname);
+                       return -1;
+
+               default:
+                       lsi_usage(prgname);
+                       return -1;
+               }
+       }
+
+       if (optind >= 0)
+               argv[optind-1] = prgname;
+
+       ret = optind-1;
+       optind = 0; /* reset getopt lib */
+       return ret;
+}
+
+/**
+ * It will be called as the callback for specified port after a LSI interrupt
+ * has been fully handled. This callback needs to be implemented carefully as
+ * it will be called in the interrupt host thread which is different from the
+ * application main thread.
+ *
+ * @param port_id
+ *  Port id.
+ * @param type
+ *  event type.
+ * @param param
+ *  Pointer to(address of) the parameters.
+ *
+ * @return
+ *  void.
+ */
+static void
+lsi_event_callback(uint8_t port_id, enum rte_eth_event_type type, void *param)
+{
+       struct rte_eth_link link;
+
+       RTE_SET_USED(param);
+
+       printf("\n\nIn registered callback...\n");
+       printf("Event type: %s\n", type == RTE_ETH_EVENT_INTR_LSC ? "LSC interrupt" : "unknown event");
+       /* NOTE(review): the blocking rte_eth_link_get() variant is used here,
+        * unlike the _nowait variant in print_stats() — presumably acceptable
+        * in the interrupt host thread; confirm. */
+       rte_eth_link_get(port_id, &link);
+       if (link.link_status) {
+               printf("Port %d Link Up - speed %u Mbps - %s\n\n",
+                               port_id, (unsigned)link.link_speed,
+                       (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
+                               ("full-duplex") : ("half-duplex"));
+       } else
+               printf("Port %d Link Down\n\n", port_id);
+}
+
+int
+MAIN(int argc, char **argv)
+{
+       struct lcore_queue_conf *qconf;
+       struct rte_eth_dev_info dev_info;
+       struct rte_eth_link link;
+       int ret;
+       unsigned int nb_ports, nb_lcores;
+       unsigned portid, portid_last = 0, queueid = 0;
+       unsigned lcore_id, rx_lcore_id;
+       unsigned n_tx_queue, max_tx_queues;
+       unsigned nb_ports_in_mask = 0;
+
+       /* init EAL */
+       ret = rte_eal_init(argc, argv);
+       if (ret < 0)
+               rte_exit(EXIT_FAILURE, "rte_eal_init failed");
+       argc -= ret;
+       argv += ret;
+
+       /* parse application arguments (after the EAL ones) */
+       ret = lsi_parse_args(argc, argv);
+       if (ret < 0)
+               rte_exit(EXIT_FAILURE, "Invalid arguments");
+
+       /* create the mbuf pool */
+       lsi_pktmbuf_pool =
+               rte_mempool_create("mbuf_pool", NB_MBUF,
+                                  MBUF_SIZE, 32,
+                                  sizeof(struct rte_pktmbuf_pool_private),
+                                  rte_pktmbuf_pool_init, NULL,
+                                  rte_pktmbuf_init, NULL,
+                                  SOCKET0, 0);
+       if (lsi_pktmbuf_pool == NULL)
+               rte_panic("Cannot init mbuf pool\n");
+
+       /* init driver(s) */
+#ifdef RTE_LIBRTE_IGB_PMD
+       if (rte_igb_pmd_init() < 0)
+               rte_panic("Cannot init igb pmd\n");
+#endif
+#ifdef RTE_LIBRTE_IXGBE_PMD
+       if (rte_ixgbe_pmd_init() < 0)
+               rte_panic("Cannot init ixgbe pmd\n");
+#endif
+
+       if (rte_eal_pci_probe() < 0)
+               rte_panic("Cannot probe PCI\n");
+
+       nb_ports = rte_eth_dev_count();
+       if (nb_ports == 0)
+               rte_panic("No Ethernet port - bye\n");
+
+       if (nb_ports > LSI_MAX_PORTS)
+               nb_ports = LSI_MAX_PORTS;
+
+       nb_lcores = rte_lcore_count();
+
+       /*
+        * Each logical core is assigned a dedicated TX queue on each port.
+        * Compute the maximum number of TX queues that can be used.
+        */
+       max_tx_queues = nb_lcores;
+       for (portid = 0; portid < nb_ports; portid++) {
+               /* skip ports that are not enabled */
+               if ((lsi_enabled_port_mask & (1 << portid)) == 0)
+                       continue;
+
+               /* save the destination port id */
+               if (nb_ports_in_mask % 2) {
+                       lsi_dst_ports[portid] = portid_last;
+                       lsi_dst_ports[portid_last] = portid;
+               }
+               else
+                       portid_last = portid;
+
+               nb_ports_in_mask++;
+
+               rte_eth_dev_info_get((uint8_t) portid, &dev_info);
+               if (max_tx_queues > dev_info.max_tx_queues)
+                       max_tx_queues = dev_info.max_tx_queues;
+       }
+
+       if (nb_ports_in_mask < 2 || nb_ports_in_mask % 2)
+               rte_exit(EXIT_FAILURE, "Current enabled port number is %u, "
+                               "but it should be even and at least 2\n",
+                               nb_ports_in_mask);
+
+       rx_lcore_id = 0;
+       qconf = &lcore_queue_conf[rx_lcore_id];
+       qconf->tx_queue_id = 0;
+       n_tx_queue = 1;
+
+       /* Initialize the port/queue configuration of each logical core */
+       for (portid = 0; portid < nb_ports; portid++) {
+               /* skip ports that are not enabled */
+               if ((lsi_enabled_port_mask & (1 << portid)) == 0)
+                       continue;
+
+               /* get the lcore_id for this port */
+               while (rte_lcore_is_enabled(rx_lcore_id) == 0 ||
+                      lcore_queue_conf[rx_lcore_id].n_rx_queue ==
+                      lsi_rx_queue_per_lcore) {
+
+                       rx_lcore_id++;
+                       if (rx_lcore_id >= RTE_MAX_LCORE)
+                               rte_exit(EXIT_FAILURE, "Not enough cores\n");
+                       if (n_tx_queue == max_tx_queues)
+                               rte_exit(EXIT_FAILURE, "Not enough TX queues\n");
+               }
+               if (qconf != &lcore_queue_conf[rx_lcore_id]) {
+                       /* Assigned a new logical core in the loop above. */
+                       qconf = &lcore_queue_conf[rx_lcore_id];
+                       qconf->tx_queue_id = n_tx_queue;
+                       n_tx_queue++;
+               }
+               qconf->rx_queue_list[qconf->n_rx_queue] = portid;
+               qconf->n_rx_queue++;
+               printf("Lcore %u: RX port %u TX queue %u\n",
+                      rx_lcore_id, portid, qconf->tx_queue_id);
+       }
+
+       /* Initialise each port */
+       for (portid = 0; portid < nb_ports; portid++) {
+
+               /* skip ports that are not enabled */
+               if ((lsi_enabled_port_mask & (1 << portid)) == 0) {
+                       printf("Skipping disabled port %u\n", portid);
+                       continue;
+               }
+               /* init port */
+               printf("Initializing port %u... ", portid);
+               fflush(stdout);
+               ret = rte_eth_dev_configure((uint8_t) portid, 1,
+                                           (uint16_t) n_tx_queue, &port_conf);
+               if (ret < 0)
+                       rte_exit(EXIT_FAILURE, "Cannot configure device: err=%d, port=%u\n",
+                                 ret, portid);
+
+               /* register lsi interrupt callback, need to be after
+                * rte_eth_dev_configure(). if (intr_conf.lsc == 0), no
+                * lsc interrupt will be present, and below callback to
+                * be registered will never be called.
+                */
+               rte_eth_dev_callback_register((uint8_t)portid,
+                       RTE_ETH_EVENT_INTR_LSC, lsi_event_callback, NULL);
+
+               rte_eth_macaddr_get((uint8_t) portid,
+                                   &lsi_ports_eth_addr[portid]);
+
+               /* init one RX queue */
+               fflush(stdout);
+               ret = rte_eth_rx_queue_setup((uint8_t) portid, 0, nb_rxd,
+                                            SOCKET0, &rx_conf,
+                                            lsi_pktmbuf_pool);
+               if (ret < 0)
+                       rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: err=%d, port=%u\n",
+                                 ret, portid);
+
+               /* init one TX queue logical core on each port */
+               for (queueid = 0; queueid < n_tx_queue; queueid++) {
+                       fflush(stdout);
+                       ret = rte_eth_tx_queue_setup((uint8_t) portid,
+                                                    (uint16_t) queueid, nb_txd,
+                                                    SOCKET0, &tx_conf);
+                       if (ret < 0)
+                               rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: err=%d, "
+                                         "port=%u queue=%u\n",
+                                         ret, portid, queueid);
+               }
+
+               /* Start device */
+               ret = rte_eth_dev_start((uint8_t) portid);
+               if (ret < 0)
+                       rte_exit(EXIT_FAILURE, "rte_eth_dev_start: err=%d, port=%u\n",
+                                 ret, portid);
+
+               printf("done: ");
+
+               /* get link status */
+               rte_eth_link_get((uint8_t) portid, &link);
+               if (link.link_status) {
+                       printf(" Link Up - speed %u Mbps - %s\n",
+                              (unsigned) link.link_speed,
+                              (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
+                              ("full-duplex") : ("half-duplex\n"));
+               } else {
+                       printf(" Link Down\n");
+               }
+
+               printf("Port %u, MAC address: %02X:%02X:%02X:%02X:%02X:%02X\n\n",
+                               portid,
+                               lsi_ports_eth_addr[portid].addr_bytes[0],
+                               lsi_ports_eth_addr[portid].addr_bytes[1],
+                               lsi_ports_eth_addr[portid].addr_bytes[2],
+                               lsi_ports_eth_addr[portid].addr_bytes[3],
+                               lsi_ports_eth_addr[portid].addr_bytes[4],
+                               lsi_ports_eth_addr[portid].addr_bytes[5]);
+
+               /* initialize port stats */
+               memset(&port_statistics, 0, sizeof(port_statistics));
+       }
+
+       /* launch per-lcore init on every lcore */
+       rte_eal_mp_remote_launch(lsi_launch_one_lcore, NULL, CALL_MASTER);
+       RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+               if (rte_eal_wait_lcore(lcore_id) < 0)
+                       return -1;
+       }
+
+       return 0;
+}
diff --git a/examples/link_status_interrupt/main.h b/examples/link_status_interrupt/main.h
new file mode 100644 (file)
index 0000000..6027cb5
--- /dev/null
@@ -0,0 +1,47 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#ifndef _MAIN_H_
+#define _MAIN_H_
+
+#ifdef RTE_EXEC_ENV_BAREMETAL
+#define MAIN _main
+#else
+#define MAIN main
+#endif
+
+int MAIN(int argc, char **argv);
+
+#endif /* _MAIN_H_ */
diff --git a/examples/load_balancer/482252_LoadBalancer_Sample_App_Guide_Rev1.1.pdf b/examples/load_balancer/482252_LoadBalancer_Sample_App_Guide_Rev1.1.pdf
new file mode 100644 (file)
index 0000000..6ce67fc
Binary files /dev/null and b/examples/load_balancer/482252_LoadBalancer_Sample_App_Guide_Rev1.1.pdf differ
diff --git a/examples/load_balancer/Makefile b/examples/load_balancer/Makefile
new file mode 100644 (file)
index 0000000..bfd4b2b
--- /dev/null
@@ -0,0 +1,58 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+ifeq ($(RTE_SDK),)
+$(error "Please define RTE_SDK environment variable")
+endif
+
+# Default target, can be overridden by command line or environment
+RTE_TARGET ?= x86_64-default-linuxapp-gcc
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# binary name
+APP = load_balancer
+
+# all source are stored in SRCS-y
+SRCS-y := main.c config.c init.c runtime.c
+
+CFLAGS += -O3 -g
+CFLAGS += $(WERROR_FLAGS)
+
+# workaround for a gcc bug with noreturn attribute
+# http://gcc.gnu.org/bugzilla/show_bug.cgi?id=12603
+ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
+CFLAGS_main.o += -Wno-return-type
+endif
+
+include $(RTE_SDK)/mk/rte.extapp.mk
diff --git a/examples/load_balancer/config.c b/examples/load_balancer/config.c
new file mode 100644 (file)
index 0000000..fdb3d19
--- /dev/null
@@ -0,0 +1,1058 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <sys/types.h>
+#include <string.h>
+#include <sys/queue.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <getopt.h>
+
+#include <rte_common.h>
+#include <rte_byteorder.h>
+#include <rte_log.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_memzone.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_launch.h>
+#include <rte_atomic.h>
+#include <rte_cycles.h>
+#include <rte_prefetch.h>
+#include <rte_lcore.h>
+#include <rte_per_lcore.h>
+#include <rte_branch_prediction.h>
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_random.h>
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_ring.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_ip.h>
+#include <rte_tcp.h>
+#include <rte_lpm.h>
+#include <rte_string_fns.h>
+
+#include "main.h"
+
+struct app_params app;
+
+static const char usage[] =
+"                                                                               \n"
+"    load_balancer <EAL PARAMS> -- <APP PARAMS>                                 \n"
+"                                                                               \n"
+"Application mandatory parameters:                                              \n"
+"    --rx \"(PORT, QUEUE, LCORE), ...\" : List of NIC RX ports and queues       \n"
+"           handled by the I/O RX lcores                                        \n"
+"    --tx \"(PORT, LCORE), ...\" : List of NIC TX ports handled by the I/O TX   \n"
+"           lcores                                                              \n"
+"    --w \"LCORE, ...\" : List of the worker lcores                             \n"
+"    --lpm \"IP / PREFIX => PORT; ...\" : List of LPM rules used by the worker  \n"
+"           lcores for packet forwarding                                        \n"
+"                                                                               \n"
+"Application optional parameters:                                               \n"
+"    --rsz \"A, B, C, D\" : Ring sizes                                          \n"
+"           A = Size (in number of buffer descriptors) of each of the NIC RX    \n"
+"               rings read by the I/O RX lcores (default value is %u)           \n"
+"           B = Size (in number of elements) of each of the SW rings used by the\n"
+"               I/O RX lcores to send packets to worker lcores (default value is\n"
+"               %u)                                                             \n"
+"           C = Size (in number of elements) of each of the SW rings used by the\n"
+"               worker lcores to send packets to I/O TX lcores (default value is\n"
+"               %u)                                                             \n"
+"           D = Size (in number of buffer descriptors) of each of the NIC TX    \n"
+"               rings written by I/O TX lcores (default value is %u)            \n"
+"    --bsz \"(A, B), (C, D), (E, F)\" :  Burst sizes                            \n"
+"           A = I/O RX lcore read burst size from NIC RX (default value is %u)  \n"
+"           B = I/O RX lcore write burst size to output SW rings (default value \n"
+"               is %u)                                                          \n"
+"           C = Worker lcore read burst size from input SW rings (default value \n"
+"               is %u)                                                          \n"
+"           D = Worker lcore write burst size to output SW rings (default value \n"
+"               is %u)                                                          \n"
+"           E = I/O TX lcore read burst size from input SW rings (default value \n"
+"               is %u)                                                          \n"
+"           F = I/O TX lcore write burst size to NIC TX (default value is %u)   \n"
+"    --pos-lb POS : Position of the 1-byte field within the input packet used by\n"
+"           the I/O RX lcores to identify the worker lcore for the current      \n"
+"           packet (default value is %u)                                        \n";
+
+void
+app_print_usage(void)
+{
+       printf(usage,
+               APP_DEFAULT_NIC_RX_RING_SIZE,
+               APP_DEFAULT_RING_RX_SIZE,
+               APP_DEFAULT_RING_TX_SIZE,
+               APP_DEFAULT_NIC_TX_RING_SIZE,
+               APP_DEFAULT_BURST_SIZE_IO_RX_READ,
+               APP_DEFAULT_BURST_SIZE_IO_RX_WRITE,
+               APP_DEFAULT_BURST_SIZE_WORKER_READ,
+               APP_DEFAULT_BURST_SIZE_WORKER_WRITE,
+               APP_DEFAULT_BURST_SIZE_IO_TX_READ,
+               APP_DEFAULT_BURST_SIZE_IO_TX_WRITE,
+               APP_DEFAULT_IO_RX_LB_POS
+       );
+}
+
+#ifndef APP_ARG_RX_MAX_CHARS
+#define APP_ARG_RX_MAX_CHARS     4096
+#endif
+
+#ifndef APP_ARG_RX_MAX_TUPLES
+#define APP_ARG_RX_MAX_TUPLES    128
+#endif
+
+static int
+str_to_unsigned_array(
+       const char *s, size_t sbuflen,
+       char separator,
+       unsigned num_vals,
+       unsigned *vals)
+{
+       char str[sbuflen+1];
+       char *splits[num_vals];
+       char *endptr = NULL;
+       int i, num_splits = 0;
+
+       /* copy s so we don't modify original string */
+       rte_snprintf(str, sizeof(str), "%s", s);
+       num_splits = rte_strsplit(str, sizeof(str), splits, num_vals, separator);
+
+       errno = 0;
+       for (i = 0; i < num_splits; i++) {
+               vals[i] = strtoul(splits[i], &endptr, 0);
+               if (errno != 0 || *endptr != '\0')
+                       return -1;
+       }
+
+       return num_splits;
+}
+
+static int
+str_to_unsigned_vals(
+       const char *s,
+       size_t sbuflen,
+       char separator,
+       unsigned num_vals, ...)
+{
+       unsigned i, vals[num_vals];
+       va_list ap;
+
+       num_vals = str_to_unsigned_array(s, sbuflen, separator, num_vals, vals);
+
+       va_start(ap, num_vals);
+       for (i = 0; i < num_vals; i++) {
+               unsigned *u = va_arg(ap, unsigned *);
+               *u = vals[i];
+       }
+       va_end(ap);
+       return num_vals;
+}
+
+static int
+parse_arg_rx(const char *arg)
+{
+       const char *p0 = arg, *p = arg;
+       uint32_t n_tuples;
+
+       if (strnlen(arg, APP_ARG_RX_MAX_CHARS + 1) == APP_ARG_RX_MAX_CHARS + 1) {
+               return -1;
+       }
+
+       n_tuples = 0;
+       while ((p = strchr(p0,'(')) != NULL) {
+               struct app_lcore_params *lp;
+               uint32_t port, queue, lcore, i;
+
+               p0 = strchr(p++, ')');
+               if ((p0 == NULL) ||
+                   (str_to_unsigned_vals(p, p0 - p, ',', 3, &port, &queue, &lcore) !=  3)) {
+                       return -2;
+               }
+
+               /* Enable port and queue for later initialization */
+               if ((port >= APP_MAX_NIC_PORTS) || (queue >= APP_MAX_RX_QUEUES_PER_NIC_PORT)) {
+                       return -3;
+               }
+               if (app.nic_rx_queue_mask[port][queue] != 0) {
+                       return -4;
+               }
+               app.nic_rx_queue_mask[port][queue] = 1;
+
+               /* Check and assign (port, queue) to I/O lcore */
+               if (rte_lcore_is_enabled(lcore) == 0) {
+                       return -5;
+               }
+
+               if (lcore >= APP_MAX_LCORES) {
+                       return -6;
+               }
+               lp = &app.lcore_params[lcore];
+               if (lp->type == e_APP_LCORE_WORKER) {
+                       return -7;
+               }
+               lp->type = e_APP_LCORE_IO;
+               for (i = 0; i < lp->io.rx.n_nic_queues; i ++) {
+                       if ((lp->io.rx.nic_queues[i].port == port) &&
+                           (lp->io.rx.nic_queues[i].queue == queue)) {
+                               return -8;
+                       }
+               }
+               if (lp->io.rx.n_nic_queues >= APP_MAX_NIC_RX_QUEUES_PER_IO_LCORE) {
+                       return -9;
+               }
+               lp->io.rx.nic_queues[lp->io.rx.n_nic_queues].port = (uint8_t) port;
+               lp->io.rx.nic_queues[lp->io.rx.n_nic_queues].queue = (uint8_t) queue;
+               lp->io.rx.n_nic_queues ++;
+
+               n_tuples ++;
+               if (n_tuples > APP_ARG_RX_MAX_TUPLES) {
+                       return -10;
+               }
+       }
+
+       if (n_tuples == 0) {
+               return -11;
+       }
+
+       return 0;
+}
+
+#ifndef APP_ARG_TX_MAX_CHARS
+#define APP_ARG_TX_MAX_CHARS     4096
+#endif
+
+#ifndef APP_ARG_TX_MAX_TUPLES
+#define APP_ARG_TX_MAX_TUPLES    128
+#endif
+
+static int
+parse_arg_tx(const char *arg)
+{
+       const char *p0 = arg, *p = arg;
+       uint32_t n_tuples;
+
+       if (strnlen(arg, APP_ARG_TX_MAX_CHARS + 1) == APP_ARG_TX_MAX_CHARS + 1) {
+               return -1;
+       }
+
+       n_tuples = 0;
+       while ((p = strchr(p0,'(')) != NULL) {
+               struct app_lcore_params *lp;
+               uint32_t port, lcore, i;
+
+               p0 = strchr(p++, ')');
+               if ((p0 == NULL) ||
+                   (str_to_unsigned_vals(p, p0 - p, ',', 2, &port, &lcore) !=  2)) {
+                       return -2;
+               }
+
+               /* Enable port and queue for later initialization */
+               if (port >= APP_MAX_NIC_PORTS) {
+                       return -3;
+               }
+               if (app.nic_tx_port_mask[port] != 0) {
+                       return -4;
+               }
+               app.nic_tx_port_mask[port] = 1;
+
+               /* Check and assign (port, queue) to I/O lcore */
+               if (rte_lcore_is_enabled(lcore) == 0) {
+                       return -5;
+               }
+
+               if (lcore >= APP_MAX_LCORES) {
+                       return -6;
+               }
+               lp = &app.lcore_params[lcore];
+               if (lp->type == e_APP_LCORE_WORKER) {
+                       return -7;
+               }
+               lp->type = e_APP_LCORE_IO;
+               for (i = 0; i < lp->io.tx.n_nic_ports; i ++) {
+                       if (lp->io.tx.nic_ports[i] == port) {
+                               return -8;
+                       }
+               }
+               if (lp->io.tx.n_nic_ports >= APP_MAX_NIC_TX_PORTS_PER_IO_LCORE) {
+                       return -9;
+               }
+               lp->io.tx.nic_ports[lp->io.tx.n_nic_ports] = (uint8_t) port;
+               lp->io.tx.n_nic_ports ++;
+
+               n_tuples ++;
+               if (n_tuples > APP_ARG_TX_MAX_TUPLES) {
+                       return -10;
+               }
+       }
+
+       if (n_tuples == 0) {
+               return -11;
+       }
+
+       return 0;
+}
+
+#ifndef APP_ARG_W_MAX_CHARS
+#define APP_ARG_W_MAX_CHARS     4096
+#endif
+
+#ifndef APP_ARG_W_MAX_TUPLES
+#define APP_ARG_W_MAX_TUPLES    APP_MAX_WORKER_LCORES
+#endif
+
+static int
+parse_arg_w(const char *arg)
+{
+       const char *p = arg;
+       uint32_t n_tuples;
+
+       if (strnlen(arg, APP_ARG_W_MAX_CHARS + 1) == APP_ARG_W_MAX_CHARS + 1) {
+               return -1;
+       }
+
+       n_tuples = 0;
+       while (*p != 0) {
+               struct app_lcore_params *lp;
+               uint32_t lcore;
+
+               errno = 0;
+               lcore = strtoul(p, NULL, 0);
+               if ((errno != 0)) {
+                       return -2;
+               }
+
+               /* Check and enable worker lcore */
+               if (rte_lcore_is_enabled(lcore) == 0) {
+                       return -3;
+               }
+
+               if (lcore >= APP_MAX_LCORES) {
+                       return -4;
+               }
+               lp = &app.lcore_params[lcore];
+               if (lp->type == e_APP_LCORE_IO) {
+                       return -5;
+               }
+               lp->type = e_APP_LCORE_WORKER;
+
+               n_tuples ++;
+               if (n_tuples > APP_ARG_W_MAX_TUPLES) {
+                       return -6;
+               }
+
+               p = strchr(p, ',');
+               if (p == NULL) {
+                       break;
+               }
+               p ++;
+       }
+
+       if (n_tuples == 0) {
+               return -7;
+       }
+
+       if ((n_tuples & (n_tuples - 1)) != 0) {
+               return -8;
+       }
+
+       return 0;
+}
+
+#ifndef APP_ARG_LPM_MAX_CHARS
+#define APP_ARG_LPM_MAX_CHARS     4096
+#endif
+
+static int
+parse_arg_lpm(const char *arg)
+{
+       const char *p = arg, *p0;
+
+       /* bugfix: guard against the LPM limit, not the TX one (copy/paste) */
+       if (strnlen(arg, APP_ARG_LPM_MAX_CHARS + 1) == APP_ARG_LPM_MAX_CHARS + 1) {
+               return -1;
+       }
+
+       while (*p != 0) {
+               uint32_t ip_a, ip_b, ip_c, ip_d, ip, depth, if_out;
+               char *endptr;
+
+               p0 = strchr(p, '/');
+               if ((p0 == NULL) ||
+                   (str_to_unsigned_vals(p, p0 - p, '.', 4, &ip_a, &ip_b, &ip_c, &ip_d) != 4)) {
+                       return -2;
+               }
+
+               p = p0 + 1;
+               errno = 0;
+               depth = strtoul(p, &endptr, 0);
+               if (errno != 0 || *endptr != '=') {
+                       return -3;
+               }
+               p = strchr(p, '>');
+               if (p == NULL) {
+                       return -4;
+               }
+               if_out = strtoul(++p, &endptr, 0);
+               if (errno != 0 || (*endptr != '\0' && *endptr != ';')) {
+                       return -5;
+               }
+
+               if ((ip_a >= 256) || (ip_b >= 256) || (ip_c >= 256) || (ip_d >= 256) ||
+                    (depth == 0) || (depth > 32) || /* bugfix: allow /32 host routes (LPM max depth is 32) */
+                        (if_out >= APP_MAX_NIC_PORTS)) {
+                       return -6;
+               }
+               ip = (ip_a << 24) | (ip_b << 16) | (ip_c << 8) | ip_d;
+
+               if (app.n_lpm_rules >= APP_MAX_LPM_RULES) {
+                       return -7;
+               }
+               app.lpm_rules[app.n_lpm_rules].ip = ip;
+               app.lpm_rules[app.n_lpm_rules].depth = (uint8_t) depth;
+               app.lpm_rules[app.n_lpm_rules].if_out = (uint8_t) if_out;
+               app.n_lpm_rules ++;
+
+               p = strchr(p, ';');
+               if (p == NULL) {
+                       return -8;
+               }
+               p ++;
+       }
+
+       if (app.n_lpm_rules == 0) {
+               return -9;
+       }
+
+       return 0;
+}
+
+static int
+app_check_lpm_table(void)
+{
+       uint32_t rule;
+
+       /* For each rule, check that the output I/F is enabled */
+       for (rule = 0; rule < app.n_lpm_rules; rule ++)
+       {
+               uint32_t port = app.lpm_rules[rule].if_out;
+
+               if (app.nic_tx_port_mask[port] == 0) {
+                       return -1;
+               }
+       }
+
+       return 0;
+}
+
+static int
+app_check_every_rx_port_is_tx_enabled(void)
+{
+       uint8_t port;
+
+       for (port = 0; port < APP_MAX_NIC_PORTS; port ++) {
+               if ((app_get_nic_rx_queues_per_port(port) > 0) && (app.nic_tx_port_mask[port] == 0)) {
+                       return -1;
+               }
+       }
+
+       return 0;
+}
+
+#ifndef APP_ARG_RSZ_CHARS
+#define APP_ARG_RSZ_CHARS 63
+#endif
+
+static int
+parse_arg_rsz(const char *arg)
+{
+       if (strnlen(arg, APP_ARG_RSZ_CHARS + 1) == APP_ARG_RSZ_CHARS + 1) {
+               return -1;
+       }
+
+       if (str_to_unsigned_vals(arg, APP_ARG_RSZ_CHARS, ',', 4,
+                       &app.nic_rx_ring_size,
+                       &app.ring_rx_size,
+                       &app.ring_tx_size,
+                       &app.nic_tx_ring_size) !=  4)
+               return -2;
+
+
+       if ((app.nic_rx_ring_size == 0) ||
+               (app.nic_tx_ring_size == 0) ||
+               (app.ring_rx_size == 0) ||
+               (app.ring_tx_size == 0)) {
+               return -3;
+       }
+
+       return 0;
+}
+
+#ifndef APP_ARG_BSZ_CHARS
+#define APP_ARG_BSZ_CHARS 63
+#endif
+
+static int
+parse_arg_bsz(const char *arg)
+{
+       const char *p = arg, *p0;
+       if (strnlen(arg, APP_ARG_BSZ_CHARS + 1) == APP_ARG_BSZ_CHARS + 1) {
+               return -1;
+       }
+
+       p0 = strchr(p++, ')');
+       if ((p0 == NULL) ||
+           (str_to_unsigned_vals(p, p0 - p, ',', 2, &app.burst_size_io_rx_read, &app.burst_size_io_rx_write) !=  2)) {
+               return -2;
+       }
+
+       p = strchr(p0, '(');
+       if (p == NULL) {
+               return -3;
+       }
+
+       p0 = strchr(p++, ')');
+       if ((p0 == NULL) ||
+           (str_to_unsigned_vals(p, p0 - p, ',', 2, &app.burst_size_worker_read, &app.burst_size_worker_write) !=  2)) {
+               return -4;
+       }
+
+       p = strchr(p0, '(');
+       if (p == NULL) {
+               return -5;
+       }
+
+       p0 = strchr(p++, ')');
+       if ((p0 == NULL) ||
+           (str_to_unsigned_vals(p, p0 - p, ',', 2, &app.burst_size_io_tx_read, &app.burst_size_io_tx_write) !=  2)) {
+               return -6;
+       }
+
+       if ((app.burst_size_io_rx_read == 0) ||
+               (app.burst_size_io_rx_write == 0) ||
+               (app.burst_size_worker_read == 0) ||
+               (app.burst_size_worker_write == 0) ||
+               (app.burst_size_io_tx_read == 0) ||
+               (app.burst_size_io_tx_write == 0)) {
+               return -7;
+       }
+
+       if ((app.burst_size_io_rx_read > APP_MBUF_ARRAY_SIZE) ||
+               (app.burst_size_io_rx_write > APP_MBUF_ARRAY_SIZE) ||
+               (app.burst_size_worker_read > APP_MBUF_ARRAY_SIZE) ||
+               (app.burst_size_worker_write > APP_MBUF_ARRAY_SIZE) ||
+               ((2 * app.burst_size_io_tx_read) > APP_MBUF_ARRAY_SIZE) ||
+               (app.burst_size_io_tx_write > APP_MBUF_ARRAY_SIZE)) {
+               return -8;
+       }
+
+       return 0;
+}
+
+#ifndef APP_ARG_NUMERICAL_SIZE_CHARS
+#define APP_ARG_NUMERICAL_SIZE_CHARS 15
+#endif
+
+static int
+parse_arg_pos_lb(const char *arg)
+{
+       uint32_t x;
+       char *endpt;
+
+       if (strnlen(arg, APP_ARG_NUMERICAL_SIZE_CHARS + 1) == APP_ARG_NUMERICAL_SIZE_CHARS + 1) {
+               return -1;
+       }
+
+       errno = 0;
+       x = strtoul(arg, &endpt, 10);
+       if (errno != 0 || endpt == arg || *endpt != '\0'){
+               return -2;
+       }
+
+       if (x >= 64) {
+               return -3;
+       }
+
+       app.pos_lb = (uint8_t) x;
+
+       return 0;
+}
+
+/* Parse the argument given in the command line of the application */
+int
+app_parse_args(int argc, char **argv)
+{
+       int opt, ret;
+       char **argvopt;
+       int option_index;
+       char *prgname = argv[0];
+       static struct option lgopts[] = {
+               {"rx", 1, 0, 0},
+               {"tx", 1, 0, 0},
+               {"w", 1, 0, 0},
+               {"lpm", 1, 0, 0},
+               {"rsz", 1, 0, 0},
+               {"bsz", 1, 0, 0},
+               {"pos-lb", 1, 0, 0},
+               {NULL, 0, 0, 0}
+       };
+       uint32_t arg_w = 0;
+       uint32_t arg_rx = 0;
+       uint32_t arg_tx = 0;
+       uint32_t arg_lpm = 0;
+       uint32_t arg_rsz = 0;
+       uint32_t arg_bsz = 0;
+       uint32_t arg_pos_lb = 0;
+
+       argvopt = argv;
+
+       while ((opt = getopt_long(argc, argvopt, "",
+                               lgopts, &option_index)) != EOF) {
+
+               switch (opt) {
+               /* long options */
+               case 0:
+                       if (!strcmp(lgopts[option_index].name, "rx")) {
+                               arg_rx = 1;
+                               ret = parse_arg_rx(optarg);
+                               if (ret) {
+                                       printf("Incorrect value for --rx argument (%d)\n", ret);
+                                       return -1;
+                               }
+                       }
+                       if (!strcmp(lgopts[option_index].name, "tx")) {
+                               arg_tx = 1;
+                               ret = parse_arg_tx(optarg);
+                               if (ret) {
+                                       printf("Incorrect value for --tx argument (%d)\n", ret);
+                                       return -1;
+                               }
+                       }
+                       if (!strcmp(lgopts[option_index].name, "w")) {
+                               arg_w = 1;
+                               ret = parse_arg_w(optarg);
+                               if (ret) {
+                                       printf("Incorrect value for --w argument (%d)\n", ret);
+                                       return -1;
+                               }
+                       }
+                       if (!strcmp(lgopts[option_index].name, "lpm")) {
+                               arg_lpm = 1;
+                               ret = parse_arg_lpm(optarg);
+                               if (ret) {
+                                       printf("Incorrect value for --lpm argument (%d)\n", ret);
+                                       return -1;
+                               }
+                       }
+                       if (!strcmp(lgopts[option_index].name, "rsz")) {
+                               arg_rsz = 1;
+                               ret = parse_arg_rsz(optarg);
+                               if (ret) {
+                                       printf("Incorrect value for --rsz argument (%d)\n", ret);
+                                       return -1;
+                               }
+                       }
+                       if (!strcmp(lgopts[option_index].name, "bsz")) {
+                               arg_bsz = 1;
+                               ret = parse_arg_bsz(optarg);
+                               if (ret) {
+                                       printf("Incorrect value for --bsz argument (%d)\n", ret);
+                                       return -1;
+                               }
+                       }
+                       if (!strcmp(lgopts[option_index].name, "pos-lb")) {
+                               arg_pos_lb = 1;
+                               ret = parse_arg_pos_lb(optarg);
+                               if (ret) {
+                                       printf("Incorrect value for --pos-lb argument (%d)\n", ret);
+                                       return -1;
+                               }
+                       }
+                       break;
+
+               default:
+                       return -1;
+               }
+       }
+
+       /* Check that all mandatory arguments are provided */
+       if ((arg_rx == 0) || (arg_tx == 0) || (arg_w == 0) || (arg_lpm == 0)){
+               printf("Not all mandatory arguments are present\n");
+               return -1;
+       }
+
+       /* Assign default values for the optional arguments not provided */
+       if (arg_rsz == 0) {
+               app.nic_rx_ring_size = APP_DEFAULT_NIC_RX_RING_SIZE;
+               app.nic_tx_ring_size = APP_DEFAULT_NIC_TX_RING_SIZE;
+               app.ring_rx_size = APP_DEFAULT_RING_RX_SIZE;
+               app.ring_tx_size = APP_DEFAULT_RING_TX_SIZE;
+       }
+
+       if (arg_bsz == 0) {
+               app.burst_size_io_rx_read = APP_DEFAULT_BURST_SIZE_IO_RX_READ;
+               app.burst_size_io_rx_write = APP_DEFAULT_BURST_SIZE_IO_RX_WRITE;
+               app.burst_size_io_tx_read = APP_DEFAULT_BURST_SIZE_IO_TX_READ;
+               app.burst_size_io_tx_write = APP_DEFAULT_BURST_SIZE_IO_TX_WRITE;
+               app.burst_size_worker_read = APP_DEFAULT_BURST_SIZE_WORKER_READ;
+               app.burst_size_worker_write = APP_DEFAULT_BURST_SIZE_WORKER_WRITE;
+       }
+
+       if (arg_pos_lb == 0) {
+               app.pos_lb = APP_DEFAULT_IO_RX_LB_POS;
+       }
+
+       /* Check cross-consistency of arguments */
+       if ((ret = app_check_lpm_table()) < 0) {
+               printf("At least one LPM rule is inconsistent (%d)\n", ret);
+               return -1;
+       }
+       if (app_check_every_rx_port_is_tx_enabled() < 0) {
+               printf("On LPM lookup miss, packet is sent back on the input port.\n");
+               printf("At least one RX port is not enabled for TX.\n");
+               return -2;
+       }
+
+       if (optind >= 0)
+               argv[optind - 1] = prgname;
+
+       ret = optind - 1;
+       optind = 0; /* reset getopt lib */
+       return ret;
+}
+
+int
+app_get_nic_rx_queues_per_port(uint8_t port)
+{
+       uint32_t i, count;
+
+       if (port >= APP_MAX_NIC_PORTS) {
+               return -1;
+       }
+
+       count = 0;
+       for (i = 0; i < APP_MAX_RX_QUEUES_PER_NIC_PORT; i ++) {
+               if (app.nic_rx_queue_mask[port][i] == 1) {
+                       count ++;
+               }
+       }
+
+       return count;
+}
+
+/*
+ * Find the I/O lcore that polls the given NIC (port, queue) pair on its RX
+ * side. On success, store the lcore ID in *lcore_out and return 0; return
+ * -1 if no I/O lcore handles that queue.
+ */
+int
+app_get_lcore_for_nic_rx(uint8_t port, uint8_t queue, uint32_t *lcore_out)
+{
+       uint32_t lc;
+
+       for (lc = 0; lc < APP_MAX_LCORES; lc ++) {
+               struct app_lcore_params_io *lp;
+               uint32_t q;
+
+               if (app.lcore_params[lc].type != e_APP_LCORE_IO) {
+                       continue;
+               }
+
+               lp = &app.lcore_params[lc].io;
+               for (q = 0; q < lp->rx.n_nic_queues; q ++) {
+                       if ((lp->rx.nic_queues[q].port != port) ||
+                           (lp->rx.nic_queues[q].queue != queue)) {
+                               continue;
+                       }
+
+                       *lcore_out = lc;
+                       return 0;
+               }
+       }
+
+       return -1;
+}
+
+/*
+ * Find the I/O lcore that handles TX for the given NIC port. On success,
+ * store the lcore ID in *lcore_out and return 0; return -1 if no I/O lcore
+ * transmits on that port.
+ */
+int
+app_get_lcore_for_nic_tx(uint8_t port, uint32_t *lcore_out)
+{
+       uint32_t lc;
+
+       for (lc = 0; lc < APP_MAX_LCORES; lc ++) {
+               struct app_lcore_params_io *lp;
+               uint32_t p;
+
+               if (app.lcore_params[lc].type != e_APP_LCORE_IO) {
+                       continue;
+               }
+
+               lp = &app.lcore_params[lc].io;
+               for (p = 0; p < lp->tx.n_nic_ports; p ++) {
+                       if (lp->tx.nic_ports[p] != port) {
+                               continue;
+                       }
+
+                       *lcore_out = lc;
+                       return 0;
+               }
+       }
+
+       return -1;
+}
+
+/*
+ * Return 1 if at least one enabled (non-disabled) lcore resides on the
+ * given CPU socket, 0 otherwise.
+ */
+int
+app_is_socket_used(uint32_t socket)
+{
+       uint32_t lc;
+
+       for (lc = 0; lc < APP_MAX_LCORES; lc ++) {
+               if ((app.lcore_params[lc].type != e_APP_LCORE_DISABLED) &&
+                   (rte_lcore_to_socket_id(lc) == socket)) {
+                       return 1;
+               }
+       }
+
+       return 0;
+}
+
+/*
+ * Count the I/O lcores that have at least one NIC RX queue assigned to
+ * them (i.e. the lcores acting as packet receivers).
+ */
+uint32_t
+app_get_lcores_io_rx(void)
+{
+       uint32_t lc, n = 0;
+
+       for (lc = 0; lc < APP_MAX_LCORES; lc ++) {
+               struct app_lcore_params_io *lp_io = &app.lcore_params[lc].io;
+
+               if ((app.lcore_params[lc].type == e_APP_LCORE_IO) &&
+                   (lp_io->rx.n_nic_queues > 0)) {
+                       n ++;
+               }
+       }
+
+       return n;
+}
+
+/*
+ * Count the worker lcores. Panics if the count exceeds
+ * APP_MAX_WORKER_LCORES, since downstream code sizes arrays by that bound.
+ */
+uint32_t
+app_get_lcores_worker(void)
+{
+       uint32_t lc, n = 0;
+
+       for (lc = 0; lc < APP_MAX_LCORES; lc ++) {
+               if (app.lcore_params[lc].type == e_APP_LCORE_WORKER) {
+                       n ++;
+               }
+       }
+
+       if (n > APP_MAX_WORKER_LCORES) {
+               rte_panic("Algorithmic error (too many worker lcores)\n");
+               return 0; /* not reached: rte_panic() does not return */
+       }
+
+       return n;
+}
+
+/*
+ * Dump the complete runtime configuration to stdout: the NIC RX/TX port
+ * setup, the per-lcore I/O and worker parameters (including the rings
+ * connecting them), the LPM rule list, and the ring/burst sizes.
+ */
+void
+app_print_params(void)
+{
+       uint32_t port, queue, lcore, rule, i, j;
+
+       /* Print NIC RX configuration */
+       printf("NIC RX ports: ");
+       for (port = 0; port < APP_MAX_NIC_PORTS; port ++) {
+               uint32_t n_rx_queues = app_get_nic_rx_queues_per_port((uint8_t) port);
+
+               if (n_rx_queues == 0) {
+                       continue;
+               }
+
+               printf("%u (", port);
+               for (queue = 0; queue < APP_MAX_RX_QUEUES_PER_NIC_PORT; queue ++) {
+                       if (app.nic_rx_queue_mask[port][queue] == 1) {
+                               printf("%u ", queue);
+                       }
+               }
+               printf(")  ");
+       }
+       printf(";\n");
+
+       /* Print I/O lcore RX params */
+       for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) {
+               struct app_lcore_params_io *lp = &app.lcore_params[lcore].io;
+
+               if ((app.lcore_params[lcore].type != e_APP_LCORE_IO) ||
+                   (lp->rx.n_nic_queues == 0)) {
+                       continue;
+               }
+
+               printf("I/O lcore %u (socket %u): ", lcore, rte_lcore_to_socket_id(lcore));
+
+               printf("RX ports  ");
+               for (i = 0; i < lp->rx.n_nic_queues; i ++) {
+                       printf("(%u, %u)  ",
+                               (uint32_t) lp->rx.nic_queues[i].port,
+                               (uint32_t) lp->rx.nic_queues[i].queue);
+               }
+               printf("; ");
+
+               printf("Output rings  ");
+               for (i = 0; i < lp->rx.n_rings; i ++) {
+                       printf("%p  ", lp->rx.rings[i]);
+               }
+               printf(";\n");
+       }
+
+       /* Print worker lcore RX params */
+       for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) {
+               struct app_lcore_params_worker *lp = &app.lcore_params[lcore].worker;
+
+               if (app.lcore_params[lcore].type != e_APP_LCORE_WORKER) {
+                       continue;
+               }
+
+               printf("Worker lcore %u (socket %u) ID %u: ",
+                       lcore,
+                       rte_lcore_to_socket_id(lcore),
+                       lp->worker_id);
+
+               printf("Input rings  ");
+               for (i = 0; i < lp->n_rings_in; i ++) {
+                       printf("%p  ", lp->rings_in[i]);
+               }
+
+               printf(";\n");
+       }
+
+       printf("\n");
+
+       /* Print NIC TX configuration */
+       printf("NIC TX ports:  ");
+       for (port = 0; port < APP_MAX_NIC_PORTS; port ++) {
+               if (app.nic_tx_port_mask[port] == 1) {
+                       printf("%u  ", port);
+               }
+       }
+       printf(";\n");
+
+       /* Print I/O TX lcore params */
+       for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) {
+               struct app_lcore_params_io *lp = &app.lcore_params[lcore].io;
+               uint32_t n_workers = app_get_lcores_worker();
+
+               if ((app.lcore_params[lcore].type != e_APP_LCORE_IO) ||
+                    (lp->tx.n_nic_ports == 0)) {
+                       continue;
+               }
+
+               printf("I/O lcore %u (socket %u): ", lcore, rte_lcore_to_socket_id(lcore));
+
+               printf("Input rings per TX port  ");
+               for (i = 0; i < lp->tx.n_nic_ports; i ++) {
+                       port = lp->tx.nic_ports[i];
+
+                       printf("%u (", port);
+                       for (j = 0; j < n_workers; j ++) {
+                               printf("%p  ", lp->tx.rings[port][j]);
+                       }
+                       printf(")  ");
+
+               }
+
+               printf(";\n");
+       }
+
+       /* Print worker lcore TX params */
+       for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) {
+               struct app_lcore_params_worker *lp = &app.lcore_params[lcore].worker;
+
+               if (app.lcore_params[lcore].type != e_APP_LCORE_WORKER) {
+                       continue;
+               }
+
+               printf("Worker lcore %u (socket %u) ID %u: \n",
+                       lcore,
+                       rte_lcore_to_socket_id(lcore),
+                       lp->worker_id);
+
+               printf("Output rings per TX port  ");
+               for (port = 0; port < APP_MAX_NIC_PORTS; port ++) {
+                       if (lp->rings_out[port] != NULL) {
+                               printf("%u (%p)  ", port, lp->rings_out[port]);
+                       }
+               }
+
+               printf(";\n");
+       }
+
+       /* Print LPM rules, one per line as "index: a.b.c.d/depth => if_out" */
+       printf("LPM rules: \n");
+       for (rule = 0; rule < app.n_lpm_rules; rule ++) {
+               uint32_t ip = app.lpm_rules[rule].ip;
+               uint8_t depth = app.lpm_rules[rule].depth;
+               uint8_t if_out = app.lpm_rules[rule].if_out;
+
+               /* Break the 32-bit IP into dotted-quad octets, MSB first */
+               printf("\t%u: %u.%u.%u.%u/%u => %u;\n",
+                       rule,
+                       (ip & 0xFF000000) >> 24,
+                       (ip & 0x00FF0000) >> 16,
+                       (ip & 0x0000FF00) >> 8,
+                       ip & 0x000000FF,
+                       (uint32_t) depth,
+                       (uint32_t) if_out
+               );
+       }
+
+       /* Rings */
+       printf("Ring sizes: NIC RX = %u; Worker in = %u; Worker out = %u; NIC TX = %u;\n",
+               app.nic_rx_ring_size,
+               app.ring_rx_size,
+               app.ring_tx_size,
+               app.nic_tx_ring_size);
+
+       /* Bursts */
+       printf("Burst sizes: I/O RX (rd = %u, wr = %u); Worker (rd = %u, wr = %u); I/O TX (rd = %u, wr = %u)\n",
+               app.burst_size_io_rx_read,
+               app.burst_size_io_rx_write,
+               app.burst_size_worker_read,
+               app.burst_size_worker_write,
+               app.burst_size_io_tx_read,
+               app.burst_size_io_tx_write);
+}
diff --git a/examples/load_balancer/init.c b/examples/load_balancer/init.c
new file mode 100644 (file)
index 0000000..12e8887
--- /dev/null
@@ -0,0 +1,507 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <sys/types.h>
+#include <string.h>
+#include <sys/queue.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <getopt.h>
+
+#include <rte_common.h>
+#include <rte_byteorder.h>
+#include <rte_log.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_memzone.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_launch.h>
+#include <rte_atomic.h>
+#include <rte_cycles.h>
+#include <rte_prefetch.h>
+#include <rte_lcore.h>
+#include <rte_per_lcore.h>
+#include <rte_branch_prediction.h>
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_random.h>
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_ring.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_string_fns.h>
+#include <rte_ip.h>
+#include <rte_tcp.h>
+#include <rte_lpm.h>
+
+#include "main.h"
+
+/* Default NIC port configuration: header split off, hardware IP checksum
+ * on, and RSS hashing enabled over IPv4 so incoming flows are spread
+ * across the port's RX queues. */
+static struct rte_eth_conf port_conf = {
+       .rxmode = {
+               .split_hdr_size = 0,
+               .header_split   = 0, /**< Header Split disabled */
+               .hw_ip_checksum = 1, /**< IP checksum offload enabled */
+               .hw_vlan_filter = 0, /**< VLAN filtering disabled */
+               .jumbo_frame    = 0, /**< Jumbo Frame Support disabled */
+               .hw_strip_crc   = 0, /**< CRC stripped by hardware */
+       },
+       .rx_adv_conf = {
+               .rss_conf = {
+                       .rss_key = NULL, /**< NULL => use the driver's default RSS key */
+                       .rss_hf = ETH_RSS_IPV4,
+               },
+       },
+       .txmode = {
+       },
+};
+
+/* Per-queue RX configuration: descriptor thresholds and the RX free
+ * threshold come from the APP_DEFAULT_* constants in main.h. */
+static struct rte_eth_rxconf rx_conf = {
+       .rx_thresh = {
+               .pthresh = APP_DEFAULT_NIC_RX_PTHRESH,
+               .hthresh = APP_DEFAULT_NIC_RX_HTHRESH,
+               .wthresh = APP_DEFAULT_NIC_RX_WTHRESH,
+       },
+       .rx_free_thresh = APP_DEFAULT_NIC_RX_FREE_THRESH,
+};
+
+/* Per-queue TX configuration: descriptor thresholds plus the TX free and
+ * RS thresholds, all taken from the APP_DEFAULT_* constants in main.h. */
+static struct rte_eth_txconf tx_conf = {
+       .tx_thresh = {
+               .pthresh = APP_DEFAULT_NIC_TX_PTHRESH,
+               .hthresh = APP_DEFAULT_NIC_TX_HTHRESH,
+               .wthresh = APP_DEFAULT_NIC_TX_WTHRESH,
+       },
+       .tx_free_thresh = APP_DEFAULT_NIC_TX_FREE_THRESH,
+       .tx_rs_thresh = APP_DEFAULT_NIC_TX_RS_THRESH,
+};
+
+/*
+ * Give every worker lcore a consecutive worker_id (0, 1, 2, ...) in
+ * increasing lcore order.
+ */
+static void
+app_assign_worker_ids(void)
+{
+       uint32_t lc, next_id = 0;
+
+       for (lc = 0; lc < APP_MAX_LCORES; lc ++) {
+               if (app.lcore_params[lc].type != e_APP_LCORE_WORKER) {
+                       continue;
+               }
+
+               app.lcore_params[lc].worker.worker_id = next_id;
+               next_id ++;
+       }
+}
+
+/*
+ * Create one mbuf pool per CPU socket that hosts at least one enabled
+ * lcore, then point each enabled lcore at the pool of its own socket so
+ * buffer allocation stays NUMA-local. Panics on allocation failure.
+ */
+static void
+app_init_mbuf_pools(void)
+{
+       uint32_t socket, lcore;
+
+       /* Init the buffer pools */
+       for (socket = 0; socket < APP_MAX_SOCKETS; socket ++) {
+               char name[32];
+               if (app_is_socket_used(socket) == 0) {
+                       continue;
+               }
+
+               rte_snprintf(name, sizeof(name), "mbuf_pool_%u", socket);
+               printf("Creating the mbuf pool for socket %u ...\n", socket);
+               app.pools[socket] = rte_mempool_create(
+                       name,
+                       APP_DEFAULT_MEMPOOL_BUFFERS,
+                       APP_DEFAULT_MBUF_SIZE,
+                       APP_DEFAULT_MEMPOOL_CACHE_SIZE,
+                       sizeof(struct rte_pktmbuf_pool_private),
+                       rte_pktmbuf_pool_init, NULL,
+                       rte_pktmbuf_init, NULL,
+                       socket,
+                       0);
+               if (app.pools[socket] == NULL) {
+                       rte_panic("Cannot create mbuf pool on socket %u\n", socket);
+               }
+       }
+
+       /* Attach every enabled lcore to the pool of the socket it runs on */
+       for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) {
+               if (app.lcore_params[lcore].type == e_APP_LCORE_DISABLED) {
+                       continue;
+               }
+
+               socket = rte_lcore_to_socket_id(lcore);
+               app.lcore_params[lcore].pool = app.pools[socket];
+       }
+}
+
+/*
+ * Create one LPM table per used CPU socket, populate each with the full
+ * rule set parsed from the command line, then point every worker lcore at
+ * the table of its own socket. Panics if a table cannot be created or a
+ * rule cannot be added.
+ */
+static void
+app_init_lpm_tables(void)
+{
+       uint32_t socket, lcore;
+
+       /* Init the LPM tables */
+       for (socket = 0; socket < APP_MAX_SOCKETS; socket ++) {
+               char name[32];
+               uint32_t rule;
+
+               if (app_is_socket_used(socket) == 0) {
+                       continue;
+               }
+
+               rte_snprintf(name, sizeof(name), "lpm_table_%u", socket);
+               printf("Creating the LPM table for socket %u ...\n", socket);
+               app.lpm_tables[socket] = rte_lpm_create(
+                       name,
+                       socket,
+                       APP_MAX_LPM_RULES,
+                       RTE_LPM_MEMZONE);
+               if (app.lpm_tables[socket] == NULL) {
+                       rte_panic("Unable to create LPM table on socket %u\n", socket);
+               }
+
+               /* Insert every rule; rules were validated by app_check_lpm_table() */
+               for (rule = 0; rule < app.n_lpm_rules; rule ++) {
+                       int ret;
+
+                       ret = rte_lpm_add(app.lpm_tables[socket],
+                               app.lpm_rules[rule].ip,
+                               app.lpm_rules[rule].depth,
+                               app.lpm_rules[rule].if_out);
+
+                       if (ret < 0) {
+                               rte_panic("Unable to add entry %u (%x/%u => %u) to the LPM table on socket %u (%d)\n",
+                                       rule, app.lpm_rules[rule].ip,
+                                       (uint32_t) app.lpm_rules[rule].depth,
+                                       (uint32_t) app.lpm_rules[rule].if_out,
+                                       socket,
+                                       ret);
+                       }
+               }
+
+       }
+
+       /* Attach each worker lcore to the LPM table of its own socket */
+       for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) {
+               if (app.lcore_params[lcore].type != e_APP_LCORE_WORKER) {
+                       continue;
+               }
+
+               socket = rte_lcore_to_socket_id(lcore);
+               app.lcore_params[lcore].worker.lpm_table = app.lpm_tables[socket];
+       }
+}
+
+/*
+ * Create the single-producer/single-consumer rings that carry packets
+ * from each RX-enabled I/O lcore to every worker lcore (a full mesh:
+ * one ring per I/O-RX/worker pair, allocated on the I/O lcore's socket).
+ * Afterwards, cross-check that each I/O lcore has one output ring per
+ * worker and each worker has one input ring per I/O-RX lcore.
+ */
+static void
+app_init_rings_rx(void)
+{
+       uint32_t lcore;
+
+       /* Initialize the rings for the RX side */
+       for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) {
+               struct app_lcore_params_io *lp_io = &app.lcore_params[lcore].io;
+               uint32_t socket_io, lcore_worker;
+
+               if ((app.lcore_params[lcore].type != e_APP_LCORE_IO) ||
+                   (lp_io->rx.n_nic_queues == 0)) {
+                       continue;
+               }
+
+               socket_io = rte_lcore_to_socket_id(lcore);
+
+               for (lcore_worker = 0; lcore_worker < APP_MAX_LCORES; lcore_worker ++) {
+                       char name[32];
+                       struct app_lcore_params_worker *lp_worker = &app.lcore_params[lcore_worker].worker;
+                       struct rte_ring *ring = NULL;
+
+                       if (app.lcore_params[lcore_worker].type != e_APP_LCORE_WORKER) {
+                               continue;
+                       }
+
+                       printf("Creating ring to connect I/O lcore %u (socket %u) with worker lcore %u ...\n",
+                               lcore,
+                               socket_io,
+                               lcore_worker);
+                       rte_snprintf(name, sizeof(name), "app_ring_rx_s%u_io%u_w%u",
+                               socket_io,
+                               lcore,
+                               lcore_worker);
+                       /* SP/SC: exactly one I/O producer and one worker consumer */
+                       ring = rte_ring_create(
+                               name,
+                               app.ring_rx_size,
+                               socket_io,
+                               RING_F_SP_ENQ | RING_F_SC_DEQ);
+                       if (ring == NULL) {
+                               rte_panic("Cannot create ring to connect I/O core %u with worker core %u\n",
+                                       lcore,
+                                       lcore_worker);
+                       }
+
+                       /* Register the ring on both endpoints */
+                       lp_io->rx.rings[lp_io->rx.n_rings] = ring;
+                       lp_io->rx.n_rings ++;
+
+                       lp_worker->rings_in[lp_worker->n_rings_in] = ring;
+                       lp_worker->n_rings_in ++;
+               }
+       }
+
+       /* Sanity check: every I/O-RX lcore must feed every worker */
+       for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) {
+               struct app_lcore_params_io *lp_io = &app.lcore_params[lcore].io;
+
+               if ((app.lcore_params[lcore].type != e_APP_LCORE_IO) ||
+                   (lp_io->rx.n_nic_queues == 0)) {
+                       continue;
+               }
+
+               if (lp_io->rx.n_rings != app_get_lcores_worker()) {
+                       rte_panic("Algorithmic error (I/O RX rings)\n");
+               }
+       }
+
+       /* Sanity check: every worker must be fed by every I/O-RX lcore */
+       for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) {
+               struct app_lcore_params_worker *lp_worker = &app.lcore_params[lcore].worker;
+
+               if (app.lcore_params[lcore].type != e_APP_LCORE_WORKER) {
+                       continue;
+               }
+
+               if (lp_worker->n_rings_in != app_get_lcores_io_rx()) {
+                       rte_panic("Algorithmic error (worker input rings)\n");
+               }
+       }
+}
+
+/*
+ * Create the single-producer/single-consumer rings that carry packets
+ * from each worker lcore to the I/O lcore handling TX for each enabled
+ * NIC port (one ring per worker/TX-port pair, allocated on the TX I/O
+ * lcore's socket). Afterwards, verify that every (port, worker) slot of
+ * each TX I/O lcore received a ring.
+ */
+static void
+app_init_rings_tx(void)
+{
+       uint32_t lcore;
+
+       /* Initialize the rings for the TX side */
+       for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) {
+               struct app_lcore_params_worker *lp_worker = &app.lcore_params[lcore].worker;
+               uint32_t port;
+
+               if (app.lcore_params[lcore].type != e_APP_LCORE_WORKER) {
+                       continue;
+               }
+
+               for (port = 0; port < APP_MAX_NIC_PORTS; port ++) {
+                       char name[32];
+                       struct app_lcore_params_io *lp_io = NULL;
+                       struct rte_ring *ring;
+                       uint32_t socket_io, lcore_io;
+
+                       if (app.nic_tx_port_mask[port] == 0) {
+                               continue;
+                       }
+
+                       if (app_get_lcore_for_nic_tx((uint8_t) port, &lcore_io) < 0) {
+                               rte_panic("Algorithmic error (no I/O core to handle TX of port %u)\n",
+                                       port);
+                       }
+
+                       lp_io = &app.lcore_params[lcore_io].io;
+                       socket_io = rte_lcore_to_socket_id(lcore_io);
+
+                       printf("Creating ring to connect worker lcore %u with TX port %u (through I/O lcore %u) (socket %u) ...\n",
+                               lcore, port, lcore_io, socket_io);
+                       rte_snprintf(name, sizeof(name), "app_ring_tx_s%u_w%u_p%u", socket_io, lcore, port);
+                       /* SP/SC: exactly one worker producer and one I/O consumer */
+                       ring = rte_ring_create(
+                               name,
+                               app.ring_tx_size,
+                               socket_io,
+                               RING_F_SP_ENQ | RING_F_SC_DEQ);
+                       if (ring == NULL) {
+                               rte_panic("Cannot create ring to connect worker core %u with TX port %u\n",
+                                       lcore,
+                                       port);
+                       }
+
+                       /* Register the ring on both endpoints, indexed by worker_id
+                        * on the I/O side (set earlier by app_assign_worker_ids) */
+                       lp_worker->rings_out[port] = ring;
+                       lp_io->tx.rings[port][lp_worker->worker_id] = ring;
+               }
+       }
+
+       /* Sanity check: every (port, worker) slot must hold a ring */
+       for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) {
+               struct app_lcore_params_io *lp_io = &app.lcore_params[lcore].io;
+               uint32_t i;
+
+               if ((app.lcore_params[lcore].type != e_APP_LCORE_IO) ||
+                   (lp_io->tx.n_nic_ports == 0)) {
+                       continue;
+               }
+
+               for (i = 0; i < lp_io->tx.n_nic_ports; i ++){
+                       uint32_t port, j;
+
+                       port = lp_io->tx.nic_ports[i];
+                       for (j = 0; j < app_get_lcores_worker(); j ++) {
+                               if (lp_io->tx.rings[port][j] == NULL) {
+                                       rte_panic("Algorithmic error (I/O TX rings)\n");
+                               }
+                       }
+               }
+       }
+}
+
+/*
+ * Bring up the NIC ports: init the PMD drivers, probe the PCI bus, then
+ * for every port enabled for RX and/or TX, configure the port, set up its
+ * RX queues (one TX queue max), start it and report link status.
+ *
+ * Fix vs. original: the return values of app_get_lcore_for_nic_rx() and
+ * app_get_lcore_for_nic_tx() were ignored, so on a lookup failure "lcore"
+ * was read uninitialized. Both lookups are now checked and panic on
+ * failure, matching the error style used in app_init_rings_tx().
+ */
+static void
+app_init_nics(void)
+{
+       uint32_t socket, lcore;
+       uint8_t port, queue;
+       int ret;
+
+       /* Init driver */
+       printf("Initializing the PMD driver ...\n");
+#ifdef RTE_LIBRTE_IGB_PMD
+       if (rte_igb_pmd_init() < 0) {
+               rte_panic("Cannot init IGB PMD\n");
+       }
+#endif
+#ifdef RTE_LIBRTE_IXGBE_PMD
+       if (rte_ixgbe_pmd_init() < 0) {
+               rte_panic("Cannot init IXGBE PMD\n");
+       }
+#endif
+       if (rte_eal_pci_probe() < 0) {
+               rte_panic("Cannot probe PCI\n");
+       }
+
+       /* Init NIC ports and queues, then start the ports */
+       for (port = 0; port < APP_MAX_NIC_PORTS; port ++) {
+               struct rte_eth_link link;
+               struct rte_mempool *pool;
+               uint32_t n_rx_queues, n_tx_queues;
+
+               n_rx_queues = app_get_nic_rx_queues_per_port(port);
+               n_tx_queues = app.nic_tx_port_mask[port];
+
+               if ((n_rx_queues == 0) && (n_tx_queues == 0)) {
+                       continue;
+               }
+
+               /* Init port */
+               printf("Initializing NIC port %u ...\n", (uint32_t) port);
+               ret = rte_eth_dev_configure(
+                       port,
+                       (uint8_t) n_rx_queues,
+                       (uint8_t) n_tx_queues,
+                       &port_conf);
+               if (ret < 0) {
+                       rte_panic("Cannot init NIC port %u (%d)\n", (uint32_t) port, ret);
+               }
+               rte_eth_promiscuous_enable(port);
+
+               /* Init RX queues */
+               for (queue = 0; queue < APP_MAX_RX_QUEUES_PER_NIC_PORT; queue ++) {
+                       if (app.nic_rx_queue_mask[port][queue] == 0) {
+                               continue;
+                       }
+
+                       /* Check the lookup: on failure "lcore" would be
+                        * uninitialized below */
+                       if (app_get_lcore_for_nic_rx(port, queue, &lcore) < 0) {
+                               rte_panic("Algorithmic error (no I/O core for RX of port %u queue %u)\n",
+                                       (uint32_t) port,
+                                       (uint32_t) queue);
+                       }
+                       socket = rte_lcore_to_socket_id(lcore);
+                       pool = app.lcore_params[lcore].pool;
+
+                       printf("Initializing NIC port %u RX queue %u ...\n",
+                               (uint32_t) port,
+                               (uint32_t) queue);
+                       ret = rte_eth_rx_queue_setup(
+                               port,
+                               queue,
+                               (uint16_t) app.nic_rx_ring_size,
+                               socket,
+                               &rx_conf,
+                               pool);
+                       if (ret < 0) {
+                               rte_panic("Cannot init RX queue %u for port %u (%d)\n",
+                                       (uint32_t) queue,
+                                       (uint32_t) port,
+                                       ret);
+                       }
+               }
+
+               /* Init TX queues */
+               if (app.nic_tx_port_mask[port] == 1) {
+                       /* Check the lookup: on failure "lcore" would be
+                        * uninitialized below */
+                       if (app_get_lcore_for_nic_tx(port, &lcore) < 0) {
+                               rte_panic("Algorithmic error (no I/O core for TX of port %u)\n",
+                                       (uint32_t) port);
+                       }
+                       socket = rte_lcore_to_socket_id(lcore);
+                       printf("Initializing NIC port %u TX queue 0 ...\n",
+                               (uint32_t) port);
+                       ret = rte_eth_tx_queue_setup(
+                               port,
+                               0,
+                               (uint16_t) app.nic_tx_ring_size,
+                               socket,
+                               &tx_conf);
+                       if (ret < 0) {
+                               rte_panic("Cannot init TX queue 0 for port %d (%d)\n",
+                                       port,
+                                       ret);
+                       }
+               }
+
+               /* Start port */
+               ret = rte_eth_dev_start(port);
+               if (ret < 0) {
+                       rte_panic("Cannot start port %d (%d)\n", port, ret);
+               }
+
+               /* Get link status */
+               rte_eth_link_get(port, &link);
+               if (link.link_status) {
+                       printf("Port %u is UP (%u Mbps)\n",
+                               (uint32_t) port,
+                               (unsigned) link.link_speed);
+               } else {
+                       printf("Port %u is DOWN\n",
+                               (uint32_t) port);
+               }
+       }
+}
+
+/*
+ * Master initialization routine. The order is significant: worker IDs are
+ * assigned first because app_init_rings_tx() indexes the I/O TX ring array
+ * by worker_id, and the mbuf pools must exist before app_init_nics() uses
+ * them to set up the NIC RX queues.
+ */
+void
+app_init(void)
+{
+       app_assign_worker_ids();
+       app_init_mbuf_pools();
+       app_init_lpm_tables();
+       app_init_rings_rx();
+       app_init_rings_tx();
+       app_init_nics();
+
+       printf("Initialization completed.\n");
+}
diff --git a/examples/load_balancer/main.c b/examples/load_balancer/main.c
new file mode 100644 (file)
index 0000000..108211c
--- /dev/null
@@ -0,0 +1,112 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <sys/types.h>
+#include <string.h>
+#include <sys/queue.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <getopt.h>
+#include <unistd.h>
+
+#include <rte_common.h>
+#include <rte_byteorder.h>
+#include <rte_log.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_memzone.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_launch.h>
+#include <rte_atomic.h>
+#include <rte_cycles.h>
+#include <rte_prefetch.h>
+#include <rte_lcore.h>
+#include <rte_per_lcore.h>
+#include <rte_branch_prediction.h>
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_random.h>
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_ring.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_ip.h>
+#include <rte_tcp.h>
+#include <rte_lpm.h>
+
+#include "main.h"
+
+/*
+ * Application entry point: initialize the EAL, parse the application
+ * arguments that follow the EAL ones, build all runtime state, then launch
+ * the per-lcore main loop on every lcore and wait for all slave lcores to
+ * finish. Returns 0 on success, -1 on any init or lcore failure.
+ */
+int
+MAIN(int argc, char **argv)
+{
+       uint32_t lcore;
+       int ret;
+
+       /* Init EAL */
+       ret = rte_eal_init(argc, argv);
+       if (ret < 0)
+               return -1;
+       /* Skip the arguments consumed by the EAL */
+       argc -= ret;
+       argv += ret;
+
+       /* Parse application arguments (after the EAL ones) */
+       ret = app_parse_args(argc, argv);
+       if (ret < 0) {
+               app_print_usage();
+               return -1;
+       }
+
+       /* Init */
+       app_init();
+       app_print_params();
+
+       /* Launch per-lcore init on every lcore */
+       rte_eal_mp_remote_launch(app_lcore_main_loop, NULL, CALL_MASTER);
+       RTE_LCORE_FOREACH_SLAVE(lcore) {
+               if (rte_eal_wait_lcore(lcore) < 0) {
+                       return -1;
+               }
+       }
+
+       return 0;
+}
diff --git a/examples/load_balancer/main.h b/examples/load_balancer/main.h
new file mode 100644 (file)
index 0000000..650f750
--- /dev/null
@@ -0,0 +1,377 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#ifndef _MAIN_H_
+#define _MAIN_H_
+
+/* Logical cores */
+/* All limits below may be overridden at build time by defining the macro
+ * before this header is included (each is wrapped in #ifndef). */
+#ifndef APP_MAX_SOCKETS
+#define APP_MAX_SOCKETS 2
+#endif
+
+#ifndef APP_MAX_LCORES
+#define APP_MAX_LCORES       RTE_MAX_LCORE
+#endif
+
+#ifndef APP_MAX_NIC_PORTS
+#define APP_MAX_NIC_PORTS    RTE_MAX_ETHPORTS
+#endif
+
+#ifndef APP_MAX_RX_QUEUES_PER_NIC_PORT
+#define APP_MAX_RX_QUEUES_PER_NIC_PORT 128
+#endif
+
+#ifndef APP_MAX_TX_QUEUES_PER_NIC_PORT
+#define APP_MAX_TX_QUEUES_PER_NIC_PORT 128
+#endif
+
+#ifndef APP_MAX_IO_LCORES
+#define APP_MAX_IO_LCORES 16
+#endif
+#if (APP_MAX_IO_LCORES > APP_MAX_LCORES)
+#error "APP_MAX_IO_LCORES is too big"
+#endif
+
+#ifndef APP_MAX_NIC_RX_QUEUES_PER_IO_LCORE
+#define APP_MAX_NIC_RX_QUEUES_PER_IO_LCORE 16
+#endif
+
+#ifndef APP_MAX_NIC_TX_PORTS_PER_IO_LCORE
+#define APP_MAX_NIC_TX_PORTS_PER_IO_LCORE 16
+#endif
+#if (APP_MAX_NIC_TX_PORTS_PER_IO_LCORE > APP_MAX_NIC_PORTS)
+#error "APP_MAX_NIC_TX_PORTS_PER_IO_LCORE too big"
+#endif
+
+#ifndef APP_MAX_WORKER_LCORES
+#define APP_MAX_WORKER_LCORES 16
+#endif
+#if (APP_MAX_WORKER_LCORES > APP_MAX_LCORES)
+#error "APP_MAX_WORKER_LCORES is too big"
+#endif
+
+
+/* Mempools */
+/* mbuf size = data room (2048) + per-mbuf metadata + reserved headroom */
+#ifndef APP_DEFAULT_MBUF_SIZE
+#define APP_DEFAULT_MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
+#endif
+
+#ifndef APP_DEFAULT_MEMPOOL_BUFFERS
+#define APP_DEFAULT_MEMPOOL_BUFFERS   8192
+#endif
+
+#ifndef APP_DEFAULT_MEMPOOL_CACHE_SIZE
+#define APP_DEFAULT_MEMPOOL_CACHE_SIZE  256
+#endif
+
+/* LPM Tables */
+#ifndef APP_MAX_LPM_RULES
+#define APP_MAX_LPM_RULES 1024
+#endif
+
+/* NIC RX */
+#ifndef APP_DEFAULT_NIC_RX_RING_SIZE
+#define APP_DEFAULT_NIC_RX_RING_SIZE 1024
+#endif
+
+/*
+ * RX and TX Prefetch, Host, and Write-back threshold values should be
+ * carefully set for optimal performance. Consult the network
+ * controller's datasheet and supporting DPDK documentation for guidance
+ * on how these parameters should be set.
+ */
+#ifndef APP_DEFAULT_NIC_RX_PTHRESH
+#define APP_DEFAULT_NIC_RX_PTHRESH  8
+#endif
+
+#ifndef APP_DEFAULT_NIC_RX_HTHRESH
+#define APP_DEFAULT_NIC_RX_HTHRESH  8
+#endif
+
+#ifndef APP_DEFAULT_NIC_RX_WTHRESH
+#define APP_DEFAULT_NIC_RX_WTHRESH  4
+#endif
+
+#ifndef APP_DEFAULT_NIC_RX_FREE_THRESH
+#define APP_DEFAULT_NIC_RX_FREE_THRESH  64
+#endif
+
+/* NIC TX */
+#ifndef APP_DEFAULT_NIC_TX_RING_SIZE
+#define APP_DEFAULT_NIC_TX_RING_SIZE 1024
+#endif
+
+/*
+ * These default values are optimized for use with the Intel(R) 82599 10 GbE
+ * Controller and the DPDK ixgbe PMD. Consider using other values for other
+ * network controllers and/or network drivers.
+ */
+#ifndef APP_DEFAULT_NIC_TX_PTHRESH
+#define APP_DEFAULT_NIC_TX_PTHRESH  36
+#endif
+
+#ifndef APP_DEFAULT_NIC_TX_HTHRESH
+#define APP_DEFAULT_NIC_TX_HTHRESH  0
+#endif
+
+#ifndef APP_DEFAULT_NIC_TX_WTHRESH
+#define APP_DEFAULT_NIC_TX_WTHRESH  0
+#endif
+
+#ifndef APP_DEFAULT_NIC_TX_FREE_THRESH
+#define APP_DEFAULT_NIC_TX_FREE_THRESH  0
+#endif
+
+#ifndef APP_DEFAULT_NIC_TX_RS_THRESH
+#define APP_DEFAULT_NIC_TX_RS_THRESH  0
+#endif
+
+/* Software Rings */
+#ifndef APP_DEFAULT_RING_RX_SIZE
+#define APP_DEFAULT_RING_RX_SIZE 1024
+#endif
+
+#ifndef APP_DEFAULT_RING_TX_SIZE
+#define APP_DEFAULT_RING_TX_SIZE 1024
+#endif
+
+/* Bursts */
+/* Size of the per-lcore staging buffers; every burst size below is
+ * validated against this bound at compile time. */
+#ifndef APP_MBUF_ARRAY_SIZE
+#define APP_MBUF_ARRAY_SIZE   512
+#endif
+
+#ifndef APP_DEFAULT_BURST_SIZE_IO_RX_READ
+#define APP_DEFAULT_BURST_SIZE_IO_RX_READ  144
+#endif
+#if (APP_DEFAULT_BURST_SIZE_IO_RX_READ > APP_MBUF_ARRAY_SIZE)
+#error "APP_DEFAULT_BURST_SIZE_IO_RX_READ is too big"
+#endif
+
+#ifndef APP_DEFAULT_BURST_SIZE_IO_RX_WRITE
+#define APP_DEFAULT_BURST_SIZE_IO_RX_WRITE  144
+#endif
+#if (APP_DEFAULT_BURST_SIZE_IO_RX_WRITE > APP_MBUF_ARRAY_SIZE)
+#error "APP_DEFAULT_BURST_SIZE_IO_RX_WRITE is too big"
+#endif
+
+#ifndef APP_DEFAULT_BURST_SIZE_IO_TX_READ
+#define APP_DEFAULT_BURST_SIZE_IO_TX_READ  144
+#endif
+#if (APP_DEFAULT_BURST_SIZE_IO_TX_READ > APP_MBUF_ARRAY_SIZE)
+#error "APP_DEFAULT_BURST_SIZE_IO_TX_READ is too big"
+#endif
+
+#ifndef APP_DEFAULT_BURST_SIZE_IO_TX_WRITE
+#define APP_DEFAULT_BURST_SIZE_IO_TX_WRITE  144
+#endif
+#if (APP_DEFAULT_BURST_SIZE_IO_TX_WRITE > APP_MBUF_ARRAY_SIZE)
+#error "APP_DEFAULT_BURST_SIZE_IO_TX_WRITE is too big"
+#endif
+
+/* The worker RX path may hold up to two read bursts in its staging
+ * buffer at once, hence the factor of 2 in the check below. */
+#ifndef APP_DEFAULT_BURST_SIZE_WORKER_READ
+#define APP_DEFAULT_BURST_SIZE_WORKER_READ  144
+#endif
+#if ((2 * APP_DEFAULT_BURST_SIZE_WORKER_READ) > APP_MBUF_ARRAY_SIZE)
+#error "APP_DEFAULT_BURST_SIZE_WORKER_READ is too big"
+#endif
+
+#ifndef APP_DEFAULT_BURST_SIZE_WORKER_WRITE
+#define APP_DEFAULT_BURST_SIZE_WORKER_WRITE  144
+#endif
+#if (APP_DEFAULT_BURST_SIZE_WORKER_WRITE > APP_MBUF_ARRAY_SIZE)
+#error "APP_DEFAULT_BURST_SIZE_WORKER_WRITE is too big"
+#endif
+
+/* Load balancing logic */
+/* Zero-based byte offset within the packet used to pick the destination
+ * worker (see app_lcore_io_rx in runtime.c); must fit the 0..63 range. */
+#ifndef APP_DEFAULT_IO_RX_LB_POS
+#define APP_DEFAULT_IO_RX_LB_POS 29
+#endif
+#if (APP_DEFAULT_IO_RX_LB_POS >= 64)
+#error "APP_DEFAULT_IO_RX_LB_POS is too big"
+#endif
+
+/* Fixed-capacity staging buffer of mbuf pointers; n_mbufs is the number
+ * of valid entries at the front of array. */
+struct app_mbuf_array {
+       struct rte_mbuf *array[APP_MBUF_ARRAY_SIZE];
+       uint32_t n_mbufs;
+};
+
+/* Role assigned to each logical core by the configuration. */
+enum app_lcore_type {
+       e_APP_LCORE_DISABLED = 0,
+       e_APP_LCORE_IO,
+       e_APP_LCORE_WORKER
+};
+
+/* Per-lcore state for an I/O core: RX side pulls packets from NIC queues
+ * and distributes them to workers; TX side drains worker rings and
+ * transmits on NIC ports. */
+struct app_lcore_params_io {
+       /* I/O RX */
+       struct {
+               /* NIC */
+               /* (port, queue) pairs polled by this lcore */
+               struct {
+                       uint8_t port;
+                       uint8_t queue;
+               } nic_queues[APP_MAX_NIC_RX_QUEUES_PER_IO_LCORE];
+               uint32_t n_nic_queues;
+
+               /* Rings */
+               /* one output ring per worker lcore */
+               struct rte_ring *rings[APP_MAX_WORKER_LCORES];
+               uint32_t n_rings;
+
+               /* Internal buffers */
+               /* mbuf_out_flush[w] == 0 marks a buffer touched since the
+                * last flush period (see app_lcore_io_rx_flush) */
+               struct app_mbuf_array mbuf_in;
+               struct app_mbuf_array mbuf_out[APP_MAX_WORKER_LCORES];
+               uint8_t mbuf_out_flush[APP_MAX_WORKER_LCORES];
+
+               /* Stats */
+               uint32_t nic_queues_count[APP_MAX_NIC_RX_QUEUES_PER_IO_LCORE];
+               uint32_t nic_queues_iters[APP_MAX_NIC_RX_QUEUES_PER_IO_LCORE];
+               uint32_t rings_count[APP_MAX_WORKER_LCORES];
+               uint32_t rings_iters[APP_MAX_WORKER_LCORES];
+       } rx;
+
+       /* I/O TX */
+       struct {
+               /* Rings */
+               /* input rings, indexed by [physical port][worker] */
+               struct rte_ring *rings[APP_MAX_NIC_PORTS][APP_MAX_WORKER_LCORES];
+
+               /* NIC */
+               /* physical port ids served by this lcore */
+               uint8_t nic_ports[APP_MAX_NIC_TX_PORTS_PER_IO_LCORE];
+               uint32_t n_nic_ports;
+
+               /* Internal buffers */
+               /* NOTE(review): runtime.c indexes mbuf_out[]/nic_ports_*[]
+                * by physical port id, but these arrays are sized
+                * APP_MAX_NIC_TX_PORTS_PER_IO_LCORE (16) rather than
+                * APP_MAX_NIC_PORTS — a port id >= 16 would overrun them;
+                * confirm against the port-assignment logic in init code. */
+               struct app_mbuf_array mbuf_out[APP_MAX_NIC_TX_PORTS_PER_IO_LCORE];
+               uint8_t mbuf_out_flush[APP_MAX_NIC_TX_PORTS_PER_IO_LCORE];
+
+               /* Stats */
+               uint32_t rings_count[APP_MAX_NIC_PORTS][APP_MAX_WORKER_LCORES];
+               uint32_t rings_iters[APP_MAX_NIC_PORTS][APP_MAX_WORKER_LCORES];
+               uint32_t nic_ports_count[APP_MAX_NIC_TX_PORTS_PER_IO_LCORE];
+               uint32_t nic_ports_iters[APP_MAX_NIC_TX_PORTS_PER_IO_LCORE];
+       } tx;
+};
+
+/* Per-lcore state for a worker core: dequeues packets from the I/O RX
+ * rings, routes them via its LPM table and enqueues toward I/O TX. */
+struct app_lcore_params_worker {
+       /* Rings */
+       struct rte_ring *rings_in[APP_MAX_IO_LCORES];
+       uint32_t n_rings_in;
+       /* output rings, indexed by physical port id; NULL if port unused */
+       struct rte_ring *rings_out[APP_MAX_NIC_PORTS];
+
+       /* LPM table */
+       struct rte_lpm *lpm_table;
+       uint32_t worker_id;
+
+       /* Internal buffers */
+       struct app_mbuf_array mbuf_in;
+       struct app_mbuf_array mbuf_out[APP_MAX_NIC_PORTS];
+       uint8_t mbuf_out_flush[APP_MAX_NIC_PORTS];
+
+       /* Stats */
+       uint32_t rings_in_count[APP_MAX_IO_LCORES];
+       uint32_t rings_in_iters[APP_MAX_IO_LCORES];
+       uint32_t rings_out_count[APP_MAX_NIC_PORTS];
+       uint32_t rings_out_iters[APP_MAX_NIC_PORTS];
+};
+
+/* Union of the two roles, tagged by type; cache aligned to avoid false
+ * sharing between lcores. */
+struct app_lcore_params {
+       union {
+               struct app_lcore_params_io io;
+               struct app_lcore_params_worker worker;
+       };
+       enum app_lcore_type type;
+       struct rte_mempool *pool;
+} __rte_cache_aligned;
+
+/* One LPM routing rule: ip/depth prefix -> output interface. */
+struct app_lpm_rule {
+       uint32_t ip;
+       uint8_t depth;
+       uint8_t if_out;
+};
+
+/* Global application configuration and state (single instance: app). */
+struct app_params {
+       /* lcore */
+       struct app_lcore_params lcore_params[APP_MAX_LCORES];
+
+       /* NIC */
+       /* nic_rx_queue_mask[p][q] != 0 => queue q of port p is enabled */
+       uint8_t nic_rx_queue_mask[APP_MAX_NIC_PORTS][APP_MAX_RX_QUEUES_PER_NIC_PORT];
+       uint8_t nic_tx_port_mask[APP_MAX_NIC_PORTS];
+
+       /* mbuf pools */
+       /* one mbuf pool / LPM table per CPU socket */
+       struct rte_mempool *pools[APP_MAX_SOCKETS];
+
+       /* LPM tables */
+       struct rte_lpm *lpm_tables[APP_MAX_SOCKETS];
+       struct app_lpm_rule lpm_rules[APP_MAX_LPM_RULES];
+       uint32_t n_lpm_rules;
+
+       /* rings */
+       uint32_t nic_rx_ring_size;
+       uint32_t nic_tx_ring_size;
+       uint32_t ring_rx_size;
+       uint32_t ring_tx_size;
+
+       /* burst size */
+       uint32_t burst_size_io_rx_read;
+       uint32_t burst_size_io_rx_write;
+       uint32_t burst_size_io_tx_read;
+       uint32_t burst_size_io_tx_write;
+       uint32_t burst_size_worker_read;
+       uint32_t burst_size_worker_write;
+
+       /* load balancing */
+       /* byte offset within the packet used to select a worker */
+       uint8_t pos_lb;
+} __rte_cache_aligned;
+
+extern struct app_params app;
+
+int app_parse_args(int argc, char **argv);
+void app_print_usage(void);
+void app_init(void);
+/* per-lcore entry point, launched via rte_eal_mp_remote_launch() */
+int app_lcore_main_loop(void *arg);
+
+int app_get_nic_rx_queues_per_port(uint8_t port);
+int app_get_lcore_for_nic_rx(uint8_t port, uint8_t queue, uint32_t *lcore_out);
+int app_get_lcore_for_nic_tx(uint8_t port, uint32_t *lcore_out);
+int app_is_socket_used(uint32_t socket);
+uint32_t app_get_lcores_io_rx(void);
+uint32_t app_get_lcores_worker(void);
+void app_print_params(void);
+
+/* Bare-metal builds rename the entry point; Linux builds use main(). */
+#ifdef RTE_EXEC_ENV_BAREMETAL
+#define MAIN _main
+#else
+#define MAIN main
+#endif
+
+int MAIN(int argc, char **argv);
+
+#endif /* _MAIN_H_ */
diff --git a/examples/load_balancer/runtime.c b/examples/load_balancer/runtime.c
new file mode 100644 (file)
index 0000000..d349df3
--- /dev/null
@@ -0,0 +1,669 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <sys/types.h>
+#include <string.h>
+#include <sys/queue.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <getopt.h>
+
+#include <rte_common.h>
+#include <rte_byteorder.h>
+#include <rte_log.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_memzone.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_launch.h>
+#include <rte_atomic.h>
+#include <rte_cycles.h>
+#include <rte_prefetch.h>
+#include <rte_lcore.h>
+#include <rte_per_lcore.h>
+#include <rte_branch_prediction.h>
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_random.h>
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_ring.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_ip.h>
+#include <rte_tcp.h>
+#include <rte_lpm.h>
+
+#include "main.h"
+
+/* Number of main-loop iterations between flushes of partially filled
+ * output buffers on the I/O lcores (0 would disable flushing). */
+#ifndef APP_LCORE_IO_FLUSH
+#define APP_LCORE_IO_FLUSH           1000000
+#endif
+
+/* Same, for the worker lcores. */
+#ifndef APP_LCORE_WORKER_FLUSH
+#define APP_LCORE_WORKER_FLUSH       1000000
+#endif
+
+/* Doubles as enable flag (#if APP_STATS) and reporting period: stats are
+ * printed every APP_STATS iterations of the respective counter. */
+#ifndef APP_STATS
+#define APP_STATS                    1000000
+#endif
+
+/* Debug/benchmark switches: when set to 1, the corresponding stage frees
+ * every packet instead of forwarding it. */
+#define APP_IO_RX_DROP_ALL_PACKETS   0
+#define APP_WORKER_DROP_ALL_PACKETS  0
+#define APP_IO_TX_DROP_ALL_PACKETS   0
+
+#ifndef APP_IO_RX_PREFETCH_ENABLE
+#define APP_IO_RX_PREFETCH_ENABLE    1
+#endif
+
+#ifndef APP_WORKER_PREFETCH_ENABLE
+#define APP_WORKER_PREFETCH_ENABLE   1
+#endif
+
+#ifndef APP_IO_TX_PREFETCH_ENABLE
+#define APP_IO_TX_PREFETCH_ENABLE    1
+#endif
+
+/* Per-stage prefetch wrappers; compile to nothing when disabled so the
+ * hot loops need no runtime checks. */
+#if APP_IO_RX_PREFETCH_ENABLE
+#define APP_IO_RX_PREFETCH0(p)       rte_prefetch0(p)
+#define APP_IO_RX_PREFETCH1(p)       rte_prefetch1(p)
+#else
+#define APP_IO_RX_PREFETCH0(p)
+#define APP_IO_RX_PREFETCH1(p)
+#endif
+
+#if APP_WORKER_PREFETCH_ENABLE
+#define APP_WORKER_PREFETCH0(p)      rte_prefetch0(p)
+#define APP_WORKER_PREFETCH1(p)      rte_prefetch1(p)
+#else
+#define APP_WORKER_PREFETCH0(p)
+#define APP_WORKER_PREFETCH1(p)
+#endif
+
+#if APP_IO_TX_PREFETCH_ENABLE
+#define APP_IO_TX_PREFETCH0(p)       rte_prefetch0(p)
+#define APP_IO_TX_PREFETCH1(p)       rte_prefetch1(p)
+#else
+#define APP_IO_TX_PREFETCH0(p)
+#define APP_IO_TX_PREFETCH1(p)
+#endif
+
+/*
+ * Stage one RX mbuf toward the given worker. The mbuf is appended to the
+ * per-worker staging buffer; once the buffer holds bsz mbufs, the whole
+ * burst is enqueued on the worker's ring in a single bulk operation.
+ * If the ring is full (-ENOBUFS: bulk enqueue is all-or-nothing), the
+ * entire burst is dropped and freed.
+ */
+static inline void
+app_lcore_io_rx_buffer_to_send (
+       struct app_lcore_params_io *lp,
+       uint32_t worker,
+       struct rte_mbuf *mbuf,
+       uint32_t bsz)
+{
+       uint32_t pos;
+       int ret;
+
+       pos = lp->rx.mbuf_out[worker].n_mbufs;
+       lp->rx.mbuf_out[worker].array[pos ++] = mbuf;
+       if (likely(pos < bsz)) {
+               /* buffer not full yet: just record the new count */
+               lp->rx.mbuf_out[worker].n_mbufs = pos;
+               return;
+       }
+
+       ret = rte_ring_sp_enqueue_bulk(
+               lp->rx.rings[worker],
+               (void **) lp->rx.mbuf_out[worker].array,
+               bsz);
+
+       if (unlikely(ret == -ENOBUFS)) {
+               /* ring full: none of the mbufs were enqueued, drop them all */
+               uint32_t k;
+               for (k = 0; k < bsz; k ++) {
+                       struct rte_mbuf *m = lp->rx.mbuf_out[worker].array[k];
+                       rte_pktmbuf_free(m);
+               }
+       }
+
+       /* buffer was consumed either way; clear flush flag so the periodic
+        * flush in app_lcore_io_rx_flush() skips it for one period */
+       lp->rx.mbuf_out[worker].n_mbufs = 0;
+       lp->rx.mbuf_out_flush[worker] = 0;
+
+#if APP_STATS
+       /* one iteration per attempted bulk enqueue; print the success rate
+        * every APP_STATS attempts */
+       lp->rx.rings_iters[worker] ++;
+       if (likely(ret == 0)) {
+               lp->rx.rings_count[worker] ++;
+       }
+       if (unlikely(lp->rx.rings_iters[worker] == APP_STATS)) {
+               uint32_t lcore = rte_lcore_id();
+
+               printf("\tI/O RX %u out (worker %u): enq success rate = %.2f\n",
+                       lcore,
+                       worker,
+                       ((double) lp->rx.rings_count[worker]) / ((double) lp->rx.rings_iters[worker]));
+               lp->rx.rings_iters[worker] = 0;
+               lp->rx.rings_count[worker] = 0;
+       }
+#endif
+}
+
+/*
+ * I/O RX stage: for each NIC (port, queue) pair owned by this lcore,
+ * read a burst of up to bsz_rd packets and distribute them to workers.
+ * The destination worker is chosen from the packet byte at offset
+ * pos_lb: data[pos_lb] & (n_workers - 1) — this assumes n_workers is a
+ * power of two, otherwise some workers are never selected.
+ *
+ * The main loop is a 3-stage software pipeline processing two packets
+ * per iteration: stage 2 prefetches mbuf headers, stage 1 prefetches
+ * packet data, stage 0 reads the load-balancing byte and dispatches.
+ */
+static inline void
+app_lcore_io_rx(
+       struct app_lcore_params_io *lp,
+       uint32_t n_workers,
+       uint32_t bsz_rd,
+       uint32_t bsz_wr,
+       uint8_t pos_lb)
+{
+       struct rte_mbuf *mbuf_1_0, *mbuf_1_1, *mbuf_2_0, *mbuf_2_1;
+       uint8_t *data_1_0, *data_1_1;
+       uint32_t i;
+
+       for (i = 0; i < lp->rx.n_nic_queues; i ++) {
+               uint8_t port = lp->rx.nic_queues[i].port;
+               uint8_t queue = lp->rx.nic_queues[i].queue;
+               uint32_t n_mbufs, j;
+
+               n_mbufs = rte_eth_rx_burst(
+                       port,
+                       queue,
+                       lp->rx.mbuf_in.array,
+                       (uint16_t) bsz_rd);
+
+               if (unlikely(n_mbufs == 0)) {
+                       continue;
+               }
+
+#if APP_STATS
+               lp->rx.nic_queues_iters[i] ++;
+               lp->rx.nic_queues_count[i] += n_mbufs;
+               if (unlikely(lp->rx.nic_queues_iters[i] == APP_STATS)) {
+                       struct rte_eth_stats stats;
+                       uint32_t lcore = rte_lcore_id();
+
+                       rte_eth_stats_get(port, &stats);
+
+                       /* NOTE(review): if ierrors + ipackets is 0 this
+                        * divides 0.0/0.0 and prints nan — cosmetic only */
+                       printf("I/O RX %u in (NIC port %u): NIC drop ratio = %.2f avg burst size = %.2f\n",
+                               lcore,
+                               (uint32_t) port,
+                               (double) stats.ierrors / (double) (stats.ierrors + stats.ipackets),
+                               ((double) lp->rx.nic_queues_count[i]) / ((double) lp->rx.nic_queues_iters[i]));
+                       lp->rx.nic_queues_iters[i] = 0;
+                       lp->rx.nic_queues_count[i] = 0;
+               }
+#endif
+
+#if APP_IO_RX_DROP_ALL_PACKETS
+               for (j = 0; j < n_mbufs; j ++) {
+                       struct rte_mbuf *pkt = lp->rx.mbuf_in.array[j];
+                       rte_pktmbuf_free(pkt);
+               }
+
+               continue;
+#endif
+
+               /* Pipeline warm-up. NOTE(review): array slots [1]..[3] are
+                * read before n_mbufs is checked, so for short bursts these
+                * hold stale pointers from a previous burst; they are only
+                * prefetched (never dereferenced) in that case, and the
+                * pipelined loop below does not execute for n_mbufs < 4. */
+               mbuf_1_0 = lp->rx.mbuf_in.array[0];
+               mbuf_1_1 = lp->rx.mbuf_in.array[1];
+               data_1_0 = rte_pktmbuf_mtod(mbuf_1_0, uint8_t *);
+               if (likely(n_mbufs > 1)) {
+                       data_1_1 = rte_pktmbuf_mtod(mbuf_1_1, uint8_t *);
+               }
+
+               mbuf_2_0 = lp->rx.mbuf_in.array[2];
+               mbuf_2_1 = lp->rx.mbuf_in.array[3];
+               APP_IO_RX_PREFETCH0(mbuf_2_0);
+               APP_IO_RX_PREFETCH0(mbuf_2_1);
+
+               for (j = 0; j + 3 < n_mbufs; j += 2) {
+                       struct rte_mbuf *mbuf_0_0, *mbuf_0_1;
+                       uint8_t *data_0_0, *data_0_1;
+                       uint32_t worker_0, worker_1;
+
+                       /* advance the pipeline: stage 1 -> stage 0 */
+                       mbuf_0_0 = mbuf_1_0;
+                       mbuf_0_1 = mbuf_1_1;
+                       data_0_0 = data_1_0;
+                       data_0_1 = data_1_1;
+
+                       /* stage 2 -> stage 1: prefetch packet data */
+                       mbuf_1_0 = mbuf_2_0;
+                       mbuf_1_1 = mbuf_2_1;
+                       data_1_0 = rte_pktmbuf_mtod(mbuf_2_0, uint8_t *);
+                       data_1_1 = rte_pktmbuf_mtod(mbuf_2_1, uint8_t *);
+                       APP_IO_RX_PREFETCH0(data_1_0);
+                       APP_IO_RX_PREFETCH0(data_1_1);
+
+                       /* refill stage 2: prefetch mbuf headers.
+                        * NOTE(review): array[j+4]/[j+5] can read one slot
+                        * past the valid n_mbufs entries (prefetch only) */
+                       mbuf_2_0 = lp->rx.mbuf_in.array[j+4];
+                       mbuf_2_1 = lp->rx.mbuf_in.array[j+5];
+                       APP_IO_RX_PREFETCH0(mbuf_2_0);
+                       APP_IO_RX_PREFETCH0(mbuf_2_1);
+
+                       /* pick workers from the load-balancing byte */
+                       worker_0 = data_0_0[pos_lb] & (n_workers - 1);
+                       worker_1 = data_0_1[pos_lb] & (n_workers - 1);
+
+                       app_lcore_io_rx_buffer_to_send(lp, worker_0, mbuf_0_0, bsz_wr);
+                       app_lcore_io_rx_buffer_to_send(lp, worker_1, mbuf_0_1, bsz_wr);
+               }
+
+               /* Handle the last 1, 2 (when n_mbufs is even) or 3 (when n_mbufs is odd) packets  */
+               for ( ; j < n_mbufs; j += 1) {
+                       struct rte_mbuf *mbuf;
+                       uint8_t *data;
+                       uint32_t worker;
+
+                       /* drain the pipeline one packet at a time */
+                       mbuf = mbuf_1_0;
+                       mbuf_1_0 = mbuf_1_1;
+                       mbuf_1_1 = mbuf_2_0;
+                       mbuf_2_0 = mbuf_2_1;
+
+                       data = rte_pktmbuf_mtod(mbuf, uint8_t *);
+
+                       APP_IO_RX_PREFETCH0(mbuf_1_0);
+
+                       worker = data[pos_lb] & (n_workers - 1);
+
+                       app_lcore_io_rx_buffer_to_send(lp, worker, mbuf, bsz_wr);
+               }
+       }
+}
+
+/*
+ * Periodic flush of the I/O RX staging buffers. A buffer is pushed out
+ * only if it holds packets AND was not touched during the last flush
+ * period (mbuf_out_flush != 0); on a full ring the packets are dropped.
+ * Every buffer leaves this function with its flush flag set.
+ */
+static inline void
+app_lcore_io_rx_flush(struct app_lcore_params_io *lp, uint32_t n_workers)
+{
+       uint32_t w;
+
+       for (w = 0; w < n_workers; w ++) {
+               if (unlikely((lp->rx.mbuf_out_flush[w] != 0) &&
+                            (lp->rx.mbuf_out[w].n_mbufs != 0))) {
+                       int status;
+
+                       status = rte_ring_sp_enqueue_bulk(
+                               lp->rx.rings[w],
+                               (void **) lp->rx.mbuf_out[w].array,
+                               lp->rx.mbuf_out[w].n_mbufs);
+
+                       if (unlikely(status < 0)) {
+                               /* ring full: bulk enqueue is all-or-nothing,
+                                * so drop the whole buffer */
+                               uint32_t k;
+                               for (k = 0; k < lp->rx.mbuf_out[w].n_mbufs; k ++) {
+                                       rte_pktmbuf_free(lp->rx.mbuf_out[w].array[k]);
+                               }
+                       }
+
+                       lp->rx.mbuf_out[w].n_mbufs = 0;
+               }
+
+               lp->rx.mbuf_out_flush[w] = 1;
+       }
+}
+
+/*
+ * I/O TX stage: for each (NIC port, worker) ring owned by this lcore,
+ * dequeue a burst of packets into the per-port staging buffer and, once
+ * at least bsz_wr packets are accumulated, transmit them on queue 0 of
+ * that port. Packets the NIC does not accept are freed.
+ */
+static inline void
+app_lcore_io_tx(
+       struct app_lcore_params_io *lp,
+       uint32_t n_workers,
+       uint32_t bsz_rd,
+       uint32_t bsz_wr)
+{
+       uint32_t worker;
+
+       for (worker = 0; worker < n_workers; worker ++) {
+               uint32_t i;
+
+               for (i = 0; i < lp->tx.n_nic_ports; i ++) {
+                       /* physical port id; also indexes the staging buffer */
+                       uint8_t port = lp->tx.nic_ports[i];
+                       struct rte_ring *ring = lp->tx.rings[port][worker];
+                       uint32_t n_mbufs, n_pkts;
+                       int ret;
+
+                       n_mbufs = lp->tx.mbuf_out[port].n_mbufs;
+                       ret = rte_ring_sc_dequeue_bulk(
+                               ring,
+                               (void **) &lp->tx.mbuf_out[port].array[n_mbufs],
+                               bsz_rd);
+
+                       if (unlikely(ret == -ENOENT)) {
+                               /* fewer than bsz_rd entries available: bulk
+                                * dequeue took nothing */
+                               continue;
+                       }
+
+                       /* bulk dequeue is all-or-nothing, so exactly bsz_rd
+                        * mbufs were appended */
+                       n_mbufs += bsz_rd;
+
+#if APP_IO_TX_DROP_ALL_PACKETS
+                       {
+                               uint32_t j;
+                               APP_IO_TX_PREFETCH0(lp->tx.mbuf_out[port].array[0]);
+                               APP_IO_TX_PREFETCH0(lp->tx.mbuf_out[port].array[1]);
+
+                               for (j = 0; j < n_mbufs; j ++) {
+                                       if (likely(j < n_mbufs - 2)) {
+                                               APP_IO_TX_PREFETCH0(lp->tx.mbuf_out[port].array[j + 2]);
+                                       }
+
+                                       rte_pktmbuf_free(lp->tx.mbuf_out[port].array[j]);
+                               }
+
+                               lp->tx.mbuf_out[port].n_mbufs = 0;
+
+                               continue;
+                       }
+#endif
+
+                       if (unlikely(n_mbufs < bsz_wr)) {
+                               /* not enough for a TX burst yet: keep
+                                * accumulating */
+                               lp->tx.mbuf_out[port].n_mbufs = n_mbufs;
+                               continue;
+                       }
+
+                       n_pkts = rte_eth_tx_burst(
+                               port,
+                               0,
+                               lp->tx.mbuf_out[port].array,
+                               (uint16_t) n_mbufs);
+
+#if APP_STATS
+                       lp->tx.nic_ports_iters[port] ++;
+                       lp->tx.nic_ports_count[port] += n_pkts;
+                       if (unlikely(lp->tx.nic_ports_iters[port] == APP_STATS)) {
+                               uint32_t lcore = rte_lcore_id();
+
+                               printf("\t\t\tI/O TX %u out (port %u): avg burst size = %.2f\n",
+                                       lcore,
+                                       (uint32_t) port,
+                                       ((double) lp->tx.nic_ports_count[port]) / ((double) lp->tx.nic_ports_iters[port]));
+                               lp->tx.nic_ports_iters[port] = 0;
+                               lp->tx.nic_ports_count[port] = 0;
+                       }
+#endif
+
+                       if (unlikely(n_pkts < n_mbufs)) {
+                               /* NIC TX ring full: free the untransmitted
+                                * tail of the burst */
+                               uint32_t k;
+                               for (k = n_pkts; k < n_mbufs; k ++) {
+                                       struct rte_mbuf *pkt_to_free = lp->tx.mbuf_out[port].array[k];
+                                       rte_pktmbuf_free(pkt_to_free);
+                               }
+                       }
+                       lp->tx.mbuf_out[port].n_mbufs = 0;
+                       lp->tx.mbuf_out_flush[port] = 0;
+               }
+       }
+}
+
+/*
+ * Periodic flush of the I/O TX staging buffers: transmit any buffer that
+ * holds packets and was not refilled during the last flush period; free
+ * whatever the NIC does not accept. Every buffer leaves with its flush
+ * flag set.
+ *
+ * Fix: the original loop used the iteration index 0..n_nic_ports-1
+ * directly as the NIC port id. app_lcore_io_tx() indexes mbuf_out[] and
+ * transmits using the physical port id taken from lp->tx.nic_ports[i],
+ * so with non-contiguous port assignments the flush inspected the wrong
+ * buffers and sent bursts on ports this lcore does not own. Map through
+ * lp->tx.nic_ports[i] here as well, consistent with app_lcore_io_tx().
+ */
+static inline void
+app_lcore_io_tx_flush(struct app_lcore_params_io *lp)
+{
+       uint32_t i;
+
+       for (i = 0; i < lp->tx.n_nic_ports; i ++) {
+               uint8_t port = lp->tx.nic_ports[i];
+               uint32_t n_pkts;
+
+               if (likely((lp->tx.mbuf_out_flush[port] == 0) ||
+                          (lp->tx.mbuf_out[port].n_mbufs == 0))) {
+                       /* refilled recently or empty: skip for one period */
+                       lp->tx.mbuf_out_flush[port] = 1;
+                       continue;
+               }
+
+               n_pkts = rte_eth_tx_burst(
+                       port,
+                       0,
+                       lp->tx.mbuf_out[port].array,
+                       (uint16_t) lp->tx.mbuf_out[port].n_mbufs);
+
+               if (unlikely(n_pkts < lp->tx.mbuf_out[port].n_mbufs)) {
+                       /* NIC TX ring full: free the untransmitted tail */
+                       uint32_t k;
+                       for (k = n_pkts; k < lp->tx.mbuf_out[port].n_mbufs; k ++) {
+                               struct rte_mbuf *pkt_to_free = lp->tx.mbuf_out[port].array[k];
+                               rte_pktmbuf_free(pkt_to_free);
+                       }
+               }
+
+               lp->tx.mbuf_out[port].n_mbufs = 0;
+               lp->tx.mbuf_out_flush[port] = 1;
+       }
+}
+
+/*
+ * Main loop of an I/O lcore: alternate the RX and TX stages forever,
+ * and every APP_LCORE_IO_FLUSH iterations push out any staging buffers
+ * that have gone a full period without filling up.
+ */
+static void
+app_lcore_main_loop_io(void)
+{
+       uint32_t lcore_id = rte_lcore_id();
+       struct app_lcore_params_io *lp = &app.lcore_params[lcore_id].io;
+       uint32_t n_workers = app_get_lcores_worker();
+       /* snapshot the configured burst sizes outside the hot loop */
+       uint32_t bsz_rx_rd = app.burst_size_io_rx_read;
+       uint32_t bsz_rx_wr = app.burst_size_io_rx_write;
+       uint32_t bsz_tx_rd = app.burst_size_io_tx_read;
+       uint32_t bsz_tx_wr = app.burst_size_io_tx_write;
+       uint8_t pos_lb = app.pos_lb;
+       uint64_t iter = 0;
+
+       for ( ; ; ) {
+               if (APP_LCORE_IO_FLUSH && (unlikely(iter == APP_LCORE_IO_FLUSH))) {
+                       if (likely(lp->rx.n_nic_queues > 0)) {
+                               app_lcore_io_rx_flush(lp, n_workers);
+                       }
+                       if (likely(lp->tx.n_nic_ports > 0)) {
+                               app_lcore_io_tx_flush(lp);
+                       }
+                       iter = 0;
+               }
+
+               if (likely(lp->rx.n_nic_queues > 0)) {
+                       app_lcore_io_rx(lp, n_workers, bsz_rx_rd, bsz_rx_wr, pos_lb);
+               }
+
+               if (likely(lp->tx.n_nic_ports > 0)) {
+                       app_lcore_io_tx(lp, n_workers, bsz_tx_rd, bsz_tx_wr);
+               }
+
+               iter ++;
+       }
+}
+
+/*
+ * Worker stage: for each input ring, dequeue a burst of bsz_rd packets,
+ * route each one through this worker's LPM table on the IPv4 destination
+ * address, and stage it toward the resolved output port. When a per-port
+ * staging buffer reaches bsz_wr packets, enqueue the burst on that
+ * port's TX ring (dropping the burst if the ring is full).
+ */
+static inline void
+app_lcore_worker(
+       struct app_lcore_params_worker *lp,
+       uint32_t bsz_rd,
+       uint32_t bsz_wr)
+{
+       uint32_t i;
+
+       for (i = 0; i < lp->n_rings_in; i ++) {
+               struct rte_ring *ring_in = lp->rings_in[i];
+               uint32_t j;
+               int ret;
+
+               ret = rte_ring_sc_dequeue_bulk(
+                       ring_in,
+                       (void **) lp->mbuf_in.array,
+                       bsz_rd);
+
+               if (unlikely(ret == -ENOENT)) {
+                       /* fewer than bsz_rd entries available: bulk dequeue
+                        * is all-or-nothing, nothing was taken */
+                       continue;
+               }
+
+#if APP_WORKER_DROP_ALL_PACKETS
+               for (j = 0; j < bsz_rd; j ++) {
+                       struct rte_mbuf *pkt = lp->mbuf_in.array[j];
+                       rte_pktmbuf_free(pkt);
+               }
+
+               continue;
+#endif
+
+               /* warm up: packet data of [0], mbuf header of [1] */
+               APP_WORKER_PREFETCH1(rte_pktmbuf_mtod(lp->mbuf_in.array[0], unsigned char *));
+               APP_WORKER_PREFETCH0(lp->mbuf_in.array[1]);
+
+               for (j = 0; j < bsz_rd; j ++) {
+                       struct rte_mbuf *pkt;
+                       struct ipv4_hdr *ipv4_hdr;
+                       uint32_t ipv4_dst, pos;
+                       uint8_t port;
+
+                       /* prefetch one packet's data and two packets' mbuf
+                        * headers ahead of the one being processed */
+                       if (likely(j < bsz_rd - 1)) {
+                               APP_WORKER_PREFETCH1(rte_pktmbuf_mtod(lp->mbuf_in.array[j+1], unsigned char *));
+                       }
+                       if (likely(j < bsz_rd - 2)) {
+                               APP_WORKER_PREFETCH0(lp->mbuf_in.array[j+2]);
+                       }
+
+                       /* assumes an untagged Ethernet + IPv4 frame — the
+                        * IPv4 header sits right after the Ethernet header */
+                       pkt = lp->mbuf_in.array[j];
+                       ipv4_hdr = (struct ipv4_hdr *)(rte_pktmbuf_mtod(pkt, unsigned char *) + sizeof(struct ether_hdr));
+                       ipv4_dst = rte_be_to_cpu_32(ipv4_hdr->dst_addr);
+
+                       /* no LPM match: bounce the packet back out of the
+                        * port it arrived on */
+                       if (unlikely(rte_lpm_lookup(lp->lpm_table, ipv4_dst, &port) != 0)) {
+                               port = pkt->pkt.in_port;
+                       }
+
+                       pos = lp->mbuf_out[port].n_mbufs;
+
+                       lp->mbuf_out[port].array[pos ++] = pkt;
+                       if (likely(pos < bsz_wr)) {
+                               lp->mbuf_out[port].n_mbufs = pos;
+                               continue;
+                       }
+
+                       /* staging buffer full: push a burst to the TX ring */
+                       ret = rte_ring_sp_enqueue_bulk(
+                               lp->rings_out[port],
+                               (void **) lp->mbuf_out[port].array,
+                               bsz_wr);
+
+#if APP_STATS
+                       lp->rings_out_iters[port] ++;
+                       if (ret == 0) {
+                               lp->rings_out_count[port] += 1;
+                       }
+                       if (lp->rings_out_iters[port] == APP_STATS){
+                               printf("\t\tWorker %u out (NIC port %u): enq success rate = %.2f\n",
+                                       lp->worker_id,
+                                       (uint32_t) port,
+                                       ((double) lp->rings_out_count[port]) / ((double) lp->rings_out_iters[port]));
+                               lp->rings_out_iters[port] = 0;
+                               lp->rings_out_count[port] = 0;
+                       }
+#endif
+
+                       if (unlikely(ret == -ENOBUFS)) {
+                               /* TX ring full: drop the whole burst */
+                               uint32_t k;
+                               for (k = 0; k < bsz_wr; k ++) {
+                                       struct rte_mbuf *pkt_to_free = lp->mbuf_out[port].array[k];
+                                       rte_pktmbuf_free(pkt_to_free);
+                               }
+                       }
+
+                       lp->mbuf_out[port].n_mbufs = 0;
+                       lp->mbuf_out_flush[port] = 0;
+               }
+       }
+}
+
+/*
+ * Periodic flush of the worker staging buffers. For every output port
+ * this worker serves, push out a buffer that holds packets and was not
+ * refilled during the last flush period; on a full ring the packets are
+ * dropped. Each serviced buffer leaves with its flush flag set.
+ */
+static inline void
+app_lcore_worker_flush(struct app_lcore_params_worker *lp)
+{
+       uint32_t out_port;
+
+       for (out_port = 0; out_port < APP_MAX_NIC_PORTS; out_port ++) {
+               /* port not used by this worker */
+               if (unlikely(lp->rings_out[out_port] == NULL)) {
+                       continue;
+               }
+
+               if (unlikely((lp->mbuf_out_flush[out_port] != 0) &&
+                            (lp->mbuf_out[out_port].n_mbufs != 0))) {
+                       int status;
+
+                       status = rte_ring_sp_enqueue_bulk(
+                               lp->rings_out[out_port],
+                               (void **) lp->mbuf_out[out_port].array,
+                               lp->mbuf_out[out_port].n_mbufs);
+
+                       if (unlikely(status < 0)) {
+                               /* ring full: bulk enqueue is all-or-nothing,
+                                * drop the whole buffer */
+                               uint32_t k;
+                               for (k = 0; k < lp->mbuf_out[out_port].n_mbufs; k ++) {
+                                       rte_pktmbuf_free(lp->mbuf_out[out_port].array[k]);
+                               }
+                       }
+
+                       lp->mbuf_out[out_port].n_mbufs = 0;
+               }
+
+               lp->mbuf_out_flush[out_port] = 1;
+       }
+}
+
+/*
+ * Main loop of a worker lcore: process bursts forever, flushing
+ * partially filled output buffers every APP_LCORE_WORKER_FLUSH
+ * iterations so traffic is not stuck when the input rate drops.
+ */
+static void
+app_lcore_main_loop_worker(void) {
+       uint32_t lcore_id = rte_lcore_id();
+       struct app_lcore_params_worker *lp = &app.lcore_params[lcore_id].worker;
+       uint32_t bsz_rd = app.burst_size_worker_read;
+       uint32_t bsz_wr = app.burst_size_worker_write;
+       uint64_t iter = 0;
+
+       for ( ; ; ) {
+               if (APP_LCORE_WORKER_FLUSH && (unlikely(iter == APP_LCORE_WORKER_FLUSH))) {
+                       app_lcore_worker_flush(lp);
+                       iter = 0;
+               }
+
+               app_lcore_worker(lp, bsz_rd, bsz_wr);
+
+               iter ++;
+       }
+}
+
+/*
+ * Per-lcore entry point launched from MAIN(): dispatch to the I/O or
+ * worker main loop based on this lcore's configured role. Disabled
+ * lcores return immediately. Never returns for active lcores.
+ */
+int
+app_lcore_main_loop(__attribute__((unused)) void *arg)
+{
+       uint32_t lcore_id = rte_lcore_id();
+       struct app_lcore_params *params = &app.lcore_params[lcore_id];
+
+       switch (params->type) {
+       case e_APP_LCORE_IO:
+               printf("Logical core %u (I/O) main loop.\n", lcore_id);
+               app_lcore_main_loop_io();
+               break;
+
+       case e_APP_LCORE_WORKER:
+               printf("Logical core %u (worker %u) main loop.\n",
+                       lcore_id,
+                       params->worker.worker_id);
+               app_lcore_main_loop_worker();
+               break;
+
+       default:
+               /* e_APP_LCORE_DISABLED: nothing to do */
+               break;
+       }
+
+       return 0;
+}
diff --git a/examples/multi_process/482253_Multi_Process_Sample_App_Guide_Rev1.3.pdf b/examples/multi_process/482253_Multi_Process_Sample_App_Guide_Rev1.3.pdf
new file mode 100644 (file)
index 0000000..ec041da
Binary files /dev/null and b/examples/multi_process/482253_Multi_Process_Sample_App_Guide_Rev1.3.pdf differ
diff --git a/examples/multi_process/Makefile b/examples/multi_process/Makefile
new file mode 100644 (file)
index 0000000..0abeafa
--- /dev/null
@@ -0,0 +1,49 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+ifeq ($(RTE_SDK),)
+$(error "Please define RTE_SDK environment variable")
+endif
+
+include $(RTE_SDK)/mk/rte.vars.mk
+# Drop the per-app build variables inherited from any parent make
+# invocation so each sub-directory build starts from a clean slate.
+unexport RTE_SRCDIR RTE_OUTPUT RTE_EXTMK
+
+# Build every *_mp example sub-directory, but only on a linuxapp target.
+DIRS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += $(wildcard *_mp)
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+# Recurse into each sub-directory, forwarding the requested goal
+# (all/clean) via $(MAKECMDGOALS).
+$(DIRS-y):
+       $(MAKE) -C $@ $(MAKECMDGOALS)
diff --git a/examples/multi_process/client_server_mp/Makefile b/examples/multi_process/client_server_mp/Makefile
new file mode 100644 (file)
index 0000000..abb11ba
--- /dev/null
@@ -0,0 +1,49 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+ifeq ($(RTE_SDK),)
+$(error "Please define RTE_SDK environment variable")
+endif
+
+include $(RTE_SDK)/mk/rte.vars.mk
+# Drop the per-app build variables inherited from any parent make
+# invocation so each sub-directory build starts from a clean slate.
+unexport RTE_SRCDIR RTE_OUTPUT RTE_EXTMK
+
+# Build every mp_* sub-directory (mp_server, mp_client), linuxapp only.
+DIRS-$(CONFIG_RTE_EXEC_ENV_LINUXAPP) += $(wildcard mp_*)
+
+.PHONY: all clean $(DIRS-y)
+
+all: $(DIRS-y)
+clean: $(DIRS-y)
+
+# Recurse into each sub-directory, forwarding the requested goal
+# (all/clean) via $(MAKECMDGOALS).
+$(DIRS-y):
+       $(MAKE) -C $@ $(MAKECMDGOALS)
diff --git a/examples/multi_process/client_server_mp/mp_client/Makefile b/examples/multi_process/client_server_mp/mp_client/Makefile
new file mode 100644 (file)
index 0000000..202fce3
--- /dev/null
@@ -0,0 +1,50 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+ifeq ($(RTE_SDK),)
+$(error "Please define RTE_SDK environment variable")
+endif
+
+# Default target, can be overriden by command line or environment
+# NOTE(review): the comment above refers to an RTE_TARGET default, but no
+# RTE_TARGET is set here (cf. the mp_server Makefile) — confirm intent.
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# binary name
+APP = mp_client
+
+# all source are stored in SRCS-y
+SRCS-y := client.c
+
+CFLAGS += $(WERROR_FLAGS) -O3
+# headers shared between the mp_server and mp_client processes
+CFLAGS += -I$(SRCDIR)/../shared
+
+include $(RTE_SDK)/mk/rte.extapp.mk
diff --git a/examples/multi_process/client_server_mp/mp_client/client.c b/examples/multi_process/client_server_mp/mp_client/client.c
new file mode 100644 (file)
index 0000000..bfb7476
--- /dev/null
@@ -0,0 +1,294 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <stdint.h>
+#include <stdio.h>
+#include <inttypes.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <sys/queue.h>
+#include <stdlib.h>
+#include <getopt.h>
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_log.h>
+#include <rte_per_lcore.h>
+#include <rte_launch.h>
+#include <rte_lcore.h>
+#include <rte_ring.h>
+#include <rte_launch.h>
+#include <rte_lcore.h>
+#include <rte_debug.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_string_fns.h>
+
+#include "common.h"
+#include "init_drivers.h"
+
+/* Number of packets to attempt to read from queue */
+#define PKT_READ_SIZE  ((uint16_t)32)
+
+/* our client id number - tells us which rx queue to read, and NIC TX
+ * queue to write to. */
+static uint8_t client_id = 0;
+
+struct mbuf_queue {
+#define MBQ_CAPACITY 32
+       struct rte_mbuf *bufs[MBQ_CAPACITY];
+       uint16_t top;
+};
+
+/* maps input ports to output ports for packets */
+static uint8_t output_ports[RTE_MAX_ETHPORTS];
+
+/* buffers up a set of packet that are ready to send */
+static struct mbuf_queue output_bufs[RTE_MAX_ETHPORTS];
+
+/* shared data from server. We update statistics here */
+static volatile struct tx_stats *tx_stats;
+
+
+/*
+ * Print a usage message for the client app to stdout.
+ * progname is the argv[0] of this process.
+ */
+static void
+usage(const char *progname)
+{
+       printf("Usage: %s [EAL args] -- -n <client_id>\n\n", progname);
+}
+
+/*
+ * Convert the client id number from a string to an int and store it in
+ * the global client_id.
+ *
+ * Returns 0 on success, -1 if the string is NULL/empty, contains
+ * non-numeric characters, or holds a value that does not fit in a
+ * uint8_t.
+ */
+static int
+parse_client_num(const char *client)
+{
+       char *end = NULL;
+       unsigned long temp;
+
+       if (client == NULL || *client == '\0')
+               return -1;
+
+       temp = strtoul(client, &end, 10);
+       /* reject trailing garbage and ids that would be silently
+        * truncated by the cast to uint8_t below */
+       if (end == NULL || *end != '\0' || temp > UINT8_MAX)
+               return -1;
+
+       client_id = (uint8_t)temp;
+       return 0;
+}
+
+/*
+ * Parse the application arguments to the client app.
+ * The only recognised option is -n <client_id> (handled by
+ * parse_client_num()); anything else prints usage and fails.
+ *
+ * Returns 0 on success, -1 on any invalid option or client id.
+ */
+static int
+parse_app_args(int argc, char *argv[])
+{
+       int option_index, opt;
+       char **argvopt = argv;
+       const char *progname = NULL;
+       static struct option lgopts[] = { /* no long options */
+               {NULL, 0, 0, 0 }
+       };
+       progname = argv[0];
+
+       while ((opt = getopt_long(argc, argvopt, "n:", lgopts,
+               &option_index)) != EOF){
+               switch (opt){
+                       case 'n':
+                               if (parse_client_num(optarg) != 0){
+                                       usage(progname);
+                                       return -1;
+                               }
+                               break;
+                       default:
+                               usage(progname);
+                               return -1;
+               }
+       }
+       return 0;
+}
+
+/*
+ * set up output ports so that all traffic on port gets sent out
+ * its paired port. Index using actual port numbers since that is
+ * what comes in the mbuf structure.
+ *
+ * Ports are paired consecutively: id[0]<->id[1], id[2]<->id[3], etc.
+ * NOTE(review): with an odd number of ports the last one is never
+ * paired and keeps the zero-initialised default mapping (port 0) —
+ * the server side rejects odd port counts, so this should not occur;
+ * confirm.
+ */
+static void configure_output_ports(const struct port_info *ports)
+{
+       int i;
+       if (ports->num_ports > RTE_MAX_ETHPORTS)
+               rte_exit(EXIT_FAILURE, "Too many ethernet ports. RTE_MAX_ETHPORTS = %u\n",
+                               (unsigned)RTE_MAX_ETHPORTS);
+       for (i = 0; i < ports->num_ports - 1; i+=2){
+               uint8_t p1 = ports->id[i];
+               uint8_t p2 = ports->id[i+1];
+               output_ports[p1] = p2;
+               output_ports[p2] = p1;
+       }
+}
+
+
+/*
+ * Transmit all packets buffered for 'port' and reset the buffer.
+ * The client id doubles as the NIC TX queue index so clients never
+ * contend for the same queue. Packets the NIC did not accept are
+ * freed and counted as drops; the shared per-client tx_stats are
+ * updated in both cases. No-op when the buffer is empty.
+ */
+static inline void
+send_packets(uint8_t port)
+{
+       uint16_t i, sent;
+       struct mbuf_queue *mbq = &output_bufs[port];
+
+       if (unlikely(mbq->top == 0))
+               return;
+
+       sent = rte_eth_tx_burst(port, client_id, mbq->bufs, mbq->top);
+       if (unlikely(sent < mbq->top)){
+               /* free the mbufs the hardware did not take */
+               for (i = sent; i < mbq->top; i++)
+                       rte_pktmbuf_free(mbq->bufs[i]);
+               tx_stats->tx_drop[port] += (mbq->top - sent);
+       }
+       tx_stats->tx[port] += sent;
+       mbq->top = 0;
+}
+
+/*
+ * Enqueue a packet to be sent on a particular port, but
+ * don't send it yet. Only when the buffer is full.
+ *
+ * NOTE(review): no overflow check on 'top'; correctness relies on
+ * send_packets() resetting it whenever MBQ_CAPACITY is reached.
+ */
+static inline void
+enqueue_packet(struct rte_mbuf *buf, uint8_t port)
+{
+       struct mbuf_queue *mbq = &output_bufs[port];
+       mbq->bufs[mbq->top++] = buf;
+
+       /* flush as soon as the per-port buffer fills up */
+       if (mbq->top == MBQ_CAPACITY)
+               send_packets(port);
+}
+
+/*
+ * This function performs routing of packets
+ * Just sends each input packet out an output port based solely on the input
+ * port it arrived on: the input port is read from the mbuf metadata and
+ * mapped through the static output_ports[] pairing table set up by
+ * configure_output_ports().
+ */
+static void
+handle_packet(struct rte_mbuf *buf)
+{
+       const uint8_t in_port = buf->pkt.in_port;
+       const uint8_t out_port = output_ports[in_port];
+
+       enqueue_packet(buf, out_port);
+}
+
+/*
+ * Application main function - loops through
+ * receiving and processing packets. Never returns
+ *
+ * Init sequence: EAL init, app-arg parsing, NIC driver init, then lookup
+ * of the shared objects created by the server process (RX ring, mbuf
+ * pool, port-info memzone). The main loop dequeues bursts from this
+ * client's RX ring, routes each packet, and flushes any buffered output
+ * whenever the ring runs empty.
+ */
+int
+main(int argc, char *argv[])
+{
+       const struct rte_memzone *mz;
+       struct rte_ring *rx_ring;
+       struct rte_mempool *mp;
+       struct port_info *ports;
+       int need_flush = 0; /* indicates whether we have unsent packets */
+       int retval;
+       void *pkts[PKT_READ_SIZE];
+
+       if ((retval = rte_eal_init(argc, argv)) < 0)
+               return -1;
+       /* skip past the EAL arguments consumed by rte_eal_init() */
+       argc -= retval;
+       argv += retval;
+
+       if (parse_app_args(argc, argv) < 0)
+               rte_exit(EXIT_FAILURE, "Invalid command-line arguments\n");
+
+       if (init_drivers() < 0)
+               rte_exit(EXIT_FAILURE, "Cannot get NIC ports\n");
+       if (rte_eth_dev_count() == 0)
+               rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n");
+
+       rx_ring = rte_ring_lookup(get_rx_queue_name(client_id));
+       if (rx_ring == NULL)
+               rte_exit(EXIT_FAILURE, "Cannot get RX ring - is server process running?\n");
+
+       /* the pool handle itself is not used below; the lookup is a sanity
+        * check that the server created the shared mbuf pool */
+       mp = rte_mempool_lookup(PKTMBUF_POOL_NAME);
+       if (mp == NULL)
+               rte_exit(EXIT_FAILURE, "Cannot get mempool for mbufs\n");
+
+       mz = rte_memzone_lookup(MZ_PORT_INFO);
+       if (mz == NULL)
+               rte_exit(EXIT_FAILURE, "Cannot get port info structure\n");
+       ports = mz->addr;
+       /* point our stats pointer at this client's slot in shared memory */
+       tx_stats = &(ports->tx_stats[client_id]);
+
+       configure_output_ports(ports);
+
+       RTE_LOG(INFO, APP, "Finished Process Init.\n");
+
+       printf("\nClient process %d handling packets\n", client_id);
+       printf("[Press Ctrl-C to quit ...]\n");
+
+       for (;;) {
+               uint16_t i, rx_pkts = PKT_READ_SIZE;
+               uint8_t port;
+
+               /* try dequeuing max possible packets first, if that fails, get the
+                * most we can. Loop body should only execute once, maximum */
+               while (rx_pkts > 0 &&
+                               unlikely(rte_ring_dequeue_bulk(rx_ring, pkts, rx_pkts) != 0))
+                       rx_pkts = (uint16_t)RTE_MIN(rte_ring_count(rx_ring), PKT_READ_SIZE);
+
+               if (unlikely(rx_pkts == 0)){
+                       /* ring is idle: push out anything still buffered */
+                       if (need_flush)
+                               for (port = 0; port < ports->num_ports; port++)
+                                       send_packets(ports->id[port]);
+                       need_flush = 0;
+                       continue;
+               }
+
+               for (i = 0; i < rx_pkts; i++)
+                       handle_packet(pkts[i]);
+
+               need_flush = 1;
+       }
+}
diff --git a/examples/multi_process/client_server_mp/mp_server/Makefile b/examples/multi_process/client_server_mp/mp_server/Makefile
new file mode 100644 (file)
index 0000000..009c641
--- /dev/null
@@ -0,0 +1,63 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+ifeq ($(RTE_SDK),)
+$(error "Please define RTE_SDK environment variable")
+endif
+
+# Default target, can be overriden by command line or environment
+RTE_TARGET ?= x86_64-default-linuxapp-gcc
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# refuse to build for anything other than a linuxapp target
+ifneq ($(CONFIG_RTE_EXEC_ENV),"linuxapp")
+$(error This application can only operate in a linuxapp environment, \
+please change the definition of the RTE_TARGET environment variable)
+endif
+
+# binary name
+APP = mp_server
+
+# all source are stored in SRCS-y
+SRCS-y := main.c init.c args.c
+
+INC := $(wildcard *.h)
+
+CFLAGS += $(WERROR_FLAGS) -O3
+# headers shared between the mp_server and mp_client processes
+CFLAGS += -I$(SRCDIR)/../shared
+
+# for newer gcc, e.g. 4.4, no-strict-aliasing may not be necessary
+# and so the next line can be removed in those cases.
+EXTRA_CFLAGS += -fno-strict-aliasing
+
+include $(RTE_SDK)/mk/rte.extapp.mk
diff --git a/examples/multi_process/client_server_mp/mp_server/args.c b/examples/multi_process/client_server_mp/mp_server/args.c
new file mode 100644 (file)
index 0000000..ecdddab
--- /dev/null
@@ -0,0 +1,175 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <getopt.h>
+#include <stdarg.h>
+#include <errno.h>
+
+#include <rte_memory.h>
+#include <rte_string_fns.h>
+
+#include "common.h"
+#include "args.h"
+#include "init.h"
+
+/* global var for number of clients - extern in header */
+uint8_t num_clients;
+
+static const char *progname;
+
+/**
+ * Prints out usage information to stdout.
+ * Only -p and -n are listed: parse_app_args() below accepts just
+ * "n:p:", so the previously advertised -s NUM_SOCKETS option would
+ * have been rejected by getopt and has been removed from the text.
+ */
+static void
+usage(void)
+{
+       printf(
+           "%s [EAL options] -- -p PORTMASK -n NUM_CLIENTS\n"
+           " -p PORTMASK: hexadecimal bitmask of ports to use\n"
+           " -n NUM_CLIENTS: number of client processes to use\n"
+           , progname);
+}
+
+/**
+ * The ports to be used by the application are passed in
+ * the form of a bitmask. This function parses the bitmask
+ * and places the port numbers to be used into the port[]
+ * array variable (the global 'ports' structure).
+ *
+ * Bit i of the hex mask selects port i; bits at or beyond max_ports
+ * produce a warning and are ignored. Returns 0 on success, -1 if the
+ * string is NULL/empty, not valid hex, or the mask is zero.
+ */
+static int
+parse_portmask(uint8_t max_ports, const char *portmask)
+{
+       char *end = NULL;
+       unsigned long pm;
+       uint8_t count = 0;
+
+       if (portmask == NULL || *portmask == '\0')
+               return -1;
+
+       /* convert parameter to a number and verify */
+       pm = strtoul(portmask, &end, 16);
+       if (end == NULL || *end != '\0' || pm == 0)
+               return -1;
+
+       /* loop through bits of the mask and mark ports */
+       while (pm != 0){
+               if (pm & 0x01){ /* bit is set in mask, use port */
+                       if (count >= max_ports)
+                               printf("WARNING: requested port %u not present"
+                               " - ignoring\n", (unsigned)count);
+                       else
+                           ports->id[ports->num_ports++] = count;
+               }
+               pm = (pm >> 1);
+               count++;
+       }
+
+       return 0;
+}
+
+/**
+ * Take the number of clients parameter passed to the app
+ * and convert to a number to store in the num_clients variable.
+ *
+ * Returns 0 on success, -1 if the string is NULL/empty, contains
+ * non-numeric characters, is zero, or holds a value that does not
+ * fit in a uint8_t.
+ */
+static int
+parse_num_clients(const char *clients)
+{
+       char *end = NULL;
+       unsigned long temp;
+
+       if (clients == NULL || *clients == '\0')
+               return -1;
+
+       temp = strtoul(clients, &end, 10);
+       /* reject trailing garbage, zero, and counts that would be
+        * silently truncated by the cast to uint8_t below */
+       if (end == NULL || *end != '\0' || temp == 0 || temp > UINT8_MAX)
+               return -1;
+
+       num_clients = (uint8_t)temp;
+       return 0;
+}
+
+/**
+ * The application specific arguments follow the DPDK-specific
+ * arguments which are stripped by the DPDK init. This function
+ * processes these application arguments, printing usage info
+ * on error.
+ *
+ * Recognised options:
+ *   -p PORTMASK     hex bitmask of ports (fills the global ports array)
+ *   -n NUM_CLIENTS  number of clients (sets the global num_clients)
+ *
+ * Returns 0 on success, -1 on a parse error or if the resulting
+ * configuration is invalid (no ports, no clients, or an odd number
+ * of ports — ports are used in pairs).
+ */
+int
+parse_app_args(uint8_t max_ports, int argc, char *argv[])
+{
+       int option_index, opt;
+       char **argvopt = argv;
+       static struct option lgopts[] = { /* no long options */
+               {NULL, 0, 0, 0 }
+       };
+       progname = argv[0];
+
+       while ((opt = getopt_long(argc, argvopt, "n:p:", lgopts,
+               &option_index)) != EOF){
+               switch (opt){
+                       case 'p':
+                               if (parse_portmask(max_ports, optarg) != 0){
+                                       usage();
+                                       return -1;
+                               }
+                               break;
+                       case 'n':
+                               if (parse_num_clients(optarg) != 0){
+                                       usage();
+                                       return -1;
+                               }
+                               break;
+                       default:
+                               printf("ERROR: Unknown option '%c'\n", opt);
+                               usage();
+                               return -1;
+               }
+       }
+
+       /* both options are mandatory */
+       if (ports->num_ports == 0 || num_clients == 0){
+               usage();
+               return -1;
+       }
+
+       if (ports->num_ports % 2 != 0){
+               printf("ERROR: application requires an even number of ports to use\n");
+               return -1;
+       }
+       return 0;
+}
+
diff --git a/examples/multi_process/client_server_mp/mp_server/args.h b/examples/multi_process/client_server_mp/mp_server/args.h
new file mode 100644 (file)
index 0000000..d2ff6ee
--- /dev/null
@@ -0,0 +1,41 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#ifndef _ARGS_H_
+#define _ARGS_H_
+
+/*
+ * Parse the application (non-EAL) command-line arguments for the server.
+ * max_ports is the number of available NIC ports; argc/argv are the
+ * remaining arguments. Returns 0 on success, -1 on invalid arguments
+ * (usage information is printed before returning).
+ */
+int parse_app_args(uint8_t max_ports, int argc, char *argv[]);
+
+#endif /* ifndef _ARGS_H_ */
diff --git a/examples/multi_process/client_server_mp/mp_server/init.c b/examples/multi_process/client_server_mp/mp_server/init.c
new file mode 100644 (file)
index 0000000..cbaccb9
--- /dev/null
@@ -0,0 +1,304 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/queue.h>
+#include <errno.h>
+#include <stdarg.h>
+#include <inttypes.h>
+
+#include <rte_common.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_byteorder.h>
+#include <rte_atomic.h>
+#include <rte_launch.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_branch_prediction.h>
+#include <rte_debug.h>
+#include <rte_ring.h>
+#include <rte_log.h>
+#include <rte_mempool.h>
+#include <rte_memcpy.h>
+#include <rte_mbuf.h>
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+#include <rte_hash_crc.h>
+#include <rte_fbk_hash.h>
+#include <rte_string_fns.h>
+
+#include "common.h"
+#include "init_drivers.h"
+#include "args.h"
+#include "init.h"
+#include "main.h"
+
+#define MBUFS_PER_CLIENT 1536
+#define MBUFS_PER_PORT 1536
+#define MBUF_CACHE_SIZE 512
+#define MBUF_OVERHEAD (sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
+#define RX_MBUF_DATA_SIZE 2048
+#define MBUF_SIZE (RX_MBUF_DATA_SIZE + MBUF_OVERHEAD)
+
+#define RTE_MP_RX_DESC_DEFAULT 512
+#define RTE_MP_TX_DESC_DEFAULT 512
+#define CLIENT_QUEUE_RINGSIZE 128
+
+#define NO_FLAGS 0
+
+/*
+ * RX and TX Prefetch, Host, and Write-back threshold values should be
+ * carefully set for optimal performance. Consult the network
+ * controller's datasheet and supporting DPDK documentation for guidance
+ * on how these parameters should be set.
+ */
+/* Default configuration for rx and tx thresholds etc. */
+/*
+ * These default values are optimized for use with the Intel(R) 82599 10 GbE
+ * Controller and the DPDK ixgbe PMD. Consider using other values for other
+ * network controllers and/or network drivers.
+ */
+#define MP_DEFAULT_PTHRESH 36
+#define MP_DEFAULT_RX_HTHRESH 8
+#define MP_DEFAULT_TX_HTHRESH 0
+#define MP_DEFAULT_WTHRESH 0
+
+static const struct rte_eth_rxconf rx_conf_default = {
+               .rx_thresh = {
+                               .pthresh = MP_DEFAULT_PTHRESH,
+                               .hthresh = MP_DEFAULT_RX_HTHRESH,
+                               .wthresh = MP_DEFAULT_WTHRESH,
+               },
+};
+
+static const struct rte_eth_txconf tx_conf_default = {
+               .tx_thresh = {
+                               .pthresh = MP_DEFAULT_PTHRESH,
+                               .hthresh = MP_DEFAULT_TX_HTHRESH,
+                               .wthresh = MP_DEFAULT_WTHRESH,
+               },
+               .tx_free_thresh = 0, /* Use PMD default values */
+               .tx_rs_thresh = 0, /* Use PMD default values */
+};
+
+/* The mbuf pool for packet rx */
+struct rte_mempool *pktmbuf_pool;
+
+/* array of info/queues for clients */
+struct client *clients = NULL;
+
+/* the port details */
+struct port_info *ports;
+
+/**
+ * Initialise the mbuf pool for packet reception for the NIC, and any other
+ * buffer pools needed by the app - currently none.
+ */
+static int
+init_mbuf_pools(void)
+{
+       const unsigned num_mbufs = (num_clients * MBUFS_PER_CLIENT)
+                       + (ports->num_ports * MBUFS_PER_PORT);
+
+       /* don't pass single-producer/single-consumer flags to mbuf create as it
+        * seems faster to use a cache instead */
+       printf("Creating mbuf pool '%s' [%u mbufs] ...\n",
+                       PKTMBUF_POOL_NAME, num_mbufs);
+       pktmbuf_pool = rte_mempool_create(PKTMBUF_POOL_NAME, num_mbufs,
+                       MBUF_SIZE, MBUF_CACHE_SIZE,
+                       sizeof(struct rte_pktmbuf_pool_private), rte_pktmbuf_pool_init,
+                       NULL, rte_pktmbuf_init, NULL, SOCKET0, NO_FLAGS );
+
+       return (pktmbuf_pool == NULL); /* 0  on success */
+}
+
+/**
+ * Initialise an individual port:
+ * - configure number of rx and tx rings
+ * - set up each rx ring, to pull from the main mbuf pool
+ * - set up each tx ring
+ * - start the port and report its status to stdout
+ */
+static int
+init_port(uint8_t port_num)
+{
+       /* for port configuration all features are off by default */
+       const struct rte_eth_conf port_conf = {
+               .rxmode = {
+                       .mq_mode = ETH_RSS
+               }
+       };
+       const uint16_t rx_rings = 1, tx_rings = num_clients;
+       const uint16_t rx_ring_size = RTE_MP_RX_DESC_DEFAULT;
+       const uint16_t tx_ring_size = RTE_MP_TX_DESC_DEFAULT;
+
+       struct rte_eth_link link;
+       uint16_t q;
+       int retval;
+
+       printf("Port %u init ... ", (unsigned)port_num);
+       fflush(stdout);
+
+       /* Standard DPDK port initialisation - config port, then set up
+        * rx and tx rings */
+       if ((retval = rte_eth_dev_configure(port_num, rx_rings, tx_rings,
+               &port_conf)) != 0)
+               return retval;
+
+       for (q = 0; q < rx_rings; q++) {
+               retval = rte_eth_rx_queue_setup(port_num, q, rx_ring_size,
+                               SOCKET0, &rx_conf_default, pktmbuf_pool);
+               if (retval < 0) return retval;
+       }
+
+       for ( q = 0; q < tx_rings; q ++ ) {
+               retval = rte_eth_tx_queue_setup(port_num, q, tx_ring_size,
+                               SOCKET0, &tx_conf_default);
+               if (retval < 0) return retval;
+       }
+
+       rte_eth_promiscuous_enable(port_num);
+
+       retval  = rte_eth_dev_start(port_num);
+       if (retval < 0) return retval;
+
+       printf( "done: ");
+
+       /* get link status */
+       rte_eth_link_get(port_num, &link);
+       if (link.link_status) {
+               printf(" Link Up - speed %u Mbps - %s\n",
+                      (uint32_t) link.link_speed,
+                      (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
+                      ("full-duplex") : ("half-duplex"));
+       }
+       else{
+               printf(" Link Down\n");
+       }
+       return 0;
+}
+
+/**
+ * Set up the DPDK rings which will be used to pass packets, via
+ * pointers, between the multi-process server and client processes.
+ * Each client needs one RX queue.
+ */
+static int
+init_shm_rings(void)
+{
+       unsigned i;
+       const unsigned ringsize = CLIENT_QUEUE_RINGSIZE;
+
+       clients = rte_malloc("client details",
+               sizeof(*clients) * num_clients, 0);
+       if (clients == NULL)
+               rte_exit(EXIT_FAILURE, "Cannot allocate memory for client program details\n");
+
+       for (i = 0; i < num_clients; i++) {
+               /* Create an RX queue for each client */
+               clients[i].rx_q = rte_ring_create(get_rx_queue_name(i),
+                               ringsize, SOCKET0,
+                               RING_F_SP_ENQ | RING_F_SC_DEQ ); /* single prod, single cons */
+               if (clients[i].rx_q == NULL)
+                       rte_exit(EXIT_FAILURE, "Cannot create rx ring queue for client %u\n", i);
+       }
+       return 0;
+}
+
+/**
+ * Main init function for the multi-process server app,
+ * calls subfunctions to do each stage of the initialisation.
+ */
+int
+init(int argc, char *argv[])
+{
+       int retval;
+       const struct rte_memzone *mz;
+       uint8_t i, total_ports;
+
+       /* init EAL, parsing EAL args */
+       retval = rte_eal_init(argc, argv);
+       if (retval < 0)
+               return -1;
+       argc -= retval;
+       argv += retval;
+
+       /* initialise the nic drivers */
+       retval = init_drivers();
+       if (retval != 0)
+               rte_exit(EXIT_FAILURE, "Cannot initialise drivers\n");
+
+       /* get total number of ports */
+       total_ports = rte_eth_dev_count();
+
+       /* set up array for port data */
+       mz = rte_memzone_reserve(MZ_PORT_INFO, sizeof(*ports),
+                               rte_socket_id(), NO_FLAGS);
+       if (mz == NULL)
+               rte_exit(EXIT_FAILURE, "Cannot reserve memory zone for port information\n");
+       memset(mz->addr, 0, sizeof(*ports));
+       ports = mz->addr;
+
+       /* parse additional, application arguments */
+       retval = parse_app_args(total_ports, argc, argv);
+       if (retval != 0)
+               return -1;
+
+       /* initialise mbuf pools */
+       retval = init_mbuf_pools();
+       if (retval != 0)
+               rte_exit(EXIT_FAILURE, "Cannot create needed mbuf pools\n");
+
+       /* now initialise the ports we will use */
+       for (i = 0; i < ports->num_ports; i++) {
+               retval = init_port(ports->id[i]);
+               if (retval != 0)
+                       rte_exit(EXIT_FAILURE, "Cannot initialise port %u\n",
+                                       (unsigned)i);
+       }
+
+       /* initialise the client queues/rings for inter-process comms */
+       init_shm_rings();
+
+       return 0;
+}
diff --git a/examples/multi_process/client_server_mp/mp_server/init.h b/examples/multi_process/client_server_mp/mp_server/init.h
new file mode 100644 (file)
index 0000000..2d4ab58
--- /dev/null
@@ -0,0 +1,74 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#ifndef _INIT_H_
+#define _INIT_H_
+
+/*
+ * #include <rte_ring.h>
+ * #include "args.h"
+ */
+
+/*
+ * Define a client structure with all needed info, including
+ * stats from the clients.
+ */
+struct client {
+       struct rte_ring *rx_q;
+       unsigned client_id;
+       /* these stats hold how many packets the client will actually receive,
+        * and how many packets were dropped because the client's queue was full.
+        * The port-info stats, in contrast, record how many packets were received
+        * or transmitted on an actual NIC port.
+        */
+       struct {
+               volatile uint64_t rx;
+               volatile uint64_t rx_drop;
+       } stats;
+};
+
+extern struct client *clients;
+
+/* the shared port information: port numbers, rx and tx stats etc. */
+extern struct port_info *ports;
+
+extern struct rte_mempool *pktmbuf_pool;
+extern uint8_t num_clients;
+extern unsigned num_sockets;
+extern struct port_info *ports;
+
+int init(int argc, char *argv[]);
+
+#endif /* ifndef _INIT_H_ */
diff --git a/examples/multi_process/client_server_mp/mp_server/main.c b/examples/multi_process/client_server_mp/mp_server/main.c
new file mode 100644 (file)
index 0000000..efbe051
--- /dev/null
@@ -0,0 +1,330 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdint.h>
+#include <stdarg.h>
+#include <inttypes.h>
+#include <inttypes.h>
+#include <sys/queue.h>
+#include <errno.h>
+#include <netinet/ip.h>
+
+#include <rte_common.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_byteorder.h>
+#include <rte_launch.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_branch_prediction.h>
+#include <rte_atomic.h>
+#include <rte_ring.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_mempool.h>
+#include <rte_memcpy.h>
+#include <rte_mbuf.h>
+#include <rte_ether.h>
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_ethdev.h>
+#include <rte_byteorder.h>
+#include <rte_malloc.h>
+#include <rte_hash_crc.h>
+#include <rte_fbk_hash.h>
+#include <rte_string_fns.h>
+
+#include "common.h"
+#include "args.h"
+#include "init.h"
+#include "main.h"
+
+/*
+ * When doing reads from the NIC or the client queues,
+ * use this batch size
+ */
+#define PACKET_READ_SIZE 32
+
+/*
+ * Local buffers to put packets in, used to send packets in bursts to the
+ * clients
+ */
+struct client_rx_buf {
+       struct rte_mbuf *buffer[PACKET_READ_SIZE];
+       uint16_t count;
+};
+
+/* One buffer per client rx queue - dynamically allocate array */
+static struct client_rx_buf *cl_rx_buf;
+
+static const char *
+get_printable_mac_addr(uint8_t port)
+{
+       static const char err_address[] = "00:00:00:00:00:00";
+       static char addresses[RTE_MAX_ETHPORTS][sizeof(err_address)];
+
+       if (unlikely(port >= RTE_MAX_ETHPORTS))
+               return err_address;
+       if (unlikely(addresses[port][0]=='\0')){
+               struct ether_addr mac;
+               rte_eth_macaddr_get(port, &mac);
+               rte_snprintf(addresses[port], sizeof(addresses[port]),
+                               "%02x:%02x:%02x:%02x:%02x:%02x",
+                               mac.addr_bytes[0], mac.addr_bytes[1], mac.addr_bytes[2],
+                               mac.addr_bytes[3], mac.addr_bytes[4], mac.addr_bytes[5]);
+       }
+       return addresses[port];
+}
+
+/*
+ * This function displays the recorded statistics for each port
+ * and for each client. It uses ANSI terminal codes to clear
+ * screen when called. It is called from a single non-master
+ * thread in the server process, when the process is run with more
+ * than one lcore enabled.
+ */
+static void
+do_stats_display(void)
+{
+       unsigned i, j;
+       const char clr[] = { 27, '[', '2', 'J', '\0' };
+       const char topLeft[] = { 27, '[', '1', ';', '1', 'H','\0' };
+       uint64_t port_tx[RTE_MAX_ETHPORTS], port_tx_drop[RTE_MAX_ETHPORTS];
+       uint64_t client_tx[MAX_CLIENTS], client_tx_drop[MAX_CLIENTS];
+
+       /* to get TX stats, we need to do some summing calculations */
+       memset(port_tx, 0, sizeof(port_tx));
+       memset(port_tx_drop, 0, sizeof(port_tx_drop));
+       memset(client_tx, 0, sizeof(client_tx));
+       memset(client_tx_drop, 0, sizeof(client_tx_drop));
+
+       for (i = 0; i < num_clients; i++){
+               const volatile struct tx_stats *tx = &ports->tx_stats[i];
+               for (j = 0; j < ports->num_ports; j++){
+                       /* assign to local variables here, save re-reading volatile vars */
+                       const uint64_t tx_val = tx->tx[j];
+                       const uint64_t drop_val = tx->tx_drop[j];
+                       port_tx[j] += tx_val;
+                       port_tx_drop[j] += drop_val;
+                       client_tx[i] += tx_val;
+                       client_tx_drop[i] += drop_val;
+               }
+       }
+
+       /* Clear screen and move to top left */
+       printf("%s%s", clr, topLeft);
+
+       printf("PORTS\n");
+       printf("-----\n");
+       for (i = 0; i < ports->num_ports; i++)
+               printf("Port %u: '%s'\t", (unsigned)ports->id[i],
+                               get_printable_mac_addr(ports->id[i]));
+       printf("\n\n");
+       for (i = 0; i < ports->num_ports; i++){
+               printf("Port %u - rx: %9"PRIu64"\t"
+                               "tx: %9"PRIu64"\n",
+                               (unsigned)ports->id[i], ports->rx_stats.rx[i],
+                               port_tx[i]);
+       }
+
+       printf("\nCLIENTS\n");
+       printf("-------\n");
+       for (i = 0; i < num_clients; i++){
+               const unsigned long long rx = clients[i].stats.rx;
+               const unsigned long long rx_drop = clients[i].stats.rx_drop;
+               printf("Client %2u - rx: %9llu, rx_drop: %9llu\n"
+                               "            tx: %9"PRIu64", tx_drop: %9"PRIu64"\n",
+                               i, rx, rx_drop, client_tx[i], client_tx_drop[i]);
+       }
+
+       printf("\n");
+}
+
+/*
+ * The function called from each non-master lcore used by the process.
+ * The test_and_set function is used to randomly pick a single lcore on which
+ * the code to display the statistics will run. Otherwise, the code just
+ * repeatedly sleeps.
+ */
+static int
+sleep_lcore(__attribute__((unused)) void *dummy)
+{
+       /* Used to pick a display thread - static, so zero-initialised */
+       static rte_atomic32_t display_stats;
+
+       /* Only one core should display stats */
+       if (rte_atomic32_test_and_set(&display_stats)) {
+               const unsigned sleeptime = 1;
+               printf("Core %u displaying statistics\n", rte_lcore_id());
+
+               /* Longer initial pause so above printf is seen */
+               sleep(sleeptime * 3);
+
+               /* Loop forever: sleep always returns 0 or <= param */
+               while (sleep(sleeptime) <= sleeptime)
+                       do_stats_display();
+       }
+       else {
+               const unsigned sleeptime = 100;
+               printf("Putting core %u to sleep\n", rte_lcore_id());
+               while (sleep(sleeptime) <= sleeptime)
+                       ; /* loop doing nothing */
+       }
+       return 0;
+}
+
+/*
+ * Function to set all the client statistic values to zero.
+ * Called at program startup.
+ */
+static void
+clear_stats(void)
+{
+       unsigned i;
+
+       for (i = 0; i < num_clients; i++)
+               clients[i].stats.rx = clients[i].stats.rx_drop = 0;
+}
+
+/*
+ * send a burst of traffic to a client, assuming there are packets
+ * available to be sent to this client
+ */
+static void
+flush_rx_queue(uint16_t client)
+{
+       uint16_t j;
+       struct client *cl;
+
+       if (cl_rx_buf[client].count == 0)
+               return;
+
+       cl = &clients[client];
+       if (rte_ring_enqueue_bulk(cl->rx_q, (void **)cl_rx_buf[client].buffer,
+                       cl_rx_buf[client].count) != 0){
+               for (j = 0; j < cl_rx_buf[client].count; j++)
+                       rte_pktmbuf_free(cl_rx_buf[client].buffer[j]);
+               cl->stats.rx_drop += cl_rx_buf[client].count;
+       }
+       else
+               cl->stats.rx += cl_rx_buf[client].count;
+
+       cl_rx_buf[client].count = 0;
+}
+
+/*
+ * marks a packet down to be sent to a particular client process
+ */
+static inline void
+enqueue_rx_packet(uint8_t client, struct rte_mbuf *buf)
+{
+       cl_rx_buf[client].buffer[cl_rx_buf[client].count++] = buf;
+}
+
+/*
+ * This function takes a group of packets and routes them
+ * individually to the client process. Very simply round-robins the packets
+ * without checking any of the packet contents.
+ */
+static void
+process_packets(uint32_t port_num __rte_unused,
+               struct rte_mbuf *pkts[], uint16_t rx_count)
+{
+       uint16_t i;
+       uint8_t client = 0;
+
+       for (i = 0; i < rx_count; i++) {
+               enqueue_rx_packet(client, pkts[i]);
+
+               if (++client == num_clients)
+                       client = 0;
+       }
+
+       for (i = 0; i < num_clients; i++)
+               flush_rx_queue(i);
+}
+
+/*
+ * Function called by the master lcore of the DPDK process.
+ */
+static void
+do_packet_forwarding(void)
+{
+       unsigned port_num = 0; /* indexes the port[] array */
+
+       for (;;) {
+               struct rte_mbuf *buf[PACKET_READ_SIZE];
+               uint16_t rx_count;
+
+               /* read a port */
+               rx_count = rte_eth_rx_burst(ports->id[port_num], 0,
+                               buf, PACKET_READ_SIZE);
+               ports->rx_stats.rx[port_num] += rx_count;
+
+               /* Now process the NIC packets read */
+               if (likely(rx_count > 0))
+                       process_packets(port_num, buf, rx_count);
+
+               /* move to next port */
+               if (++port_num == ports->num_ports)
+                       port_num = 0;
+       }
+}
+
+int
+MAIN(int argc, char *argv[])
+{
+       /* initialise the system */
+       if (init(argc, argv) < 0 )
+               return -1;
+       RTE_LOG(INFO, APP, "Finished Process Init.\n");
+
+       cl_rx_buf = calloc(num_clients, sizeof(cl_rx_buf[0]));
+       if (cl_rx_buf == NULL) rte_exit(EXIT_FAILURE, "Cannot allocate client rx buffers\n");
+       /* clear statistics */
+       clear_stats();
+
+       /* put all other cores to sleep bar master */
+       rte_eal_mp_remote_launch(sleep_lcore, NULL, SKIP_MASTER);
+
+       do_packet_forwarding();
+       return 0;
+}
diff --git a/examples/multi_process/client_server_mp/mp_server/main.h b/examples/multi_process/client_server_mp/mp_server/main.h
new file mode 100644 (file)
index 0000000..1794abc
--- /dev/null
@@ -0,0 +1,50 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#ifndef _MAIN_H_
+#define _MAIN_H_
+
+#define SOCKET0 0
+#define SOCKET1 1
+
+#ifdef RTE_EXEC_ENV_BAREMETAL
+#define MAIN _main
+#else
+#define MAIN main
+#endif
+
+int MAIN(int argc, char **argv);
+
+#endif /* _MAIN_H_ */
diff --git a/examples/multi_process/client_server_mp/shared/common.h b/examples/multi_process/client_server_mp/shared/common.h
new file mode 100644 (file)
index 0000000..46cc4f3
--- /dev/null
@@ -0,0 +1,89 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#ifndef _COMMON_H_
+#define _COMMON_H_
+
+#define MAX_CLIENTS             16
+
+/*
+ * Shared port info, including statistics information for display by server.
+ * Structure will be put in a memzone.
+ * - All port id values share one cache line as this data will be read-only
+ * during operation.
+ * - All rx statistic values share cache lines, as this data is written only
+ * by the server process. (rare reads by stats display)
+ * - The tx statistics have values for all ports per cache line, but the stats
+ * themselves are written by the clients, so we have a distinct set, on different
+ * cache lines for each client to use.
+ */
+struct rx_stats{
+       uint64_t rx[RTE_MAX_ETHPORTS];
+} __rte_cache_aligned;
+
+struct tx_stats{
+       uint64_t tx[RTE_MAX_ETHPORTS];
+       uint64_t tx_drop[RTE_MAX_ETHPORTS];
+} __rte_cache_aligned;
+
+struct port_info {
+       uint8_t num_ports;
+       uint8_t id[RTE_MAX_ETHPORTS];
+       volatile struct rx_stats rx_stats;
+       volatile struct tx_stats tx_stats[MAX_CLIENTS];
+};
+
+/* define common names for structures shared between server and client */
+#define MP_CLIENT_RXQ_NAME "MProc_Client_%u_RX"
+#define PKTMBUF_POOL_NAME "MProc_pktmbuf_pool"
+#define MZ_PORT_INFO "MProc_port_info"
+
+/*
+ * Given the rx queue name template above, get the queue name
+ */
+static inline const char *
+get_rx_queue_name(unsigned id)
+{
+       /* buffer for return value. Size calculated by %u being replaced
+        * by maximum 3 digits (plus an extra byte for safety) */
+       static char buffer[sizeof(MP_CLIENT_RXQ_NAME) + 2];
+
+       rte_snprintf(buffer, sizeof(buffer) - 1, MP_CLIENT_RXQ_NAME, id);
+       return buffer;
+}
+
+#define RTE_LOGTYPE_APP RTE_LOGTYPE_USER1
+
+#endif
diff --git a/examples/multi_process/client_server_mp/shared/init_drivers.h b/examples/multi_process/client_server_mp/shared/init_drivers.h
new file mode 100644 (file)
index 0000000..658c841
--- /dev/null
@@ -0,0 +1,58 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#ifndef _INIT_DRIVERS_H_
+#define _INIT_DRIVERS_H_
+
+/**
+ * Initialise all 1G and 10G NICs available
+ */
+static inline int
+init_drivers(void)
+{
+       if (
+#ifdef RTE_LIBRTE_IGB_PMD
+                       (rte_igb_pmd_init() < 0) ||
+#endif
+#ifdef RTE_LIBRTE_IXGBE_PMD
+                       (rte_ixgbe_pmd_init() < 0) ||
+#endif
+                       (rte_eal_pci_probe() < 0 ))
+               return -1;
+
+       return 0;
+}
+
+#endif
diff --git a/examples/multi_process/simple_mp/Makefile b/examples/multi_process/simple_mp/Makefile
new file mode 100644 (file)
index 0000000..fb9d81d
--- /dev/null
@@ -0,0 +1,52 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+ifeq ($(RTE_SDK),)
+$(error "Please define RTE_SDK environment variable")
+endif
+
+# Default target, can be overridden by command line or environment
+RTE_TARGET ?= x86_64-default-linuxapp-gcc
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# binary name
+APP = simple_mp
+
+# all source are stored in SRCS-y
+SRCS-y := main.c mp_commands.c
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+include $(RTE_SDK)/mk/rte.extapp.mk
diff --git a/examples/multi_process/simple_mp/main.c b/examples/multi_process/simple_mp/main.c
new file mode 100644 (file)
index 0000000..166fc80
--- /dev/null
@@ -0,0 +1,160 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+/*
+ * This sample application is a simple multi-process application which
+ * demonstrates sharing of queues and memory pools between processes, and
+ * using those queues/pools for communication between the processes.
+ *
+ * Application is designed to run with two processes, a primary and a
+ * secondary, and each accepts commands on the commandline, the most
+ * important of which is "send", which just sends a string to the other
+ * process.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <unistd.h>
+#include <termios.h>
+#include <sys/queue.h>
+
+#include <rte_common.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_launch.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_debug.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_ring.h>
+#include <rte_log.h>
+#include <rte_mempool.h>
+#include <cmdline_rdline.h>
+#include <cmdline_parse.h>
+#include <cmdline_socket.h>
+#include <cmdline.h>
+#include "mp_commands.h"
+
+#define RTE_LOGTYPE_APP RTE_LOGTYPE_USER1
+
+#define SOCKET0 0
+
+static const char *_MSG_POOL = "MSG_POOL";
+static const char *_SEC_2_PRI = "SEC_2_PRI";
+static const char *_PRI_2_SEC = "PRI_2_SEC";
+const unsigned string_size = 64;
+
+struct rte_ring *send_ring, *recv_ring;
+struct rte_mempool *message_pool;
+volatile int quit = 0;
+
+static int
+lcore_recv(__attribute__((unused)) void *arg)
+{
+       unsigned lcore_id = rte_lcore_id();
+
+       printf("Starting core %u\n", lcore_id);
+       while (!quit){
+               void *msg;
+               if (rte_ring_dequeue(recv_ring, &msg) < 0){
+                       usleep(5);
+                       continue;
+               }
+               printf("core %u: Received '%s'\n", lcore_id, (char *)msg);
+               rte_mempool_put(message_pool, msg);
+       }
+
+       return 0;
+}
+
+int
+main(int argc, char **argv)
+{
+       const unsigned flags = 0;
+       const unsigned ring_size = 64;
+       const unsigned pool_size = 1024;
+       const unsigned pool_cache = 32;
+       const unsigned priv_data_sz = 0;
+
+       int ret;
+       unsigned lcore_id;
+
+       ret = rte_eal_init(argc, argv);
+       if (ret < 0)
+               rte_exit(EXIT_FAILURE, "Cannot init EAL\n");
+
+       if (rte_eal_process_type() == RTE_PROC_PRIMARY){
+               send_ring = rte_ring_create(_PRI_2_SEC, ring_size, SOCKET0, flags);
+               recv_ring = rte_ring_create(_SEC_2_PRI, ring_size, SOCKET0, flags);
+               message_pool = rte_mempool_create(_MSG_POOL, pool_size,
+                               string_size, pool_cache, priv_data_sz,
+                               NULL, NULL, NULL, NULL,
+                               SOCKET0, flags);
+       } else {
+               recv_ring = rte_ring_lookup(_PRI_2_SEC);
+               send_ring = rte_ring_lookup(_SEC_2_PRI);
+               message_pool = rte_mempool_lookup(_MSG_POOL);
+       }
+       if (send_ring == NULL)
+               rte_exit(EXIT_FAILURE, "Problem getting sending ring\n");
+       if (recv_ring == NULL)
+               rte_exit(EXIT_FAILURE, "Problem getting receiving ring\n");
+       if (message_pool == NULL)
+               rte_exit(EXIT_FAILURE, "Problem getting message pool\n");
+
+       RTE_LOG(INFO, APP, "Finished Process Init.\n");
+
+       /* call lcore_recv() on every slave lcore */
+       RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+               rte_eal_remote_launch(lcore_recv, NULL, lcore_id);
+       }
+
+       /* call cmd prompt on master lcore */
+       struct cmdline *cl = cmdline_stdin_new(simple_mp_ctx, "\nsimple_mp > ");
+       if (cl == NULL)
+               rte_exit(EXIT_FAILURE, "Cannot create cmdline instance\n");
+       cmdline_interact(cl);
+       cmdline_stdin_exit(cl);
+
+       rte_eal_mp_wait_lcore();
+       return 0;
+}
diff --git a/examples/multi_process/simple_mp/mp_commands.c b/examples/multi_process/simple_mp/mp_commands.c
new file mode 100644 (file)
index 0000000..6e12ed3
--- /dev/null
@@ -0,0 +1,169 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+#include <stdint.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <inttypes.h>
+#include <stdio.h>
+#include <termios.h>
+#include <errno.h>
+#include <sys/queue.h>
+
+#include <rte_common.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_launch.h>
+#include <rte_log.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_ring.h>
+#include <rte_debug.h>
+#include <rte_mempool.h>
+#include <rte_string_fns.h>
+
+#include <cmdline_rdline.h>
+#include <cmdline_parse.h>
+#include <cmdline_parse_string.h>
+#include <cmdline_socket.h>
+#include <cmdline.h>
+#include "mp_commands.h"
+
+/**********************************************************/
+
+struct cmd_send_result {
+       cmdline_fixed_string_t action;
+       cmdline_fixed_string_t message;
+};
+
+static void cmd_send_parsed(void *parsed_result,
+               __attribute__((unused)) struct cmdline *cl,
+               __attribute__((unused)) void *data)
+{
+       void *msg;
+       struct cmd_send_result *res = parsed_result;
+
+       if (rte_mempool_get(message_pool, &msg) < 0)
+               rte_panic("Failed to get message buffer\n");
+       rte_snprintf((char *)msg, string_size, "%s", res->message);
+       if (rte_ring_enqueue(send_ring, msg) < 0) {
+               printf("Failed to send message - message discarded\n");
+               rte_mempool_put(message_pool, msg);
+       }
+}
+
+cmdline_parse_token_string_t cmd_send_action =
+       TOKEN_STRING_INITIALIZER(struct cmd_send_result, action, "send");
+cmdline_parse_token_string_t cmd_send_message =
+       TOKEN_STRING_INITIALIZER(struct cmd_send_result, message, NULL);
+
+cmdline_parse_inst_t cmd_send = {
+       .f = cmd_send_parsed,  /* function to call */
+       .data = NULL,      /* 2nd arg of func */
+       .help_str = "send a string to another process",
+       .tokens = {        /* token list, NULL terminated */
+                       (void *)&cmd_send_action,
+                       (void *)&cmd_send_message,
+                       NULL,
+       },
+};
+
+/**********************************************************/
+
+struct cmd_quit_result {
+       cmdline_fixed_string_t quit;
+};
+
+static void cmd_quit_parsed(__attribute__((unused)) void *parsed_result,
+                           struct cmdline *cl,
+                           __attribute__((unused)) void *data)
+{
+       quit = 1;
+       cmdline_quit(cl);
+}
+
+cmdline_parse_token_string_t cmd_quit_quit =
+       TOKEN_STRING_INITIALIZER(struct cmd_quit_result, quit, "quit");
+
+cmdline_parse_inst_t cmd_quit = {
+       .f = cmd_quit_parsed,  /* function to call */
+       .data = NULL,      /* 2nd arg of func */
+       .help_str = "close the application",
+       .tokens = {        /* token list, NULL terminated */
+               (void *)&cmd_quit_quit,
+               NULL,
+       },
+};
+
+/**********************************************************/
+
+struct cmd_help_result {
+       cmdline_fixed_string_t help;
+};
+
+static void cmd_help_parsed(__attribute__((unused)) void *parsed_result,
+                           struct cmdline *cl,
+                           __attribute__((unused)) void *data)
+{
+       cmdline_printf(cl, "Simple demo example of multi-process in RTE\n\n"
+                       "This is a readline-like interface that can be used to\n"
+                       "send commands to the simple app. Commands supported are:\n\n"
+                       "- send [string]\n" "- help\n" "- quit\n\n");
+}
+
+cmdline_parse_token_string_t cmd_help_help =
+       TOKEN_STRING_INITIALIZER(struct cmd_help_result, help, "help");
+
+cmdline_parse_inst_t cmd_help = {
+       .f = cmd_help_parsed,  /* function to call */
+       .data = NULL,      /* 2nd arg of func */
+       .help_str = "show help",
+       .tokens = {        /* token list, NULL terminated */
+               (void *)&cmd_help_help,
+               NULL,
+       },
+};
+
+/****** CONTEXT (list of instruction) */
+cmdline_parse_ctx_t simple_mp_ctx[] = {
+               (cmdline_parse_inst_t *)&cmd_send,
+               (cmdline_parse_inst_t *)&cmd_quit,
+               (cmdline_parse_inst_t *)&cmd_help,
+       NULL,
+};
diff --git a/examples/multi_process/simple_mp/mp_commands.h b/examples/multi_process/simple_mp/mp_commands.h
new file mode 100644 (file)
index 0000000..bdb25c2
--- /dev/null
@@ -0,0 +1,46 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#ifndef _SIMPLE_MP_COMMANDS_H_
+#define _SIMPLE_MP_COMMANDS_H_
+
+extern const unsigned string_size;
+extern struct rte_ring *send_ring;
+extern struct rte_mempool *message_pool;
+extern volatile int quit;
+
+extern cmdline_parse_ctx_t simple_mp_ctx[];
+
+#endif /* _SIMPLE_MP_COMMANDS_H_ */
diff --git a/examples/multi_process/symmetric_mp/Makefile b/examples/multi_process/symmetric_mp/Makefile
new file mode 100644 (file)
index 0000000..5036bad
--- /dev/null
@@ -0,0 +1,52 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+ifeq ($(RTE_SDK),)
+$(error "Please define RTE_SDK environment variable")
+endif
+
+# Default target, can be overridden by command line or environment
+RTE_TARGET ?= x86_64-default-linuxapp-gcc
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# binary name
+APP = symmetric_mp
+
+# all source are stored in SRCS-y
+SRCS-y := main.c
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+include $(RTE_SDK)/mk/rte.extapp.mk
diff --git a/examples/multi_process/symmetric_mp/main.c b/examples/multi_process/symmetric_mp/main.c
new file mode 100644 (file)
index 0000000..ad783f1
--- /dev/null
@@ -0,0 +1,471 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+/*
+ * Sample application demonstrating how to do packet I/O in a multi-process
+ * environment. The same code can be run as a primary process and as a
+ * secondary process, just with a different proc-id parameter in each case
+ * (apart from the EAL flag to indicate a secondary process).
+ *
+ * Each process will read from the same ports, given by the port-mask
+ * parameter, which should be the same in each case, just using a different
+ * queue per port as determined by the proc-id parameter.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <sys/queue.h>
+#include <getopt.h>
+#include <signal.h>
+#include <inttypes.h>
+
+#include <rte_common.h>
+#include <rte_log.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_launch.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_debug.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_ring.h>
+#include <rte_debug.h>
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_mempool.h>
+#include <rte_memcpy.h>
+#include <rte_mbuf.h>
+#include <rte_string_fns.h>
+
+#define RTE_LOGTYPE_APP RTE_LOGTYPE_USER1
+
+#define SOCKET0 0
+
+#define MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
+#define NB_MBUFS (64*1024) /* use 64k mbufs; parenthesized to keep precedence in expressions */
+#define MBUF_CACHE_SIZE 256
+#define PKT_BURST 32
+#define RX_RING_SIZE 128
+#define TX_RING_SIZE 512
+
+#define PARAM_PROC_ID "proc-id"
+#define PARAM_NUM_PROCS "num-procs"
+
+/*
+ * RX and TX Prefetch, Host, and Write-back threshold values should be
+ * carefully set for optimal performance. Consult the network
+ * controller's datasheet and supporting DPDK documentation for guidance
+ * on how these parameters should be set.
+ */
+/* Default configuration for rx and tx thresholds etc. */
+static const struct rte_eth_rxconf rx_conf_default = {
+       .rx_thresh = {
+               .pthresh = 8,
+               .hthresh = 8,
+               .wthresh = 4,
+       },
+};
+
+/*
+ * These default values are optimized for use with the Intel(R) 82599 10 GbE
+ * Controller and the DPDK ixgbe PMD. Consider using other values for other
+ * network controllers and/or network drivers.
+ */
+static const struct rte_eth_txconf tx_conf_default = {
+       .tx_thresh = {
+               .pthresh = 36,
+               .hthresh = 0,
+               .wthresh = 0,
+       },
+       .tx_free_thresh = 0, /* Use PMD default values */
+       .tx_rs_thresh = 0, /* Use PMD default values */
+};
+
+/* for each lcore, record the elements of the ports array to use */
+struct lcore_ports{
+       unsigned start_port;
+       unsigned num_ports;
+};
+
+/* structure to record the rx and tx packets. Put two per cache line as ports
+ * used in pairs */
+struct port_stats{
+       unsigned rx;
+       unsigned tx;
+       unsigned drop;
+} __attribute__((aligned(CACHE_LINE_SIZE / 2)));
+
+static int proc_id = -1;
+static unsigned num_procs = 0;
+
+static uint8_t ports[RTE_MAX_ETHPORTS];
+static unsigned num_ports = 0;
+
+static struct lcore_ports lcore_ports[RTE_MAX_LCORE];
+static struct port_stats pstats[RTE_MAX_ETHPORTS];
+
+/* prints the usage statement and quits with an error message */
+static void
+smp_usage(const char *prgname, const char *errmsg)
+{
+       printf("\nError: %s\n",errmsg);
+       printf("\n%s [EAL options] -- -p <port mask> "
+                       "--"PARAM_NUM_PROCS" <n>"
+                       " --"PARAM_PROC_ID" <id>\n"
+                       "-p         : a hex bitmask indicating what ports are to be used\n"
+                       "--num-procs: the number of processes which will be used\n"
+                       "--proc-id  : the id of the current process (id < num-procs)\n"
+                       "\n",
+                       prgname);
+       exit(1);
+}
+
+
+/* signal handler configured for SIGTERM and SIGINT to print stats on exit */
+static void
+print_stats(int signum)
+{
+       unsigned i;
+       printf("\nExiting on signal %d\n\n", signum);
+       for (i = 0; i < num_ports; i++){
+               const uint8_t p_num = ports[i];
+               printf("Port %u: RX - %u, TX - %u, Drop - %u\n", (unsigned)p_num,
+                               pstats[p_num].rx, pstats[p_num].tx, pstats[p_num].drop);
+       }
+       exit(0);
+}
+
+/* Parse the argument given in the command line of the application */
+static int
+smp_parse_args(int argc, char **argv)
+{
+       int opt, ret;
+       char **argvopt;
+       int option_index;
+       unsigned i, port_mask = 0;
+       char *prgname = argv[0];
+       static struct option lgopts[] = {
+                       {PARAM_NUM_PROCS, 1, 0, 0},
+                       {PARAM_PROC_ID, 1, 0, 0},
+                       {NULL, 0, 0, 0}
+       };
+
+       argvopt = argv;
+
+       while ((opt = getopt_long(argc, argvopt, "p:", \
+                       lgopts, &option_index)) != EOF) {
+
+               switch (opt) {
+               case 'p':
+                       port_mask = strtoull(optarg, NULL, 16);
+                       break;
+                       /* long options */
+               case 0:
+                       if (strncmp(lgopts[option_index].name, PARAM_NUM_PROCS, 8) == 0)
+                               num_procs = atoi(optarg);
+                       else if (strncmp(lgopts[option_index].name, PARAM_PROC_ID, 7) == 0)
+                               proc_id = atoi(optarg);
+                       break;
+
+               default:
+                       smp_usage(prgname, "Cannot parse all command-line arguments\n");
+               }
+       }
+
+       if (optind >= 0)
+               argv[optind-1] = prgname;
+
+       if (proc_id < 0)
+               smp_usage(prgname, "Invalid or missing proc-id parameter\n");
+       if (rte_eal_process_type() == RTE_PROC_PRIMARY && num_procs == 0)
+               smp_usage(prgname, "Invalid or missing num-procs parameter\n");
+       if (port_mask == 0)
+               smp_usage(prgname, "Invalid or missing port mask\n");
+
+       /* get the port numbers from the port mask */
+       for(i = 0; i < rte_eth_dev_count(); i++)
+               if(port_mask & (1 << i))
+                       ports[num_ports++] = (uint8_t)i;
+
+       ret = optind-1;
+       optind = 0; /* reset getopt lib */
+
+       return (ret);
+}
+
+/* Queries the link status of a port and prints it to screen */
+static void
+report_link_status(uint8_t port)
+{
+       /* get link status */
+       struct rte_eth_link link;
+       rte_eth_link_get(port, &link);
+       if (link.link_status)
+               printf("Port %u: Link Up - %u Gbps - %s\n", (unsigned)port,
+                               (unsigned) link.link_speed / 1000,
+                               (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
+                                               ("full-duplex") : ("half-duplex"));
+       else
+               printf("Port %u: Link Down\n", (unsigned)port);
+}
+
+/*
+ * Initialises a given port using global settings and with the rx buffers
+ * coming from the mbuf_pool passed as parameter
+ */
+static inline int
+smp_port_init(uint8_t port, struct rte_mempool *mbuf_pool, uint16_t num_queues)
+{
+       struct rte_eth_conf port_conf = {
+                       .rxmode = {
+                               .mq_mode = ETH_RSS,
+                               .split_hdr_size = 0,
+                               .header_split   = 0, /**< Header Split disabled */
+                               .hw_ip_checksum = 1, /**< IP checksum offload enabled */
+                               .hw_vlan_filter = 0, /**< VLAN filtering disabled */
+                               .jumbo_frame    = 0, /**< Jumbo Frame Support disabled */
+                               .hw_strip_crc   = 0, /**< CRC stripped by hardware */
+                       },
+                       .rx_adv_conf = {
+                               .rss_conf = {
+                                       .rss_key = NULL,
+                                       .rss_hf = ETH_RSS_IPV4,
+                               },
+                       },
+                       .txmode = {
+                       }
+       };
+       const uint16_t rx_rings = num_queues, tx_rings = num_queues;
+       int retval;
+       uint16_t q;
+
+       if (rte_eal_process_type() == RTE_PROC_SECONDARY)
+               return 0;
+
+       if (port >= rte_eth_dev_count())
+               return -1;
+
+       printf("# Initialising port %u... ", (unsigned)port);
+       fflush(stdout);
+
+       retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
+       if (retval < 0)
+               return retval;
+
+       for (q = 0; q < rx_rings; q ++) {
+               retval = rte_eth_rx_queue_setup(port, q, RX_RING_SIZE,
+                               SOCKET0, &rx_conf_default,
+                               mbuf_pool);
+               if (retval < 0)
+                       return retval;
+       }
+
+       for (q = 0; q < tx_rings; q ++) {
+               retval = rte_eth_tx_queue_setup(port, q, TX_RING_SIZE,
+                               SOCKET0, &tx_conf_default);
+               if (retval < 0)
+                       return retval;
+       }
+
+       rte_eth_promiscuous_enable(port);
+
+       retval  = rte_eth_dev_start(port);
+       if (retval < 0)
+               return retval;
+
+       return 0;
+}
+
+/* Distribute the configured ports over the enabled lcores, filling in the
+ * global lcore_ports[] array. Ports are handed out in pairs; when the pair
+ * count does not divide evenly, the first lcores each take one extra pair.
+ */
+static void
+assign_ports_to_cores(void)
+{
+       const unsigned lcore_count = rte_eal_get_configuration()->lcore_count;
+       const unsigned total_pairs = num_ports / 2;
+       const unsigned base_pairs = total_pairs / lcore_count;
+       unsigned leftover_pairs = total_pairs % lcore_count;
+       unsigned next_port = 0;
+       unsigned lcore;
+
+       RTE_LCORE_FOREACH(lcore) {
+               unsigned assigned = base_pairs * 2;
+               if (leftover_pairs != 0) {
+                       assigned += 2; /* this lcore absorbs one extra pair */
+                       leftover_pairs--;
+               }
+               lcore_ports[lcore].start_port = next_port;
+               lcore_ports[lcore].num_ports = assigned;
+               next_port += assigned;
+       }
+}
+
+/* Main function used by the processing threads.
+ * Prints out some configuration details for the thread and then begins
+ * performing packet RX and TX: every burst read from a port is forwarded
+ * to that port's partner (0 <-> 1, 2 <-> 3, ...). Never returns once the
+ * forwarding loop starts, unless this lcore was assigned no ports.
+ */
+static int
+lcore_main(void *arg __rte_unused)
+{
+       const unsigned id = rte_lcore_id();
+       const unsigned start_port = lcore_ports[id].start_port; /* filled in by assign_ports_to_cores() */
+       const unsigned end_port = start_port + lcore_ports[id].num_ports;
+       const uint16_t q_id = (uint16_t)proc_id; /* each process owns one queue per port */
+       unsigned p, i;
+       char msgbuf[256];
+       int msgbufpos = 0;
+
+       if (start_port == end_port){
+               printf("Lcore %u has nothing to do\n", id);
+               return 0;
+       }
+
+       /* build up message in msgbuf before printing to decrease likelihood
+        * of multi-core message interleaving.
+        */
+       msgbufpos += rte_snprintf(msgbuf, sizeof(msgbuf) - msgbufpos,
+                       "Lcore %u using ports ", id);
+       for (p = start_port; p < end_port; p++){
+               msgbufpos += rte_snprintf(msgbuf + msgbufpos, sizeof(msgbuf) - msgbufpos,
+                               "%u ", (unsigned)ports[p]);
+       }
+       printf("%s\n", msgbuf);
+       printf("lcore %u using queue %u of each port\n", id, (unsigned)q_id);
+
+       /* handle packet I/O from the ports, reading and writing to the
+        * queue number corresponding to our process number (not lcore id)
+        */
+
+       for (;;) {
+               struct rte_mbuf *buf[PKT_BURST];
+
+               for (p = start_port; p < end_port; p++) {
+                       const uint8_t src = ports[p];
+                       const uint8_t dst = ports[p ^ 1]; /* 0 <-> 1, 2 <-> 3 etc */
+                       const uint16_t rx_c = rte_eth_rx_burst(src, q_id, buf, PKT_BURST);
+                       if (rx_c == 0)
+                               continue;
+                       pstats[src].rx += rx_c;
+
+                       const uint16_t tx_c = rte_eth_tx_burst(dst, q_id, buf, rx_c);
+                       pstats[dst].tx += tx_c;
+                       if (tx_c != rx_c) {
+                               /* free mbufs the TX ring could not accept and count the drop */
+                               pstats[dst].drop += (rx_c - tx_c);
+                               for (i = tx_c; i < rx_c; i++)
+                                       rte_pktmbuf_free(buf[i]);
+                       }
+               }
+       }
+}
+
+/* Main function.
+ * Performs initialisation and then calls the lcore_main on each core
+ * to do the packet-processing work.
+ */
+int
+main(int argc, char **argv)
+{
+       static const char *_SMP_MBUF_POOL = "SMP_MBUF_POOL"; /* pool name shared by all co-operating processes */
+       int ret;
+       unsigned i;
+       enum rte_proc_type_t proc_type;
+       struct rte_mempool *mp;
+
+       /* set up signal handlers to print stats on exit */
+       signal(SIGINT, print_stats);
+       signal(SIGTERM, print_stats);
+
+       /* initialise the EAL for all */
+       ret = rte_eal_init(argc, argv);
+       if (ret < 0)
+               rte_exit(EXIT_FAILURE, "Cannot init EAL\n");
+       argc -= ret; /* step past the EAL arguments consumed above */
+       argv += ret;
+
+       /* probe to determine the NIC devices available */
+       proc_type = rte_eal_process_type();
+#ifdef RTE_LIBRTE_IGB_PMD
+       if (rte_igb_pmd_init() < 0)
+               rte_exit(EXIT_FAILURE, "Cannot init igb pmd\n");
+#endif
+#ifdef RTE_LIBRTE_IXGBE_PMD
+       if (rte_ixgbe_pmd_init() < 0)
+               rte_exit(EXIT_FAILURE, "Cannot init ixgbe pmd\n");
+#endif
+       if (rte_eal_pci_probe() < 0)
+               rte_exit(EXIT_FAILURE, "Cannot probe PCI\n");
+       if (rte_eth_dev_count() == 0)
+               rte_exit(EXIT_FAILURE, "No Ethernet ports - bye\n");
+
+       /* parse application arguments (those after the EAL ones) */
+       smp_parse_args(argc, argv);
+
+       /* the primary process creates the shared mbuf pool; secondaries
+        * attach to it by name */
+       mp = (proc_type == RTE_PROC_SECONDARY) ?
+                       rte_mempool_lookup(_SMP_MBUF_POOL) :
+                       rte_mempool_create(_SMP_MBUF_POOL, NB_MBUFS, MBUF_SIZE,
+                                       MBUF_CACHE_SIZE, sizeof(struct rte_pktmbuf_pool_private),
+                                       rte_pktmbuf_pool_init, NULL,
+                                       rte_pktmbuf_init, NULL,
+                                       SOCKET0, 0);
+       if (mp == NULL)
+               rte_exit(EXIT_FAILURE, "Cannot get memory pool for buffers\n");
+
+       /* ports are forwarded in pairs (0<->1, 2<->3, ...) in lcore_main() */
+       if (num_ports & 1)
+               rte_exit(EXIT_FAILURE, "Application must use an even number of ports\n");
+       for(i = 0; i < num_ports; i++){
+               /* only the primary touches the hardware; every process reports */
+               if(proc_type == RTE_PROC_PRIMARY)
+                       if (smp_port_init(ports[i], mp, (uint16_t)num_procs) < 0)
+                               rte_exit(EXIT_FAILURE, "Error initialising ports\n");
+               report_link_status(ports[i]);
+       }
+
+       assign_ports_to_cores();
+
+       RTE_LOG(INFO, APP, "Finished Process Init.\n");
+
+       rte_eal_mp_remote_launch(lcore_main, NULL, CALL_MASTER);
+
+       return 0;
+}
diff --git a/examples/timer/482254_Timer_Sample_App_Guide_Rev1.1.pdf b/examples/timer/482254_Timer_Sample_App_Guide_Rev1.1.pdf
new file mode 100644 (file)
index 0000000..a8bf8fd
Binary files /dev/null and b/examples/timer/482254_Timer_Sample_App_Guide_Rev1.1.pdf differ
diff --git a/examples/timer/Makefile b/examples/timer/Makefile
new file mode 100644 (file)
index 0000000..f4db575
--- /dev/null
@@ -0,0 +1,58 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+ifeq ($(RTE_SDK),)
+$(error "Please define RTE_SDK environment variable")
+endif
+
+# Default target; can be overridden from the command line or environment
+RTE_TARGET ?= x86_64-default-linuxapp-gcc
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# binary name
+APP = timer
+
+# all source are stored in SRCS-y
+SRCS-y := main.c
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+# workaround for a gcc bug with noreturn attribute
+# http://gcc.gnu.org/bugzilla/show_bug.cgi?id=12603
+ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
+CFLAGS_main.o += -Wno-return-type
+endif
+
+include $(RTE_SDK)/mk/rte.extapp.mk
diff --git a/examples/timer/main.c b/examples/timer/main.c
new file mode 100644 (file)
index 0000000..7ecfad1
--- /dev/null
@@ -0,0 +1,156 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <stdint.h>
+#include <errno.h>
+#include <sys/queue.h>
+
+#include <rte_common.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_launch.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_cycles.h>
+#include <rte_timer.h>
+#include <rte_debug.h>
+
+#include "main.h"
+
+#define TIMER_RESOLUTION_CYCLES 20000000ULL /* around 10ms at 2 Ghz */
+
+static struct rte_timer timer0; /* periodic: armed PERIODICAL in MAIN(), stopped by its callback */
+static struct rte_timer timer1; /* one-shot: armed SINGLE, re-armed manually by its callback */
+
+/* timer0 callback: logs the invocation and stops timer0 for good once it
+ * has fired 20 times (it is PERIODICAL, so it reloads itself until then). */
+static void
+timer0_cb(__attribute__((unused)) struct rte_timer *tim,
+         __attribute__((unused)) void *arg)
+{
+       static unsigned invocations; /* zero-initialised; persists across calls */
+       const unsigned lcore_id = rte_lcore_id();
+
+       printf("%s() on lcore %u\n", __func__, lcore_id);
+
+       /* stop the automatic reloading once the count reaches 20 */
+       if (invocations++ == 20)
+               rte_timer_stop(tim);
+}
+
+/* timer1 callback: logs the invocation, then hands the one-shot timer over
+ * to the next enabled lcore (wrapping around), firing again in 1/3 second. */
+static void
+timer1_cb(__attribute__((unused)) struct rte_timer *tim,
+         __attribute__((unused)) void *arg)
+{
+       unsigned lcore_id = rte_lcore_id();
+       uint64_t ticks_per_third;
+
+       printf("%s() on lcore %u\n", __func__, lcore_id);
+
+       /* re-arm as SINGLE on another lcore, so each shot migrates */
+       ticks_per_third = rte_get_hpet_hz() / 3;
+       lcore_id = rte_get_next_lcore(lcore_id, 0, 1);
+       rte_timer_reset(tim, ticks_per_third, SINGLE, lcore_id, timer1_cb, NULL);
+}
+
+/* Per-lcore busy loop: services this core's timer list forever.
+ * rte_timer_manage() is rate-limited to roughly every 10ms (at 2Ghz) using
+ * the cheap TSC, since high timer precision is not needed here and reading
+ * the HPET on every iteration would be inefficient. Never returns. */
+static __attribute__((noreturn)) int
+lcore_mainloop(__attribute__((unused)) void *arg)
+{
+       uint64_t last_manage_tsc = 0;
+       const unsigned lcore_id = rte_lcore_id();
+
+       printf("Starting mainloop on core %u\n", lcore_id);
+
+       for (;;) {
+               const uint64_t now = rte_rdtsc();
+
+               /* run expired timers only once enough cycles have elapsed */
+               if (now - last_manage_tsc > TIMER_RESOLUTION_CYCLES) {
+                       rte_timer_manage();
+                       last_manage_tsc = now;
+               }
+       }
+}
+
+int
+MAIN(int argc, char **argv)
+{
+       int ret;
+       uint64_t hz;
+       unsigned lcore_id;
+
+       /* init EAL */
+       ret = rte_eal_init(argc, argv);
+       if (ret < 0)
+               rte_panic("Cannot init EAL\n");
+
+       /* init RTE timer library */
+       rte_timer_subsystem_init();
+
+       /* init timer structures */
+       rte_timer_init(&timer0);
+       rte_timer_init(&timer1);
+
+       /* load timer0, every second, on master lcore, reloaded automatically */
+       hz = rte_get_hpet_hz(); /* HPET ticks per second */
+       lcore_id = rte_lcore_id();
+       rte_timer_reset(&timer0, hz, PERIODICAL, lcore_id, timer0_cb, NULL); /* NOTE(review): reset result ignored -- presumed to succeed at startup */
+
+       /* load timer1, every second/3, on next lcore, reloaded manually */
+       lcore_id = rte_get_next_lcore(lcore_id, 0, 1);
+       rte_timer_reset(&timer1, hz/3, SINGLE, lcore_id, timer1_cb, NULL);
+
+       /* call lcore_mainloop() on every slave lcore */
+       RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+               rte_eal_remote_launch(lcore_mainloop, NULL, lcore_id);
+       }
+
+       /* call it on master lcore too; never returns */
+       (void) lcore_mainloop(NULL);
+
+       return 0; /* unreachable: lcore_mainloop() is noreturn */
+}
diff --git a/examples/timer/main.h b/examples/timer/main.h
new file mode 100644 (file)
index 0000000..6027cb5
--- /dev/null
@@ -0,0 +1,47 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#ifndef _MAIN_H_
+#define _MAIN_H_
+
+/* Under the baremetal execution environment the application entry point
+ * symbol is _main; otherwise it is the ordinary main(). */
+#ifdef RTE_EXEC_ENV_BAREMETAL
+#define MAIN _main
+#else
+#define MAIN main
+#endif
+
+int MAIN(int argc, char **argv);
+
+#endif /* _MAIN_H_ */
diff --git a/examples/vmdq_dcb/482255_VMDQ_DCB_L2Fwd_Sample_App_Guide_Rev1.1.pdf b/examples/vmdq_dcb/482255_VMDQ_DCB_L2Fwd_Sample_App_Guide_Rev1.1.pdf
new file mode 100644 (file)
index 0000000..ab4b670
Binary files /dev/null and b/examples/vmdq_dcb/482255_VMDQ_DCB_L2Fwd_Sample_App_Guide_Rev1.1.pdf differ
diff --git a/examples/vmdq_dcb/Makefile b/examples/vmdq_dcb/Makefile
new file mode 100644 (file)
index 0000000..82b1981
--- /dev/null
@@ -0,0 +1,59 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+ifeq ($(RTE_SDK),)
+$(error "Please define RTE_SDK environment variable")
+endif
+
+# Default target; can be overridden from the command line or environment
+RTE_TARGET ?= x86_64-default-linuxapp-gcc
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# binary name
+APP = vmdq_dcb_app
+
+# all source are stored in SRCS-y
+SRCS-y := main.c
+
+CFLAGS += $(WERROR_FLAGS)
+
+# workaround for a gcc bug with noreturn attribute
+# http://gcc.gnu.org/bugzilla/show_bug.cgi?id=12603
+ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
+CFLAGS_main.o += -Wno-return-type
+endif
+
+# optimise, but keep debug symbols
+EXTRA_CFLAGS += -O3 -g
+
+include $(RTE_SDK)/mk/rte.extapp.mk
diff --git a/examples/vmdq_dcb/main.c b/examples/vmdq_dcb/main.c
new file mode 100644 (file)
index 0000000..634ebc5
--- /dev/null
@@ -0,0 +1,331 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <stdint.h>
+#include <sys/queue.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <assert.h>
+#include <errno.h>
+#include <signal.h>
+#include <stdarg.h>
+#include <inttypes.h>
+
+#include <rte_common.h>
+#include <rte_log.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_memzone.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_launch.h>
+#include <rte_atomic.h>
+#include <rte_cycles.h>
+#include <rte_prefetch.h>
+#include <rte_lcore.h>
+#include <rte_per_lcore.h>
+#include <rte_branch_prediction.h>
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_random.h>
+#include <rte_debug.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_ring.h>
+#include <rte_log.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_memcpy.h>
+
+#include "main.h"
+
+/* basic constants used in application */
+#define SOCKET0 0
+#define SOCKET1 1
+
+#define NUM_QUEUES 128 /* queues serviced by this app; split evenly between cores */
+
+/* parenthesised so the macro expands safely inside larger expressions */
+#define NUM_MBUFS (64*1024)
+#define MBUF_CACHE_SIZE 64
+#define MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
+
+/* Basic application settings */
+#define NUM_POOLS ETH_16_POOLS /* can be ETH_16_POOLS or ETH_32_POOLS */
+
+#define RX_PORT 0 /* all traffic is read from this port ... */
+#define TX_PORT 1 /* ... and forwarded out of this one */
+
+/*
+ * RX and TX Prefetch, Host, and Write-back threshold values should be
+ * carefully set for optimal performance. Consult the network
+ * controller's datasheet and supporting DPDK documentation for guidance
+ * on how these parameters should be set.
+ */
+/* Default configuration for rx and tx thresholds etc. */
+static const struct rte_eth_rxconf rx_conf_default = {
+       .rx_thresh = {
+               .pthresh = 8,
+               .hthresh = 8,
+               .wthresh = 4,
+       },
+};
+
+/*
+ * These default values are optimized for use with the Intel(R) 82599 10 GbE
+ * Controller and the DPDK ixgbe PMD. Consider using other values for other
+ * network controllers and/or network drivers.
+ */
+static const struct rte_eth_txconf tx_conf_default = {
+       .tx_thresh = {
+               .pthresh = 36,
+               .hthresh = 0,
+               .wthresh = 0,
+       },
+       .tx_free_thresh = 0, /* Use PMD default values */
+       .tx_rs_thresh = 0, /* Use PMD default values */
+};
+
+/* Template vmdq+dcb configuration: the vmdq_dcb_conf member is overwritten
+ * programmatically by get_eth_conf() before being applied to a port */
+static const struct rte_eth_conf vmdq_dcb_conf_default = {
+       .rxmode = {
+               .mq_mode        = ETH_VMDQ_DCB,
+               .split_hdr_size = 0,
+               .header_split   = 0, /**< Header Split disabled */
+               .hw_ip_checksum = 0, /**< IP checksum offload disabled */
+               .hw_vlan_filter = 0, /**< VLAN filtering disabled */
+               .jumbo_frame    = 0, /**< Jumbo Frame Support disabled */
+       },
+       .txmode = {
+       },
+       .rx_adv_conf = {
+               /*
+                * should be overridden separately in code with
+                * appropriate values
+                */
+               .vmdq_dcb_conf = {
+                       .nb_queue_pools = NUM_POOLS,
+                       .enable_default_pool = 0,
+                       .default_pool = 0,
+                       .nb_pool_maps = 0,
+                       .pool_map = {{0, 0},},
+                       .dcb_queue = {0},
+               },
+       },
+};
+
+/* per-queue RX packet counters: written by the forwarding loop in
+ * lcore_main() and read by sighup_handler() when printing statistics */
+volatile unsigned long rxPackets[ NUM_QUEUES ] = {0};
+
+/* VLAN IDs that get_eth_conf() maps round-robin onto the VMDQ pools */
+const uint16_t vlan_tags[] = {
+       0,  1,  2,  3,  4,  5,  6,  7,
+       8,  9, 10, 11,  12, 13, 14, 15,
+       16, 17, 18, 19, 20, 21, 22, 23,
+       24, 25, 26, 27, 28, 29, 30, 31
+};
+
+/* Builds up the correct configuration for vmdq+dcb based on the vlan tags
+ * array given above and the number of traffic classes available for use:
+ * each VLAN tag is mapped round-robin onto a pool, and the DCB user
+ * priorities are spread over the queues of each pool.
+ * Returns 0 on success, -1 if num_pools is not a supported pool count. */
+static inline int
+get_eth_conf(struct rte_eth_conf *eth_conf, enum rte_eth_nb_pools num_pools)
+{
+       struct rte_eth_vmdq_dcb_conf conf;
+       unsigned i;
+
+       if (num_pools != ETH_16_POOLS && num_pools != ETH_32_POOLS)
+               return -1;
+
+       /* zero the whole struct first: the original code left default_pool
+        * (and any padding) uninitialised before copying it out below */
+       memset(&conf, 0, sizeof(conf));
+       conf.nb_queue_pools = num_pools;
+       conf.enable_default_pool = 0;
+       conf.default_pool = 0; /* unused: no default pool is enabled */
+       conf.nb_pool_maps = sizeof(vlan_tags) / sizeof(vlan_tags[0]);
+       for (i = 0; i < conf.nb_pool_maps; i++) {
+               conf.pool_map[i].vlan_id = vlan_tags[i];
+               conf.pool_map[i].pools = 1 << (i % num_pools); /* one pool bit per tag */
+       }
+       for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+               conf.dcb_queue[i] = (uint8_t)(i % (NUM_QUEUES / num_pools));
+       }
+       rte_memcpy(eth_conf, &vmdq_dcb_conf_default, sizeof(*eth_conf));
+       rte_memcpy(&eth_conf->rx_adv_conf.vmdq_dcb_conf, &conf,
+                  sizeof(eth_conf->rx_adv_conf.vmdq_dcb_conf));
+       return 0;
+}
+
+/*
+ * Initialises a given port using global settings and with the rx buffers
+ * coming from the mbuf_pool passed as parameter: VMDQ+DCB RX queues plus
+ * one TX queue per lcore, then starts the port.
+ * Returns 0 on success, a negative value on any failure.
+ */
+static inline int
+port_init(uint8_t port, struct rte_mempool *mbuf_pool)
+{
+       struct rte_eth_conf port_conf;
+       const uint16_t rxRings = ETH_VMDQ_DCB_NUM_QUEUES,
+               txRings = (uint16_t)rte_lcore_count();
+       const uint16_t rxRingSize = 128, txRingSize = 512;
+       int retval;
+       uint16_t q;
+
+       /* the original code ignored this result, risking use of an
+        * uninitialised port_conf if the pool count were ever invalid */
+       retval = get_eth_conf(&port_conf, NUM_POOLS);
+       if (retval < 0)
+               return retval;
+
+       if (port >= rte_eth_dev_count())
+               return -1;
+
+       retval = rte_eth_dev_configure(port, rxRings, txRings, &port_conf);
+       if (retval != 0)
+               return retval;
+
+       for (q = 0; q < rxRings; q++) {
+               retval = rte_eth_rx_queue_setup(port, q, rxRingSize,
+                                               SOCKET0, &rx_conf_default,
+                                               mbuf_pool);
+               if (retval < 0)
+                       return retval;
+       }
+
+       for (q = 0; q < txRings; q++) {
+               retval = rte_eth_tx_queue_setup(port, q, txRingSize,
+                                               SOCKET0, &tx_conf_default);
+               if (retval < 0)
+                       return retval;
+       }
+
+       retval = rte_eth_dev_start(port);
+       if (retval < 0)
+               return retval;
+
+       return 0;
+}
+
+#ifndef RTE_EXEC_ENV_BAREMETAL
+/* SIGHUP handler: dump the per-queue RX counters, grouped one line per
+ * VMDQ pool, then report which signal was handled. */
+static void
+sighup_handler(int signum)
+{
+       const unsigned queues_per_pool = NUM_QUEUES / NUM_POOLS;
+       unsigned i;
+
+       for (i = 0; i < NUM_QUEUES; i++) {
+               if (i % queues_per_pool == 0)
+                       printf("\nPool %u: ", i / queues_per_pool);
+               printf("%lu ", rxPackets[i]);
+       }
+       printf("\nFinished handling signal %d\n", signum);
+}
+#endif
+
+/*
+ * Main per-lcore worker: reads from RX_PORT and writes to TX_PORT.
+ * Each core services an equal, contiguous share of the NUM_QUEUES RX
+ * queues and transmits on the TX queue matching its core number.
+ */
+static  __attribute__((noreturn)) int
+lcore_main(void *arg)
+{
+       const uintptr_t core_num = (uintptr_t)arg;
+       const unsigned num_cores = rte_lcore_count();
+       uint16_t startQueue = (uint16_t)(core_num * (NUM_QUEUES/num_cores)); /* even split: MAIN() enforces a power-of-two core count */
+       uint16_t endQueue = (uint16_t)(startQueue + (NUM_QUEUES/num_cores));
+       uint16_t q, i;
+
+       printf("Core %u(lcore %u) reading queues %i-%i\n", (unsigned)core_num,
+              rte_lcore_id(), startQueue, endQueue - 1);
+
+       for (;;) {
+               struct rte_mbuf *buf[32];
+               const uint16_t buf_size = sizeof(buf) / sizeof(buf[0]);
+
+               for (q = startQueue; q < endQueue; q++) {
+                       const uint16_t rxCount = rte_eth_rx_burst(RX_PORT,
+                                       q, buf, buf_size);
+                       if (rxCount == 0)
+                               continue;
+                       rxPackets[q] += rxCount;
+
+                       const uint16_t txCount = rte_eth_tx_burst(TX_PORT,
+                                       (uint16_t)core_num, buf, rxCount); /* one TX queue per core */
+                       if (txCount != rxCount) {
+                               /* free whatever the TX ring could not accept */
+                               for (i = txCount; i < rxCount; i++)
+                                       rte_pktmbuf_free(buf[i]);
+                       }
+               }
+       }
+}
+
+/* Main function, does initialisation and calls the per-lcore functions */
+int
+MAIN(int argc, char *argv[])
+{
+       unsigned cores;
+       struct rte_mempool *mbuf_pool;
+       unsigned lcore_id;
+       uintptr_t i;
+
+#ifndef RTE_EXEC_ENV_BAREMETAL
+       /* SIGHUP makes the app dump its per-queue statistics */
+       signal(SIGHUP, sighup_handler);
+#endif
+
+       if (rte_eal_init(argc, argv) < 0)
+               rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
+       /* NOTE(review): unlike the other examples these PMD init calls are
+        * not guarded by RTE_LIBRTE_IGB_PMD/RTE_LIBRTE_IXGBE_PMD -- confirm
+        * both drivers are always built for the supported targets */
+       if (rte_igb_pmd_init() != 0 ||
+                       rte_ixgbe_pmd_init() != 0 ||
+                       rte_eal_pci_probe() != 0)
+               rte_exit(EXIT_FAILURE, "Error with NIC driver initialization\n");
+
+       /* queues must split evenly: require a power-of-two core count up to
+        * 16 (note: a single core also passes this check) */
+       cores = rte_lcore_count();
+       if ((cores & (cores - 1)) != 0 || cores > 16) {
+               rte_exit(EXIT_FAILURE,
+                        "This program can only run on 2,4,8 or 16 cores\n\n");
+       }
+
+       mbuf_pool = rte_mempool_create("MBUF_POOL", NUM_MBUFS,
+                                      MBUF_SIZE, MBUF_CACHE_SIZE,
+                                      sizeof(struct rte_pktmbuf_pool_private),
+                                      rte_pktmbuf_pool_init, NULL,
+                                      rte_pktmbuf_init, NULL,
+                                      SOCKET0, 0);
+       if (mbuf_pool == NULL)
+               rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
+
+       if (port_init(RX_PORT, mbuf_pool) != 0 ||
+           port_init(TX_PORT, mbuf_pool) != 0)
+               rte_exit(EXIT_FAILURE, "Cannot initialize network ports\n");
+
+       /* call lcore_main() on every slave lcore, passing each its own
+        * zero-based core index */
+       i = 0;
+       RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+               rte_eal_remote_launch(lcore_main, (void*)i++, lcore_id);
+       }
+       /* call on master too; lcore_main() never returns */
+       (void) lcore_main((void*)i);
+
+       return 0; /* unreachable: lcore_main() is noreturn */
+}
diff --git a/examples/vmdq_dcb/main.h b/examples/vmdq_dcb/main.h
new file mode 100644 (file)
index 0000000..ad1b4b3
--- /dev/null
@@ -0,0 +1,48 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#ifndef _MAIN_H_
+#define _MAIN_H_
+
+
+#ifdef RTE_EXEC_ENV_BAREMETAL
+#define MAIN _main
+#else
+#define MAIN main
+#endif
+
+int MAIN( int argc, char *argv[] );
+
+#endif /* ifndef _MAIN_H_ */
diff --git a/lib/Makefile b/lib/Makefile
new file mode 100644 (file)
index 0000000..4cf1892
--- /dev/null
@@ -0,0 +1,51 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+DIRS-$(CONFIG_RTE_LIBC) += libc
+DIRS-$(CONFIG_RTE_LIBRTE_EAL) += librte_eal
+DIRS-$(CONFIG_RTE_LIBRTE_MALLOC) += librte_malloc
+DIRS-$(CONFIG_RTE_LIBRTE_RING) += librte_ring
+DIRS-$(CONFIG_RTE_LIBRTE_MEMPOOL) += librte_mempool
+DIRS-$(CONFIG_RTE_LIBRTE_MBUF) += librte_mbuf
+DIRS-$(CONFIG_RTE_LIBRTE_TIMER) += librte_timer
+DIRS-$(CONFIG_RTE_LIBRTE_CMDLINE) += librte_cmdline
+DIRS-$(CONFIG_RTE_LIBRTE_ETHER) += librte_ether
+DIRS-$(CONFIG_RTE_LIBRTE_IGB_PMD) += librte_pmd_igb
+DIRS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += librte_pmd_ixgbe
+DIRS-$(CONFIG_RTE_LIBRTE_HASH) += librte_hash
+DIRS-$(CONFIG_RTE_LIBRTE_LPM) += librte_lpm
+DIRS-$(CONFIG_RTE_LIBRTE_NET) += librte_net
+
+include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/lib/librte_cmdline/Makefile b/lib/librte_cmdline/Makefile
new file mode 100644 (file)
index 0000000..faeb9f3
--- /dev/null
@@ -0,0 +1,65 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_cmdline.a
+
+CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3
+
+# all source are stored in SRCS-y
+SRCS-$(CONFIG_RTE_LIBRTE_CMDLINE) := cmdline.c
+SRCS-$(CONFIG_RTE_LIBRTE_CMDLINE) += cmdline_cirbuf.c
+SRCS-$(CONFIG_RTE_LIBRTE_CMDLINE) += cmdline_parse.c
+SRCS-$(CONFIG_RTE_LIBRTE_CMDLINE) += cmdline_parse_etheraddr.c
+SRCS-$(CONFIG_RTE_LIBRTE_CMDLINE) += cmdline_parse_ipaddr.c
+SRCS-$(CONFIG_RTE_LIBRTE_CMDLINE) += cmdline_parse_num.c
+SRCS-$(CONFIG_RTE_LIBRTE_CMDLINE) += cmdline_parse_string.c
+SRCS-$(CONFIG_RTE_LIBRTE_CMDLINE) += cmdline_rdline.c
+SRCS-$(CONFIG_RTE_LIBRTE_CMDLINE) += cmdline_vt100.c
+SRCS-$(CONFIG_RTE_LIBRTE_CMDLINE) += cmdline_socket.c
+SRCS-$(CONFIG_RTE_LIBRTE_CMDLINE) += cmdline_parse_portlist.c
+
+CFLAGS_cmdline.o := -D_GNU_SOURCE
+
+# install includes
+INCS := cmdline.h cmdline_parse.h cmdline_parse_num.h cmdline_parse_ipaddr.h
+INCS += cmdline_parse_etheraddr.h cmdline_parse_string.h cmdline_rdline.h
+INCS += cmdline_vt100.h cmdline_socket.h cmdline_cirbuf.h cmdline_parse_portlist.h
+SYMLINK-$(CONFIG_RTE_LIBRTE_CMDLINE)-include := $(INCS)
+
+# this lib needs eal
+DEPDIRS-$(CONFIG_RTE_LIBRTE_CMDLINE) += lib/librte_eal
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_cmdline/cmdline.c b/lib/librte_cmdline/cmdline.c
new file mode 100644 (file)
index 0000000..dafddfc
--- /dev/null
@@ -0,0 +1,240 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+/*
+ * Copyright (c) 2009, Olivier MATZ <zer0@droids-corp.org>
+ * All rights reserved.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of the University of California, Berkeley nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <inttypes.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <termios.h>
+#include <netinet/in.h>
+
+#include <rte_string_fns.h>
+
+#include "cmdline_parse.h"
+#include "cmdline_rdline.h"
+#include "cmdline.h"
+
+static void
+cmdline_valid_buffer(struct rdline *rdl, const char *buf,
+                    __attribute__((unused)) unsigned int size)
+{
+       struct cmdline *cl = rdl->opaque;
+       int ret;
+       ret = cmdline_parse(cl, buf);
+       if (ret == CMDLINE_PARSE_AMBIGUOUS)
+               cmdline_printf(cl, "Ambiguous command\n");
+       else if (ret == CMDLINE_PARSE_NOMATCH)
+               cmdline_printf(cl, "Command not found\n");
+       else if (ret == CMDLINE_PARSE_BAD_ARGS)
+               cmdline_printf(cl, "Bad arguments\n");
+}
+
+static int
+cmdline_complete_buffer(struct rdline *rdl, const char *buf,
+                       char *dstbuf, unsigned int dstsize,
+                       int *state)
+{
+       struct cmdline *cl = rdl->opaque;
+       return cmdline_complete(cl, buf, state, dstbuf, dstsize);
+}
+
+int
+cmdline_write_char(struct rdline *rdl, char c)
+{
+       int ret = -1;
+       struct cmdline *cl = rdl->opaque;
+
+       if (cl->s_out >= 0)
+               ret = write(cl->s_out, &c, 1);
+
+       return ret;
+}
+
+
+void
+cmdline_set_prompt(struct cmdline *cl, const char *prompt)
+{
+       rte_snprintf(cl->prompt, sizeof(cl->prompt), "%s", prompt);
+}
+
+struct cmdline *
+cmdline_new(cmdline_parse_ctx_t *ctx, const char *prompt, int s_in, int s_out)
+{
+       struct cmdline *cl;
+       cl = malloc(sizeof(struct cmdline));
+       if (cl == NULL)
+               return NULL;
+       memset(cl, 0, sizeof(struct cmdline));
+       cl->s_in = s_in;
+       cl->s_out = s_out;
+       cl->ctx = ctx;
+
+       rdline_init(&cl->rdl, cmdline_write_char,
+                   cmdline_valid_buffer, cmdline_complete_buffer);
+       cl->rdl.opaque = cl;
+       cmdline_set_prompt(cl, prompt);
+       rdline_newline(&cl->rdl, cl->prompt);
+
+       return cl;
+}
+
+void
+cmdline_free(struct cmdline *cl)
+{
+       dprintf("called\n");
+       if (cl->s_in > 2)
+               close(cl->s_in);
+       if (cl->s_out != cl->s_in && cl->s_out > 2)
+               close(cl->s_out);
+       free(cl);
+}
+
+void
+cmdline_printf(const struct cmdline *cl, const char *fmt, ...)
+{
+       va_list ap;
+
+#ifdef _GNU_SOURCE
+       if (cl->s_out < 0)
+               return;
+       va_start(ap, fmt);
+       vdprintf(cl->s_out, fmt, ap);
+       va_end(ap);
+#else
+       int ret;
+       char *buf;
+
+       if (cl->s_out < 0)
+               return;
+
+       buf = malloc(BUFSIZ);
+       if (buf == NULL)
+               return;
+       va_start(ap, fmt);
+       ret = vsnprintf(buf, BUFSIZ, fmt, ap);
+       va_end(ap);
+       if (ret < 0)
+               return;
+       if (ret >= BUFSIZ)
+               ret = BUFSIZ - 1;
+       write(cl->s_out, buf, ret);
+       free(buf);
+#endif
+}
+
+int
+cmdline_in(struct cmdline *cl, const char *buf, int size)
+{
+       const char *history, *buffer;
+       size_t histlen, buflen;
+       int ret = 0;
+       int i, same;
+
+       for (i=0; i<size; i++) {
+               ret = rdline_char_in(&cl->rdl, buf[i]);
+
+               if (ret == RDLINE_RES_VALIDATED) {
+                       buffer = rdline_get_buffer(&cl->rdl);
+                       history = rdline_get_history_item(&cl->rdl, 0);
+                       if (history) {
+                               histlen = strnlen(history, RDLINE_BUF_SIZE);
+                               same = !memcmp(buffer, history, histlen) &&
+                                       buffer[histlen] == '\n';
+                       }
+                       else
+                               same = 0;
+                       buflen = strnlen(buffer, RDLINE_BUF_SIZE);
+                       if (buflen > 1 && !same)
+                               rdline_add_history(&cl->rdl, buffer);
+                       rdline_newline(&cl->rdl, cl->prompt);
+               }
+               else if (ret == RDLINE_RES_EOF)
+                       return -1;
+               else if (ret == RDLINE_RES_EXITED)
+                       return -1;
+       }
+       return i;
+}
+
+void
+cmdline_quit(struct cmdline *cl)
+{
+       rdline_quit(&cl->rdl);
+}
+
+void
+cmdline_interact(struct cmdline *cl)
+{
+       char c;
+
+       c = -1;
+       while (1) {
+               if (read(cl->s_in, &c, 1) < 0)
+                       break;
+               if (cmdline_in(cl, &c, 1) < 0)
+                       break;
+       }
+}
diff --git a/lib/librte_cmdline/cmdline.h b/lib/librte_cmdline/cmdline.h
new file mode 100644 (file)
index 0000000..5754afe
--- /dev/null
@@ -0,0 +1,94 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+/*
+ * Copyright (c) 2009, Olivier MATZ <zer0@droids-corp.org>
+ * All rights reserved.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of the University of California, Berkeley nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _CMDLINE_H_
+#define _CMDLINE_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct cmdline {
+       int s_in;
+       int s_out;
+       cmdline_parse_ctx_t *ctx;
+       struct rdline rdl;
+       char prompt[RDLINE_PROMPT_SIZE];
+#ifdef RTE_EXEC_ENV_LINUXAPP
+       struct termios oldterm;
+#endif
+};
+
+struct cmdline *cmdline_new(cmdline_parse_ctx_t *ctx, const char *prompt, int s_in, int s_out);
+void cmdline_set_prompt(struct cmdline *cl, const char *prompt);
+void cmdline_free(struct cmdline *cl);
+void cmdline_printf(const struct cmdline *cl, const char *fmt, ...);
+int cmdline_in(struct cmdline *cl, const char *buf, int size);
+int cmdline_write_char(struct rdline *rdl, char c);
+void cmdline_interact(struct cmdline *cl);
+void cmdline_quit(struct cmdline *cl);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _CMDLINE_H_ */
diff --git a/lib/librte_cmdline/cmdline_cirbuf.c b/lib/librte_cmdline/cmdline_cirbuf.c
new file mode 100644 (file)
index 0000000..ccc51fc
--- /dev/null
@@ -0,0 +1,434 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+/*
+ * Copyright (c) 2009, Olivier MATZ <zer0@droids-corp.org>
+ * All rights reserved.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of the University of California, Berkeley nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+#include <errno.h>
+
+#include "cmdline_cirbuf.h"
+
+
+void
+cirbuf_init(struct cirbuf *cbuf, char *buf, unsigned int start, unsigned int maxlen)
+{
+       cbuf->maxlen = maxlen;
+       cbuf->len = 0;
+       cbuf->start = start;
+       cbuf->end = start;
+       cbuf->buf = buf;
+}
+
+/* multiple add */
+
+int
+cirbuf_add_buf_head(struct cirbuf *cbuf, const char *c, unsigned int n)
+{
+       unsigned int e;
+
+       if (!n || n > CIRBUF_GET_FREELEN(cbuf))
+               return -EINVAL;
+
+       e = CIRBUF_IS_EMPTY(cbuf) ? 1 : 0;
+
+       if (n < cbuf->start + e) {
+               dprintf("s[%d] -> d[%d] (%d)\n", 0, cbuf->start - n + e, n);
+               memcpy(cbuf->buf + cbuf->start - n + e, c, n);
+       }
+       else {
+               dprintf("s[%d] -> d[%d] (%d)\n", + n - (cbuf->start + e), 0,
+                       cbuf->start + e);
+               dprintf("s[%d] -> d[%d] (%d)\n", cbuf->maxlen - n +
+                       (cbuf->start + e), 0, n - (cbuf->start + e));
+               memcpy(cbuf->buf, c  + n - (cbuf->start + e) , cbuf->start + e);
+               memcpy(cbuf->buf + cbuf->maxlen - n + (cbuf->start + e), c,
+                      n - (cbuf->start + e));
+       }
+       cbuf->len += n;
+       cbuf->start += (cbuf->maxlen - n + e);
+       cbuf->start %= cbuf->maxlen;
+       return n;
+}
+
+/* multiple add */
+
+int
+cirbuf_add_buf_tail(struct cirbuf *cbuf, const char *c, unsigned int n)
+{
+       unsigned int e;
+
+       if (!n || n > CIRBUF_GET_FREELEN(cbuf))
+               return -EINVAL;
+
+       e = CIRBUF_IS_EMPTY(cbuf) ? 1 : 0;
+
+       if (n < cbuf->maxlen - cbuf->end - 1 + e) {
+               dprintf("s[%d] -> d[%d] (%d)\n", 0, cbuf->end + !e, n);
+               memcpy(cbuf->buf + cbuf->end + !e, c, n);
+       }
+       else {
+               dprintf("s[%d] -> d[%d] (%d)\n", cbuf->end + !e, 0,
+                       cbuf->maxlen - cbuf->end - 1 + e);
+               dprintf("s[%d] -> d[%d] (%d)\n", cbuf->maxlen - cbuf->end - 1 +
+                       e, 0, n - cbuf->maxlen + cbuf->end + 1 - e);
+               memcpy(cbuf->buf + cbuf->end + !e, c, cbuf->maxlen -
+                      cbuf->end - 1 + e);
+               memcpy(cbuf->buf, c + cbuf->maxlen - cbuf->end - 1 + e,
+                      n - cbuf->maxlen + cbuf->end + 1 - e);
+       }
+       cbuf->len += n;
+       cbuf->end += n - e;
+       cbuf->end %= cbuf->maxlen;
+       return n;
+}
+
+/* add at head */
+
+static inline void
+__cirbuf_add_head(struct cirbuf * cbuf, char c)
+{
+       if (!CIRBUF_IS_EMPTY(cbuf)) {
+               cbuf->start += (cbuf->maxlen - 1);
+               cbuf->start %= cbuf->maxlen;
+       }
+       cbuf->buf[cbuf->start] = c;
+       cbuf->len ++;
+}
+
+int
+cirbuf_add_head_safe(struct cirbuf * cbuf, char c)
+{
+       if (cbuf && !CIRBUF_IS_FULL(cbuf)) {
+               __cirbuf_add_head(cbuf, c);
+               return 0;
+       }
+       return -EINVAL;
+}
+
+void
+cirbuf_add_head(struct cirbuf * cbuf, char c)
+{
+       __cirbuf_add_head(cbuf, c);
+}
+
+/* add at tail */
+
+static inline void
+__cirbuf_add_tail(struct cirbuf * cbuf, char c)
+{
+       if (!CIRBUF_IS_EMPTY(cbuf)) {
+               cbuf->end ++;
+               cbuf->end %= cbuf->maxlen;
+       }
+       cbuf->buf[cbuf->end] = c;
+       cbuf->len ++;
+}
+
+int
+cirbuf_add_tail_safe(struct cirbuf * cbuf, char c)
+{
+       if (cbuf && !CIRBUF_IS_FULL(cbuf)) {
+               __cirbuf_add_tail(cbuf, c);
+               return 0;
+       }
+       return -EINVAL;
+}
+
+void
+cirbuf_add_tail(struct cirbuf * cbuf, char c)
+{
+       __cirbuf_add_tail(cbuf, c);
+}
+
+
+static inline void
+__cirbuf_shift_left(struct cirbuf *cbuf)
+{
+       unsigned int i;
+       char tmp = cbuf->buf[cbuf->start];
+
+       for (i=0 ; i<cbuf->len ; i++) {
+               cbuf->buf[(cbuf->start+i)%cbuf->maxlen] =
+                       cbuf->buf[(cbuf->start+i+1)%cbuf->maxlen];
+       }
+       cbuf->buf[(cbuf->start-1+cbuf->maxlen)%cbuf->maxlen] = tmp;
+       cbuf->start += (cbuf->maxlen - 1);
+       cbuf->start %= cbuf->maxlen;
+       cbuf->end += (cbuf->maxlen - 1);
+       cbuf->end %= cbuf->maxlen;
+}
+
+static inline void
+__cirbuf_shift_right(struct cirbuf *cbuf)
+{
+       unsigned int i;
+       char tmp = cbuf->buf[cbuf->end];
+
+       for (i=0 ; i<cbuf->len ; i++) {
+               cbuf->buf[(cbuf->end+cbuf->maxlen-i)%cbuf->maxlen] =
+                       cbuf->buf[(cbuf->end+cbuf->maxlen-i-1)%cbuf->maxlen];
+       }
+       cbuf->buf[(cbuf->end+1)%cbuf->maxlen] = tmp;
+       cbuf->start += 1;
+       cbuf->start %= cbuf->maxlen;
+       cbuf->end += 1;
+       cbuf->end %= cbuf->maxlen;
+}
+
+/* XXX we could do a better algorithm here... */
+void cirbuf_align_left(struct cirbuf * cbuf)
+{
+       if (cbuf->start < cbuf->maxlen/2) {
+               while (cbuf->start != 0) {
+                       __cirbuf_shift_left(cbuf);
+               }
+       }
+       else {
+               while (cbuf->start != 0) {
+                       __cirbuf_shift_right(cbuf);
+               }
+       }
+}
+
+/* XXX we could do a better algorithm here... */
+void cirbuf_align_right(struct cirbuf * cbuf)
+{
+       if (cbuf->start >= cbuf->maxlen/2) {
+               while (cbuf->end != cbuf->maxlen-1) {
+                       __cirbuf_shift_left(cbuf);
+               }
+       }
+       else {
+               while (cbuf->start != cbuf->maxlen-1) {
+                       __cirbuf_shift_right(cbuf);
+               }
+       }
+}
+
+/* buffer del */
+
+int
+cirbuf_del_buf_head(struct cirbuf *cbuf, unsigned int size)
+{
+       if (!size || size > CIRBUF_GET_LEN(cbuf))
+               return -EINVAL;
+
+       cbuf->len -= size;
+       if (CIRBUF_IS_EMPTY(cbuf)) {
+               cbuf->start += size - 1;
+               cbuf->start %= cbuf->maxlen;
+       }
+       else {
+               cbuf->start += size;
+               cbuf->start %= cbuf->maxlen;
+       }
+       return 0;
+}
+
+/* buffer del */
+
+int
+cirbuf_del_buf_tail(struct cirbuf *cbuf, unsigned int size)
+{
+       if (!size || size > CIRBUF_GET_LEN(cbuf))
+               return -EINVAL;
+
+       cbuf->len -= size;
+       if (CIRBUF_IS_EMPTY(cbuf)) {
+               cbuf->end  += (cbuf->maxlen - size + 1);
+               cbuf->end %= cbuf->maxlen;
+       }
+       else {
+               cbuf->end  += (cbuf->maxlen - size);
+               cbuf->end %= cbuf->maxlen;
+       }
+       return 0;
+}
+
+/* del at head */
+
+static inline void
+__cirbuf_del_head(struct cirbuf * cbuf)
+{
+       cbuf->len --;
+       if (!CIRBUF_IS_EMPTY(cbuf)) {
+               cbuf->start ++;
+               cbuf->start %= cbuf->maxlen;
+       }
+}
+
+int
+cirbuf_del_head_safe(struct cirbuf * cbuf)
+{
+       if (cbuf && !CIRBUF_IS_EMPTY(cbuf)) {
+               __cirbuf_del_head(cbuf);
+               return 0;
+       }
+       return -EINVAL;
+}
+
+void
+cirbuf_del_head(struct cirbuf * cbuf)
+{
+       __cirbuf_del_head(cbuf);
+}
+
+/* del at tail */
+
+static inline void
+__cirbuf_del_tail(struct cirbuf * cbuf)
+{
+       cbuf->len --;
+       if (!CIRBUF_IS_EMPTY(cbuf)) {
+               cbuf->end  += (cbuf->maxlen - 1);
+               cbuf->end %= cbuf->maxlen;
+       }
+}
+
+int
+cirbuf_del_tail_safe(struct cirbuf * cbuf)
+{
+       if (cbuf && !CIRBUF_IS_EMPTY(cbuf)) {
+               __cirbuf_del_tail(cbuf);
+               return 0;
+       }
+       return -EINVAL;
+}
+
+void
+cirbuf_del_tail(struct cirbuf * cbuf)
+{
+       __cirbuf_del_tail(cbuf);
+}
+
+/* convert to buffer */
+
+int
+cirbuf_get_buf_head(struct cirbuf *cbuf, char *c, unsigned int size)
+{
+       unsigned int n;
+
+       n = (size < CIRBUF_GET_LEN(cbuf)) ? size : CIRBUF_GET_LEN(cbuf);
+
+       if (!n)
+               return 0;
+
+       if (cbuf->start <= cbuf->end) {
+               dprintf("s[%d] -> d[%d] (%d)\n", cbuf->start, 0, n);
+               memcpy(c, cbuf->buf + cbuf->start , n);
+       }
+       else {
+               dprintf("s[%d] -> d[%d] (%d)\n", cbuf->start, 0,
+                       cbuf->maxlen - cbuf->start);
+               dprintf("s[%d] -> d[%d] (%d)\n", 0,cbuf->maxlen - cbuf->start,
+                       n - cbuf->maxlen + cbuf->start);
+               memcpy(c, cbuf->buf + cbuf->start , cbuf->maxlen - cbuf->start);
+               memcpy(c + cbuf->maxlen - cbuf->start, cbuf->buf,
+                      n - cbuf->maxlen + cbuf->start);
+       }
+       return n;
+}
+
+/* convert to buffer */
+
+int
+cirbuf_get_buf_tail(struct cirbuf *cbuf, char *c, unsigned int size)
+{
+       unsigned int n;
+
+       n = (size < CIRBUF_GET_LEN(cbuf)) ? size : CIRBUF_GET_LEN(cbuf);
+
+       if (!n)
+               return 0;
+
+       if (cbuf->start <= cbuf->end) {
+               dprintf("s[%d] -> d[%d] (%d)\n", cbuf->end - n + 1, 0, n);
+               memcpy(c, cbuf->buf + cbuf->end - n + 1, n);
+       }
+       else {
+               dprintf("s[%d] -> d[%d] (%d)\n", 0,
+                       cbuf->maxlen - cbuf->start, cbuf->end + 1);
+               dprintf("s[%d] -> d[%d] (%d)\n",
+                       cbuf->maxlen - n + cbuf->end + 1, 0, n - cbuf->end - 1);
+
+               memcpy(c + cbuf->maxlen - cbuf->start,
+                      cbuf->buf, cbuf->end + 1);
+               memcpy(c, cbuf->buf + cbuf->maxlen - n + cbuf->end +1,
+                      n - cbuf->end - 1);
+       }
+       return n;
+}
+
+/* get head or get tail */
+
+char
+cirbuf_get_head(struct cirbuf * cbuf)
+{
+       return cbuf->buf[cbuf->start];
+}
+
+/* get head or get tail */
+
+char
+cirbuf_get_tail(struct cirbuf * cbuf)
+{
+       return cbuf->buf[cbuf->end];
+}
+
diff --git a/lib/librte_cmdline/cmdline_cirbuf.h b/lib/librte_cmdline/cmdline_cirbuf.h
new file mode 100644 (file)
index 0000000..f934292
--- /dev/null
@@ -0,0 +1,248 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+/*
+ * Copyright (c) 2009, Olivier MATZ <zer0@droids-corp.org>
+ * All rights reserved.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of the University of California, Berkeley nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _CIRBUF_H_
+#define _CIRBUF_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * This structure is the header of a cirbuf type.
+ */
+struct cirbuf {
+       unsigned int maxlen;    /**< total length of the fifo (number of elements) */
+       unsigned int start;     /**< index of the first element */
+       unsigned int end;       /**< index of the last element */
+       unsigned int len;       /**< current number of elements in the fifo */
+       char *buf;              /**< flat storage area, supplied via cirbuf_init() */
+};
+
+/* #define CIRBUF_DEBUG */
+
+#ifdef CIRBUF_DEBUG
+#define dprintf(fmt, ...) printf("line %3.3d - " fmt, __LINE__, ##__VA_ARGS__)
+#else
+#define dprintf(args...) do {} while(0)
+#endif
+
+
+/**
+ * Init the circular buffer
+ */
+void cirbuf_init(struct cirbuf *cbuf, char *buf, unsigned int start, unsigned int maxlen);
+
+
+/**
+ * Return 1 if the circular buffer is full
+ */
+#define CIRBUF_IS_FULL(cirbuf) ((cirbuf)->maxlen == (cirbuf)->len)
+
+/**
+ * Return 1 if the circular buffer is empty
+ */
+#define CIRBUF_IS_EMPTY(cirbuf) ((cirbuf)->len == 0)
+
+/**
+ * return current size of the circular buffer (number of used elements)
+ */
+#define CIRBUF_GET_LEN(cirbuf) ((cirbuf)->len)
+
+/**
+ * return size of the circular buffer (used + free elements)
+ */
+#define CIRBUF_GET_MAXLEN(cirbuf) ((cirbuf)->maxlen)
+
+/**
+ * return the number of free elts
+ */
+#define CIRBUF_GET_FREELEN(cirbuf) ((cirbuf)->maxlen - (cirbuf)->len)
+
+/**
+ * Iterator for a circular buffer
+ *   c: struct cirbuf pointer
+ *   i: an integer type internally used in the macro
+ *   e: char that takes the value for each iteration
+ */
+#define CIRBUF_FOREACH(c, i, e)                                 \
+       for ( i=0, e=(c)->buf[(c)->start] ;                     \
+               i<((c)->len) ;                                  \
+               i ++,  e=(c)->buf[((c)->start+i)%((c)->maxlen)])
+
+
+/**
+ * Add a character at head of the circular buffer. Return 0 on success, or
+ * a negative value on error.
+ */
+int cirbuf_add_head_safe(struct cirbuf *cbuf, char c);
+
+/**
+ * Add a character at head of the circular buffer. You _must_ check that you
+ * have enough free space in the buffer before calling this func.
+ */
+void cirbuf_add_head(struct cirbuf *cbuf, char c);
+
+/**
+ * Add a character at tail of the circular buffer. Return 0 on success, or
+ * a negative value on error.
+ */
+int cirbuf_add_tail_safe(struct cirbuf *cbuf, char c);
+
+/**
+ * Add a character at tail of the circular buffer. You _must_ check that you
+ * have enough free space in the buffer before calling this func.
+ */
+void cirbuf_add_tail(struct cirbuf *cbuf, char c);
+
+/**
+ * Remove a char at the head of the circular buffer. Return 0 on
+ * success, or a negative value on error.
+ */
+int cirbuf_del_head_safe(struct cirbuf *cbuf);
+
+/**
+ * Remove a char at the head of the circular buffer. You _must_ check
+ * that buffer is not empty before calling the function.
+ */
+void cirbuf_del_head(struct cirbuf *cbuf);
+
+/**
+ * Remove a char at the tail of the circular buffer. Return 0 on
+ * success, or a negative value on error.
+ */
+int cirbuf_del_tail_safe(struct cirbuf *cbuf);
+
+/**
+ * Remove a char at the tail of the circular buffer. You _must_ check
+ * that buffer is not empty before calling the function.
+ */
+void cirbuf_del_tail(struct cirbuf *cbuf);
+
+/**
+ * Return the head of the circular buffer. You _must_ check that
+ * buffer is not empty before calling the function.
+ */
+char cirbuf_get_head(struct cirbuf *cbuf);
+
+/**
+ * Return the tail of the circular buffer. You _must_ check that
+ * buffer is not empty before calling the function.
+ */
+char cirbuf_get_tail(struct cirbuf *cbuf);
+
+/**
+ * Add a buffer at head of the circular buffer. 'c' is a pointer to a
+ * buffer, and n is the number of char to add. Return the number of
+ * copied bytes on success, or a negative value on error.
+ */
+int cirbuf_add_buf_head(struct cirbuf *cbuf, const char *c, unsigned int n);
+
+/**
+ * Add a buffer at tail of the circular buffer. 'c' is a pointer to a
+ * buffer, and n is the number of char to add. Return the number of
+ * copied bytes on success, or a negative value on error.
+ */
+int cirbuf_add_buf_tail(struct cirbuf *cbuf, const char *c, unsigned int n);
+
+/**
+ * Remove chars at the head of the circular buffer. Return 0 on
+ * success, or a negative value on error.
+ */
+int cirbuf_del_buf_head(struct cirbuf *cbuf, unsigned int size);
+
+/**
+ * Remove chars at the tail of the circular buffer. Return 0 on
+ * success, or a negative value on error.
+ */
+int cirbuf_del_buf_tail(struct cirbuf *cbuf, unsigned int size);
+
+/**
+ * Copy a maximum of 'size' characters from the head of the circular
+ * buffer to a flat one pointed by 'c'. Return the number of copied
+ * chars.
+ */
+int cirbuf_get_buf_head(struct cirbuf *cbuf, char *c, unsigned int size);
+
+/**
+ * Copy a maximum of 'size' characters from the tail of the circular
+ * buffer to a flat one pointed by 'c'. Return the number of copied
+ * chars.
+ */
+int cirbuf_get_buf_tail(struct cirbuf *cbuf, char *c, unsigned int size);
+
+
+/**
+ * Set the start of the data to the index 0 of the internal buffer.
+ */
+void cirbuf_align_left(struct cirbuf *cbuf);
+
+/**
+ * Set the end of the data to the last index of the internal buffer.
+ */
+void cirbuf_align_right(struct cirbuf *cbuf);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _CIRBUF_H_ */
diff --git a/lib/librte_cmdline/cmdline_parse.c b/lib/librte_cmdline/cmdline_parse.c
new file mode 100644 (file)
index 0000000..f9d46ca
--- /dev/null
@@ -0,0 +1,544 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+/*
+ * Copyright (c) 2009, Olivier MATZ <zer0@droids-corp.org>
+ * All rights reserved.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of the University of California, Berkeley nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <string.h>
+#include <inttypes.h>
+#include <ctype.h>
+#include <termios.h>
+
+#include <netinet/in.h>
+
+#include <rte_string_fns.h>
+
+#include "cmdline_rdline.h"
+#include "cmdline_parse.h"
+#include "cmdline.h"
+
+#ifdef RTE_LIBRTE_CMDLINE_DEBUG
+#define debug_printf printf
+#else
+#define debug_printf(args...) do {} while(0)
+#endif
+
+#define CMDLINE_BUFFER_SIZE 64
+
+/* isblank() needs _XOPEN_SOURCE >= 600 || _ISOC99_SOURCE, so use our
+ * own. */
+/* Return 1 when 'c' is a space or a tab, 0 otherwise. */
+static int
+isblank2(char c)
+{
+       return (c == ' ' || c == '\t') ? 1 : 0;
+}
+
+/* Return 1 when 'c' terminates a line ('\n' or '\r'), 0 otherwise. */
+static int
+isendofline(char c)
+{
+       return (c == '\n' || c == '\r') ? 1 : 0;
+}
+
+/* Return 1 when 'c' is the comment marker '#', 0 otherwise. */
+static int
+iscomment(char c)
+{
+       return (c == '#') ? 1 : 0;
+}
+
+/* A token ends on a NUL, a comment marker, a blank, or an
+ * end-of-line character; return 1 in those cases, 0 otherwise. */
+int
+cmdline_isendoftoken(char c)
+{
+       if (c == '\0')
+               return 1;
+       if (iscomment(c) || isblank2(c) || isendofline(c))
+               return 1;
+       return 0;
+}
+
+/* Return the length of the common prefix of the two NUL-terminated
+ * strings 's1' and 's2'. */
+static unsigned int
+nb_common_chars(const char * s1, const char * s2)
+{
+       unsigned int count;
+
+       for (count = 0; s1[count] != '\0' && s1[count] == s2[count]; count++)
+               ;
+       return count;
+}
+
+/**
+ * Try to match the buffer with an instruction (only the first
+ * nb_match_token tokens if != 0). Return 0 if all tokens match,
+ * -1 if no token matches, otherwise the number of matched tokens.
+ */
+/* Implementation note: each token descriptor is copied into a local
+ * struct cmdline_token_hdr with memcpy before its ops are used. */
+static int
+match_inst(cmdline_parse_inst_t *inst, const char *buf,
+          unsigned int nb_match_token, void * result_buf)
+{
+       unsigned int token_num=0;
+       cmdline_parse_token_hdr_t * token_p;
+       unsigned int i=0;
+       int n = 0;
+       struct cmdline_token_hdr token_hdr;
+
+       /* fetch the first token descriptor, if any */
+       token_p = inst->tokens[token_num];
+       if (token_p)
+               memcpy(&token_hdr, token_p, sizeof(token_hdr));
+
+       /* check if we match all tokens of inst */
+       while (token_p && (!nb_match_token || i<nb_match_token)) {
+               debug_printf("TK\n");
+               /* skip spaces */
+               while (isblank2(*buf)) {
+                       buf++;
+               }
+
+               /* end of buf */
+               if ( isendofline(*buf) || iscomment(*buf) )
+                       break;
+
+               /* parse the current token; when result_buf is set, the
+                * parsed value is written at token_hdr.offset inside it */
+               if (result_buf)
+                       n = token_hdr.ops->parse(token_p, buf,
+                                                (char *)result_buf +
+                                                token_hdr.offset);
+               else
+                       n = token_hdr.ops->parse(token_p, buf, NULL);
+
+               /* parse() < 0 means this token did not match */
+               if (n < 0)
+                       break;
+
+               debug_printf("TK parsed (len=%d)\n", n);
+               i++;
+               buf += n;
+
+               token_num ++;
+               token_p = inst->tokens[token_num];
+               if (token_p)
+                       memcpy(&token_hdr, token_p, sizeof(token_hdr));
+       }
+
+       /* no token matched at all */
+       if (i==0)
+               return -1;
+
+       /* in case we want to match a specific num of token */
+       if (nb_match_token) {
+               if (i == nb_match_token) {
+                       return 0;
+               }
+               return i;
+       }
+
+       /* we don't match all the tokens */
+       if (token_p) {
+               return i;
+       }
+
+       /* check whether some non-blank input remains in the buffer */
+       while (isblank2(*buf)) {
+               buf++;
+       }
+
+       /* end of buf: full match */
+       if ( isendofline(*buf) || iscomment(*buf) )
+               return 0;
+
+       /* garbage after inst */
+       return i;
+}
+
+
+/* Parse one complete line of 'buf' against every instruction of the
+ * cmdline context, then invoke the callback of the unique matching
+ * instruction.  Returns the consumed line length (including the
+ * end-of-line chars) on success or on an empty line, 0 when the buffer
+ * does not yet contain a full line, or a negative CMDLINE_PARSE_*
+ * error code (NOMATCH, BAD_ARGS, AMBIGUOUS). */
+int
+cmdline_parse(struct cmdline *cl, const char * buf)
+{
+       unsigned int inst_num=0;
+       cmdline_parse_inst_t *inst;
+       const char *curbuf;
+       char result_buf[BUFSIZ];        /* filled by match_inst() with parsed token values */
+       void (*f)(void *, struct cmdline *, void *) = NULL;
+       void *data = NULL;
+       int comment = 0;
+       int linelen = 0;
+       int parse_it = 0;
+       int err = CMDLINE_PARSE_NOMATCH;
+       int tok;
+       cmdline_parse_ctx_t *ctx = cl->ctx;
+#ifdef RTE_LIBRTE_CMDLINE_DEBUG
+       char debug_buf[BUFSIZ];
+#endif
+
+       /*
+        * - look if the buffer contains at least one line
+        * - look if line contains only spaces or comments
+        * - count line length
+        */
+       curbuf = buf;
+       while (! isendofline(*curbuf)) {
+               if ( *curbuf == '\0' ) {
+                       /* no complete line yet: ask the caller to keep reading */
+                       debug_printf("Incomplete buf (len=%d)\n", linelen);
+                       return 0;
+               }
+               if ( iscomment(*curbuf) ) {
+                       comment = 1;
+               }
+               if ( ! isblank2(*curbuf) && ! comment) {
+                       parse_it = 1;
+               }
+               curbuf++;
+               linelen++;
+       }
+
+       /* skip all endofline chars */
+       while (isendofline(buf[linelen])) {
+               linelen++;
+       }
+
+       /* empty line */
+       if ( parse_it == 0 ) {
+               debug_printf("Empty line (len=%d)\n", linelen);
+               return linelen;
+       }
+
+#ifdef RTE_LIBRTE_CMDLINE_DEBUG
+       rte_snprintf(debug_buf, (linelen>64 ? 64 : linelen), "%s", buf);
+       debug_printf("Parse line : len=%d, <%s>\n", linelen, debug_buf);
+#endif
+
+       /* parse it !! */
+       inst = ctx[inst_num];
+       while (inst) {
+               debug_printf("INST %d\n", inst_num);
+
+               /* fully parsed */
+               tok = match_inst(inst, buf, 0, result_buf);
+
+               if (tok > 0) /* we matched at least one token */
+                       err = CMDLINE_PARSE_BAD_ARGS;
+
+               else if (!tok) {
+                       debug_printf("INST fully parsed\n");
+                       /* skip spaces */
+                       while (isblank2(*curbuf)) {
+                               curbuf++;
+                       }
+
+                       /* if end of buf -> there is no garbage after inst */
+                       /* NOTE(review): curbuf still points at the end-of-line
+                        * char found by the counting loop above, so this test
+                        * looks always-true; match_inst() already rejected
+                        * trailing garbage -- confirm before changing. */
+                       if (isendofline(*curbuf) || iscomment(*curbuf)) {
+                               if (!f) {
+                                       /* first full match: record the callback
+                                        * and its user data */
+                                       memcpy(&f, &inst->f, sizeof(f));
+                                       memcpy(&data, &inst->data, sizeof(data));
+                               }
+                               else {
+                                       /* more than 1 inst matches */
+                                       err = CMDLINE_PARSE_AMBIGUOUS;
+                                       f=NULL;
+                                       debug_printf("Ambiguous cmd\n");
+                                       break;
+                               }
+                       }
+               }
+
+               inst_num ++;
+               inst = ctx[inst_num];
+       }
+
+       /* call func */
+       if (f) {
+               f(result_buf, cl, data);
+       }
+
+       /* no match */
+       else {
+               debug_printf("No match err=%d\n", err);
+               return err;
+       }
+
+       return linelen;
+}
+
+/* Try to complete the last (possibly partial) token of 'buf'.
+ * On the first call, *state must be 0 (complete the common prefix of
+ * all candidates) or -1 (enumerate every candidate).  Returns 2 when
+ * 'dst' receives the characters to append to the buffer, 1 when 'dst'
+ * receives one candidate string (call again, keeping *state, for the
+ * next one), and 0 when no (more) completion is available. */
+int
+cmdline_complete(struct cmdline *cl, const char *buf, int *state,
+                char *dst, unsigned int size)
+{
+       const char *partial_tok = buf;
+       unsigned int inst_num = 0;
+       cmdline_parse_inst_t *inst;
+       cmdline_parse_token_hdr_t *token_p;
+       struct cmdline_token_hdr token_hdr;
+       char tmpbuf[CMDLINE_BUFFER_SIZE], comp_buf[CMDLINE_BUFFER_SIZE];
+       unsigned int partial_tok_len;
+       int comp_len = -1;
+       int tmp_len = -1;
+       int nb_token = 0;
+       unsigned int i, n;
+       int l;
+       unsigned int nb_completable;
+       unsigned int nb_non_completable;
+       int local_state = 0;
+       const char *help_str;
+       cmdline_parse_ctx_t *ctx = cl->ctx;
+
+       debug_printf("%s called\n", __func__);
+       memset(&token_hdr, 0, sizeof(token_hdr));
+
+       /* count the number of complete token to parse */
+       for (i=0 ; buf[i] ; i++) {
+               if (!isblank2(buf[i]) && isblank2(buf[i+1]))
+                       nb_token++;
+               if (isblank2(buf[i]) && !isblank2(buf[i+1]))
+                       partial_tok = buf+i+1;
+       }
+       partial_tok_len = strnlen(partial_tok, RDLINE_BUF_SIZE);
+
+       /* first call -> do a first pass */
+       if (*state <= 0) {
+               debug_printf("try complete <%s>\n", buf);
+               debug_printf("there is %d complete tokens, <%s> is incomplete\n",
+                            nb_token, partial_tok);
+
+               nb_completable = 0;
+               nb_non_completable = 0;
+
+               /* first pass: compute in comp_buf the longest extension
+                * common to every matching candidate */
+               inst = ctx[inst_num];
+               while (inst) {
+                       /* parse the first tokens of the inst */
+                       if (nb_token && match_inst(inst, buf, nb_token, NULL))
+                               goto next;
+
+                       debug_printf("instruction match \n");
+                       token_p = inst->tokens[nb_token];
+                       if (token_p)
+                               memcpy(&token_hdr, token_p, sizeof(token_hdr));
+
+                       /* non completable */
+                       if (!token_p ||
+                           !token_hdr.ops->complete_get_nb ||
+                           !token_hdr.ops->complete_get_elt ||
+                           (n = token_hdr.ops->complete_get_nb(token_p)) == 0) {
+                               nb_non_completable++;
+                               goto next;
+                       }
+
+                       debug_printf("%d choices for this token\n", n);
+                       for (i=0 ; i<n ; i++) {
+                               if (token_hdr.ops->complete_get_elt(token_p, i,
+                                                                   tmpbuf,
+                                                                   sizeof(tmpbuf)) < 0)
+                                       continue;
+
+                               /* we have at least room for one char */
+                               tmp_len = strnlen(tmpbuf, sizeof(tmpbuf));
+                               if (tmp_len < CMDLINE_BUFFER_SIZE - 1) {
+                                       tmpbuf[tmp_len] = ' ';
+                                       tmpbuf[tmp_len+1] = 0;
+                               }
+
+                               debug_printf("   choice <%s>\n", tmpbuf);
+
+                               /* does the completion match the
+                                * beginning of the word ? */
+                               if (!strncmp(partial_tok, tmpbuf,
+                                            partial_tok_len)) {
+                                       if (comp_len == -1) {
+                                               rte_snprintf(comp_buf, sizeof(comp_buf),
+                                                        "%s", tmpbuf + partial_tok_len);
+                                               comp_len =
+                                                       strnlen(tmpbuf + partial_tok_len,
+                                                                       sizeof(tmpbuf) - partial_tok_len);
+
+                                       }
+                                       else {
+                                               /* shrink the common prefix to what
+                                                * this candidate shares with it */
+                                               comp_len =
+                                                       nb_common_chars(comp_buf,
+                                                                       tmpbuf+partial_tok_len);
+                                               comp_buf[comp_len] = 0;
+                                       }
+                                       nb_completable++;
+                               }
+                       }
+               next:
+                       inst_num ++;
+                       inst = ctx[inst_num];
+               }
+
+               debug_printf("total choices %d for this completion\n",
+                            nb_completable);
+
+               /* no possible completion */
+               if (nb_completable == 0 && nb_non_completable == 0)
+                       return 0;
+
+               /* if multichoice is not required */
+               if (*state == 0 && partial_tok_len > 0) {
+                       /* one or several choices starting with the
+                          same chars */
+                       if (comp_len > 0) {
+                               if ((unsigned)(comp_len + 1) > size)
+                                       return 0;
+
+                               rte_snprintf(dst, size, "%s", comp_buf);
+                               dst[comp_len] = 0;
+                               return 2;
+                       }
+               }
+       }
+
+       /* init state correctly */
+       if (*state == -1)
+               *state = 0;
+
+       debug_printf("Multiple choice STATE=%d\n", *state);
+
+       /* second pass: enumerate the candidates one call at a time,
+        * skipping the first *state matches already returned */
+       inst_num = 0;
+       inst = ctx[inst_num];
+       while (inst) {
+               /* we need to redo it */
+               inst = ctx[inst_num];
+
+               if (nb_token && match_inst(inst, buf, nb_token, NULL))
+                       goto next2;
+
+               token_p = inst->tokens[nb_token];
+               if (token_p)
+                       memcpy(&token_hdr, token_p, sizeof(token_hdr));
+
+               /* one choice for this token */
+               if (!token_p ||
+                   !token_hdr.ops->complete_get_nb ||
+                   !token_hdr.ops->complete_get_elt ||
+                   (n = token_hdr.ops->complete_get_nb(token_p)) == 0) {
+                       if (local_state < *state) {
+                               local_state++;
+                               goto next2;
+                       }
+                       (*state)++;
+                       if (token_p && token_hdr.ops->get_help) {
+                               token_hdr.ops->get_help(token_p, tmpbuf,
+                                                       sizeof(tmpbuf));
+                               help_str = inst->help_str;
+                               if (help_str)
+                                       rte_snprintf(dst, size, "[%s]: %s", tmpbuf,
+                                                help_str);
+                               else
+                                       rte_snprintf(dst, size, "[%s]: No help",
+                                                tmpbuf);
+                       }
+                       else {
+                               rte_snprintf(dst, size, "[RETURN]");
+                       }
+                       return 1;
+               }
+
+               /* several choices */
+               for (i=0 ; i<n ; i++) {
+                       if (token_hdr.ops->complete_get_elt(token_p, i, tmpbuf,
+                                                           sizeof(tmpbuf)) < 0)
+                               continue;
+                       /* we have at least room for one char */
+                       tmp_len = strnlen(tmpbuf, sizeof(tmpbuf));
+                       if (tmp_len < CMDLINE_BUFFER_SIZE - 1) {
+                               tmpbuf[tmp_len] = ' ';
+                               tmpbuf[tmp_len + 1] = 0;
+                       }
+
+                       debug_printf("   choice <%s>\n", tmpbuf);
+
+                       /* does the completion match the beginning of
+                        * the word ? */
+                       if (!strncmp(partial_tok, tmpbuf,
+                                    partial_tok_len)) {
+                               if (local_state < *state) {
+                                       local_state++;
+                                       continue;
+                               }
+                               (*state)++;
+                               l=rte_snprintf(dst, size, "%s", tmpbuf);
+                               if (l>=0 && token_hdr.ops->get_help) {
+                                       token_hdr.ops->get_help(token_p, tmpbuf,
+                                                               sizeof(tmpbuf));
+                                       help_str = inst->help_str;
+                                       if (help_str)
+                                               rte_snprintf(dst+l, size-l, "[%s]: %s",
+                                                        tmpbuf, help_str);
+                                       else
+                                               rte_snprintf(dst+l, size-l,
+                                                        "[%s]: No help", tmpbuf);
+                               }
+
+                               return 1;
+                       }
+               }
+       next2:
+               inst_num ++;
+               inst = ctx[inst_num];
+       }
+       return 0;
+}
+
diff --git a/lib/librte_cmdline/cmdline_parse.h b/lib/librte_cmdline/cmdline_parse.h
new file mode 100644 (file)
index 0000000..eb9a037
--- /dev/null
@@ -0,0 +1,188 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+/*
+ * Copyright (c) 2009, Olivier MATZ <zer0@droids-corp.org>
+ * All rights reserved.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of the University of California, Berkeley nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _CMDLINE_PARSE_H_
+#define _CMDLINE_PARSE_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef offsetof
+#define offsetof(type, field)  ((size_t) &( ((type *)0)->field) )
+#endif
+
+/* return status for parsing */
+#define CMDLINE_PARSE_SUCCESS        0
+#define CMDLINE_PARSE_AMBIGUOUS     -1
+#define CMDLINE_PARSE_NOMATCH       -2
+#define CMDLINE_PARSE_BAD_ARGS      -3
+
+/* return status for completion */
+#define CMDLINE_PARSE_COMPLETE_FINISHED 0
+#define CMDLINE_PARSE_COMPLETE_AGAIN    1
+#define CMDLINE_PARSE_COMPLETED_BUFFER  2
+
+/**
+ * Stores a pointer to the ops struct, and the offset: the place to
+ * write the parsed result in the destination structure. Concrete token
+ * types (e.g. struct cmdline_token_etheraddr below in this patch) embed
+ * this header as their first member, so a pointer to any token can be
+ * cast to cmdline_parse_token_hdr_t.
+ */
+struct cmdline_token_hdr {
+       struct cmdline_token_ops *ops;
+       unsigned int offset;
+};
+typedef struct cmdline_token_hdr cmdline_parse_token_hdr_t;
+
+/**
+ * A token is defined by this structure.
+ *
+ * parse() takes the token as first argument, then the source buffer
+ * starting at the token we want to parse. The 3rd arg is a pointer
+ * where we store the parsed data (as binary). It returns the number of
+ * parsed chars on success and a negative value on error.
+ *
+ * complete_get_nb() returns the number of possible values for this
+ * token if completion is possible. If it is NULL or if it returns 0,
+ * no completion is possible.
+ *
+ * complete_get_elt() copies into dstbuf (whose size is given in the
+ * last parameter) the i-th possible completion for this token. It
+ * returns 0 on success or a negative value on error.
+ *
+ * get_help() fills the dstbuf with the help for the token. It returns
+ * -1 on error and 0 on success.
+ */
+struct cmdline_token_ops {
+       /** parse(token ptr, buf, res ptr); returns the number of parsed
+        *  chars on success, a negative value on error */
+       int (*parse)(cmdline_parse_token_hdr_t *, const char *, void *);
+       /** return the num of possible choices for this token;
+        *  NULL (or a return of 0) means no completion is possible */
+       int (*complete_get_nb)(cmdline_parse_token_hdr_t *);
+       /** return the elt x for this token (token, idx, dstbuf, size) */
+       int (*complete_get_elt)(cmdline_parse_token_hdr_t *, int, char *, unsigned int);
+       /** get help for this token (token, dstbuf, size) */
+       int (*get_help)(cmdline_parse_token_hdr_t *, char *, unsigned int);
+};
+
+struct cmdline;
+/**
+ * Store an instruction, which is a pointer to a callback function and
+ * its parameter that is called when the instruction is parsed, a help
+ * string, and a list of tokens composing this instruction.
+ */
+struct cmdline_inst {
+       /* f(parsed_struct, cmdline, data): invoked when the instruction
+        * has been successfully parsed */
+       void (*f)(void *, struct cmdline *, void *);
+       void *data;
+       const char *help_str;
+       /* flexible array of token pointers; presumably terminated by a
+        * NULL entry -- TODO confirm against cmdline_parse() */
+       cmdline_parse_token_hdr_t *tokens[];
+};
+typedef struct cmdline_inst cmdline_parse_inst_t;
+
+/**
+ * A context is identified by its name, and contains a list of
+ * instructions.
+ */
+typedef cmdline_parse_inst_t *cmdline_parse_ctx_t;
+
+/**
+ * Try to parse a buffer according to the specified context. The
+ * argument buf must end with "\n\0". The function returns
+ * CMDLINE_PARSE_AMBIGUOUS, CMDLINE_PARSE_NOMATCH or
+ * CMDLINE_PARSE_BAD_ARGS on error. Otherwise it calls the associated
+ * function (defined in the context) and returns 0
+ * (CMDLINE_PARSE_SUCCESS).
+ */
+int cmdline_parse(struct cmdline *cl, const char *buf);
+
+/**
+ * complete() must be called with *state==0 (try to complete) or
+ * with *state==-1 (just display choices), then called without
+ * modifying *state until it returns CMDLINE_PARSE_COMPLETED_BUFFER or
+ * CMDLINE_PARSE_COMPLETE_FINISHED.
+ *
+ * It returns < 0 on error.
+ *
+ * Else it returns:
+ *   - CMDLINE_PARSE_COMPLETED_BUFFER on completion (one possible
+ *     choice). In this case, the chars are appended in dst buffer.
+ *   - CMDLINE_PARSE_COMPLETE_AGAIN if there are several possible
+ *     choices. In this case, you must call the function again,
+ *     keeping the value of state intact.
+ *   - CMDLINE_PARSE_COMPLETE_FINISHED when the iteration is
+ *     finished. The dst is not valid for this last call.
+ *
+ * The returned dst buf ends with \0.
+ */
+int cmdline_complete(struct cmdline *cl, const char *buf, int *state,
+                    char *dst, unsigned int size);
+
+
+/* return true if(!c || iscomment(c) || isblank(c) ||
+ * isendofline(c)) */
+int cmdline_isendoftoken(char c);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _CMDLINE_PARSE_H_ */
diff --git a/lib/librte_cmdline/cmdline_parse_etheraddr.c b/lib/librte_cmdline/cmdline_parse_etheraddr.c
new file mode 100644 (file)
index 0000000..5700a74
--- /dev/null
@@ -0,0 +1,172 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+/*
+ * Copyright (c) 2009, Olivier MATZ <zer0@droids-corp.org>
+ * All rights reserved.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of the University of California, Berkeley nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <inttypes.h>
+#include <ctype.h>
+#include <string.h>
+#include <errno.h>
+#include <sys/types.h>
+#include <net/ethernet.h>
+
+#include <rte_string_fns.h>
+
+#include "cmdline_parse.h"
+#include "cmdline_parse_etheraddr.h"
+
+/* Ops vtable for Ethernet-address tokens: parsing and help only; both
+ * completion callbacks are NULL, so no completion is offered. */
+struct cmdline_token_ops cmdline_token_etheraddr_ops = {
+       .parse = cmdline_parse_etheraddr,
+       .complete_get_nb = NULL,
+       .complete_get_elt = NULL,
+       .get_help = cmdline_get_help_etheraddr,
+};
+
+
+#define ETHER_ADDRSTRLEN 18
+
+#ifdef __linux__
+#define ea_oct ether_addr_octet
+#else
+#define ea_oct octet
+#endif
+
+
+/*
+ * Parse the NUL-terminated string "a" as an Ethernet address. Two
+ * layouts of ':'-separated hex groups are accepted:
+ *   - XX:XX:XX:XX:XX:XX (six 8-bit groups)
+ *   - XXXX:XXXX:XXXX    (three 16-bit groups)
+ * Returns a pointer to the converted address, or NULL on bad syntax,
+ * an out-of-range group, or trailing junk.
+ *
+ * NOTE: the result points to a static buffer, so the function is not
+ * reentrant and each call overwrites the previous result.
+ */
+static struct ether_addr *
+my_ether_aton(const char *a)
+{
+       int i;
+       char *end;
+       unsigned long o[ETHER_ADDR_LEN];
+       static struct ether_addr ether_addr;
+
+       i = 0;
+       do {
+               errno = 0;
+               o[i] = strtoul(a, &end, 16);
+               /* each group must be hex digits followed by ':' or NUL */
+               if (errno != 0 || end == a || (end[0] != ':' && end[0] != 0))
+                       return (NULL);
+               a = end + 1;
+       } while (++i != sizeof (o) / sizeof (o[0]) && end[0] != 0);
+
+       /* Junk at the end of line */
+       if (end[0] != 0)
+               return (NULL);
+
+       /* Support the format XX:XX:XX:XX:XX:XX */
+       if (i == ETHER_ADDR_LEN) {
+               while (i-- != 0) {
+                       if (o[i] > UINT8_MAX)
+                               return (NULL);
+                       ether_addr.ea_oct[i] = (uint8_t)o[i];
+               }
+       /* Support the format XXXX:XXXX:XXXX */
+       } else if (i == ETHER_ADDR_LEN / 2) {
+               while (i-- != 0) {
+                       if (o[i] > UINT16_MAX)
+                               return (NULL);
+                       /* store each 16-bit group in big-endian byte order */
+                       ether_addr.ea_oct[i * 2] = (uint8_t)(o[i] >> 8);
+                       ether_addr.ea_oct[i * 2 + 1] = (uint8_t)(o[i] & 0xff);
+               }
+       /* unknown format */
+       } else
+               return (NULL);
+
+       return (struct ether_addr *)&ether_addr;
+}
+
+/*
+ * Token parse callback for Ethernet addresses. Copies the current
+ * token out of "buf", converts it with my_ether_aton() and, on
+ * success, writes the resulting struct ether_addr into "res".
+ * Returns the number of characters consumed, or -1 on error (empty
+ * buffer, oversized token, or malformed address).
+ *
+ * NOTE(review): "res" is dereferenced unconditionally here, whereas
+ * cmdline_parse_ipaddr() tolerates res == NULL -- confirm callers
+ * always pass a valid buffer.
+ */
+int
+cmdline_parse_etheraddr(__attribute__((unused)) cmdline_parse_token_hdr_t *tk,
+                       const char *buf, void *res)
+{
+       unsigned int token_len = 0;
+       char ether_str[ETHER_ADDRSTRLEN+1];
+       struct ether_addr *tmp;
+
+       if (! *buf)
+               return -1;
+
+       /* measure the token up to the next delimiter */
+       while (!cmdline_isendoftoken(buf[token_len]))
+               token_len++;
+
+       /* if token is too big... */
+       if (token_len >= ETHER_ADDRSTRLEN)
+               return -1;
+
+       rte_snprintf(ether_str, token_len+1, "%s", buf);
+
+       tmp = my_ether_aton(ether_str);
+       if (tmp == NULL)
+               return -1;
+
+       memcpy(res, tmp, sizeof(struct ether_addr));
+       return token_len;
+}
+
+/* Help callback: write a short description of the token type into
+ * dstbuf. Always succeeds (returns 0). */
+int cmdline_get_help_etheraddr(__attribute__((unused)) cmdline_parse_token_hdr_t *tk,
+                              char *dstbuf, unsigned int size)
+{
+       rte_snprintf(dstbuf, size, "Ethernet address");
+       return 0;
+}
diff --git a/lib/librte_cmdline/cmdline_parse_etheraddr.h b/lib/librte_cmdline/cmdline_parse_etheraddr.h
new file mode 100644 (file)
index 0000000..d6d30b6
--- /dev/null
@@ -0,0 +1,102 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+/*
+ * Copyright (c) 2009, Olivier MATZ <zer0@droids-corp.org>
+ * All rights reserved.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of the University of California, Berkeley nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _PARSE_ETHERADDR_H_
+#define _PARSE_ETHERADDR_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Per-token option data. NOTE(review): 'flags' is not read by
+ * cmdline_parse_etheraddr() in this file -- presumably reserved for
+ * future use; confirm. */
+struct cmdline_token_etheraddr_data {
+       uint8_t flags;
+};
+
+/* Ethernet-address token: only the generic token header is embedded. */
+struct cmdline_token_etheraddr {
+       struct cmdline_token_hdr hdr;
+};
+typedef struct cmdline_token_etheraddr cmdline_parse_token_etheraddr_t;
+
+extern struct cmdline_token_ops cmdline_token_etheraddr_ops;
+
+int cmdline_parse_etheraddr(cmdline_parse_token_hdr_t *tk, const char *srcbuf,
+                           void *res);
+int cmdline_get_help_etheraddr(cmdline_parse_token_hdr_t *tk, char *dstbuf,
+                              unsigned int size);
+
+/*
+ * Static initializer for an Ethernet-address token bound to a field of
+ * the result structure.
+ * Warning! Not compatible with C++! (relies on designated initializers)
+ */
+#define TOKEN_ETHERADDR_INITIALIZER(structure, field)      \
+{                                                          \
+       .hdr = {                                            \
+               .ops = &cmdline_token_etheraddr_ops,        \
+               .offset = offsetof(structure, field),       \
+       },                                                  \
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+
+#endif /* _PARSE_ETHERADDR_H_ */
diff --git a/lib/librte_cmdline/cmdline_parse_ipaddr.c b/lib/librte_cmdline/cmdline_parse_ipaddr.c
new file mode 100644 (file)
index 0000000..66a1493
--- /dev/null
@@ -0,0 +1,383 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+/*
+ * Copyright (c) 2009, Olivier MATZ <zer0@droids-corp.org>
+ * All rights reserved.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of the University of California, Berkeley nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * For inet_ntop() functions:
+ *
+ * Copyright (c) 1996 by Internet Software Consortium.
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SOFTWARE CONSORTIUM DISCLAIMS
+ * ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL INTERNET SOFTWARE
+ * CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+ * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
+ * SOFTWARE.
+ */
+
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <inttypes.h>
+#include <ctype.h>
+#include <string.h>
+#include <errno.h>
+#include <netinet/in.h>
+#ifndef __linux__
+#include <net/socket.h>
+#endif
+
+#include <rte_string_fns.h>
+
+#include "cmdline_parse.h"
+#include "cmdline_parse_ipaddr.h"
+
+/* Ops vtable for IP-address tokens: parsing and help only; both
+ * completion callbacks are NULL, so no completion is offered. */
+struct cmdline_token_ops cmdline_token_ipaddr_ops = {
+       .parse = cmdline_parse_ipaddr,
+       .complete_get_nb = NULL,
+       .complete_get_elt = NULL,
+       .get_help = cmdline_get_help_ipaddr,
+};
+
+#define INADDRSZ 4
+#define IN6ADDRSZ 16
+
+/*
+ * WARNING: Don't even consider trying to compile this on a system where
+ * sizeof(int) < 4.  sizeof(int) > 4 is fine; all the world's not a VAX.
+ */
+
+static int inet_pton4(const char *src, unsigned char *dst);
+static int inet_pton6(const char *src, unsigned char *dst);
+
+/* int
+ * inet_pton(af, src, dst)
+ *      convert from presentation format (which usually means ASCII printable)
+ *      to network format (which is usually some kind of binary format).
+ * return:
+ *      1 if the address was valid for the specified address family
+ *      0 if the address wasn't valid (`dst' is untouched in this case)
+ *      -1 if some other error occurred (`dst' is untouched in this case, too)
+ * author:
+ *      Paul Vixie, 1996.
+ */
+/* Dispatch to the per-family converter (contract described in the
+ * comment block above); any other family fails with EAFNOSUPPORT. */
+static int
+my_inet_pton(int af, const char *src, void *dst)
+{
+       switch (af) {
+               case AF_INET:
+                       return (inet_pton4(src, dst));
+               case AF_INET6:
+                       return (inet_pton6(src, dst));
+               default:
+                       errno = EAFNOSUPPORT;
+                       return (-1);
+       }
+       /* NOTREACHED */
+}
+
+/* int
+ * inet_pton4(src, dst)
+ *      like inet_aton() but without all the hexadecimal and shorthand.
+ * return:
+ *      1 if `src' is a valid dotted quad, else 0.
+ * notice:
+ *      does not touch `dst' unless it's returning 1.
+ * author:
+ *      Paul Vixie, 1996.
+ */
+static int
+inet_pton4(const char *src, unsigned char *dst)
+{
+       static const char digits[] = "0123456789";
+       int saw_digit, octets, ch;
+       unsigned char tmp[INADDRSZ], *tp;
+
+       saw_digit = 0;
+       octets = 0;
+       *(tp = tmp) = 0;
+       while ((ch = *src++) != '\0') {
+               const char *pch;
+
+               if ((pch = strchr(digits, ch)) != NULL) {
+                       /* accumulate the current decimal octet */
+                       unsigned int new = *tp * 10 + (pch - digits);
+
+                       if (new > 255)
+                               return (0);
+                       if (! saw_digit) {
+                               if (++octets > 4)
+                                       return (0);
+                               saw_digit = 1;
+                       }
+                       *tp = (unsigned char)new;
+               } else if (ch == '.' && saw_digit) {
+                       /* '.' closes an octet; an empty octet (".." or
+                        * leading '.') is rejected by the else branch */
+                       if (octets == 4)
+                               return (0);
+                       *++tp = 0;
+                       saw_digit = 0;
+               } else
+                       return (0);
+       }
+       /* exactly four octets are required */
+       if (octets < 4)
+               return (0);
+
+       /* dst is only written once the whole string has validated */
+       memcpy(dst, tmp, INADDRSZ);
+       return (1);
+}
+
+/* int
+ * inet_pton6(src, dst)
+ *      convert presentation level address to network order binary form.
+ * return:
+ *      1 if `src' is a valid [RFC1884 2.2] address, else 0.
+ * notice:
+ *      (1) does not touch `dst' unless it's returning 1.
+ *      (2) :: in a full address is silently ignored.
+ * credit:
+ *      inspired by Mark Andrews.
+ * author:
+ *      Paul Vixie, 1996.
+ */
+static int
+inet_pton6(const char *src, unsigned char *dst)
+{
+       static const char xdigits_l[] = "0123456789abcdef",
+               xdigits_u[] = "0123456789ABCDEF";
+       unsigned char tmp[IN6ADDRSZ], *tp, *endp, *colonp;
+       const char *xdigits, *curtok;
+       int ch, saw_xdigit, count_xdigit;
+       unsigned int val;
+
+       memset((tp = tmp), '\0', IN6ADDRSZ);
+       endp = tp + IN6ADDRSZ;
+       colonp = NULL;  /* remembers where "::" occurred, if at all */
+       /* Leading :: requires some special handling. */
+       if (*src == ':')
+               if (*++src != ':')
+                       return (0);
+       curtok = src;
+       saw_xdigit = count_xdigit = 0;
+       val = 0;
+       while ((ch = *src++) != '\0') {
+               const char *pch;
+
+               /* accept both lower- and upper-case hex digits */
+               if ((pch = strchr((xdigits = xdigits_l), ch)) == NULL)
+                       pch = strchr((xdigits = xdigits_u), ch);
+               if (pch != NULL) {
+                       /* hex digit: fold into the current 16-bit group */
+                       if (count_xdigit >= 4)
+                               return (0);
+                       val <<= 4;
+                       val |= (pch - xdigits);
+                       if (val > 0xffff)
+                               return (0);
+                       saw_xdigit = 1;
+                       count_xdigit++;
+                       continue;
+               }
+               if (ch == ':') {
+                       curtok = src;
+                       if (!saw_xdigit) {
+                               /* a second "::" is invalid */
+                               if (colonp)
+                                       return (0);
+                               colonp = tp;
+                               continue;
+                       } else if (*src == '\0') {
+                               /* trailing single ':' is invalid */
+                               return (0);
+                       }
+                       if (tp + sizeof(int16_t) > endp)
+                               return (0);
+                       /* flush the finished group, big-endian */
+                       *tp++ = (unsigned char) ((val >> 8) & 0xff);
+                       *tp++ = (unsigned char) (val & 0xff);
+                       saw_xdigit = 0;
+                       count_xdigit = 0;
+                       val = 0;
+                       continue;
+               }
+               /* possible embedded IPv4 tail, e.g. ::ffff:1.2.3.4 */
+               if (ch == '.' && ((tp + INADDRSZ) <= endp) &&
+                   inet_pton4(curtok, tp) > 0) {
+                       tp += INADDRSZ;
+                       saw_xdigit = 0;
+                       count_xdigit = 0;
+                       break;  /* '\0' was seen by inet_pton4(). */
+               }
+               return (0);
+       }
+       if (saw_xdigit) {
+               /* flush the last pending group */
+               if (tp + sizeof(int16_t) > endp)
+                       return (0);
+               *tp++ = (unsigned char) ((val >> 8) & 0xff);
+               *tp++ = (unsigned char) (val & 0xff);
+       }
+       if (colonp != NULL) {
+               /*
+                * Since some memmove()'s erroneously fail to handle
+                * overlapping regions, we'll do the shift by hand.
+                */
+               const int n = tp - colonp;
+               int i;
+
+               /* expand "::": shift everything after it to the end,
+                * leaving zero bytes in the gap */
+               for (i = 1; i <= n; i++) {
+                       endp[- i] = colonp[n - i];
+                       colonp[n - i] = 0;
+               }
+               tp = endp;
+       }
+       /* the address must fill all 16 bytes exactly */
+       if (tp != endp)
+               return (0);
+       memcpy(dst, tmp, IN6ADDRSZ);
+       return (1);
+}
+
+/*
+ * Token parse callback for IPv4/IPv6 addresses, optionally with a
+ * "/prefixlen" suffix when CMDLINE_IPADDR_NETWORK is set in the
+ * token's flags. On success the cmdline_ipaddr_t result (family,
+ * address, prefixlen) is copied into "res" (if non-NULL) and the
+ * number of consumed characters is returned; -1 on any error.
+ */
+int
+cmdline_parse_ipaddr(cmdline_parse_token_hdr_t *tk, const char *buf, void *res)
+{
+       struct cmdline_token_ipaddr *tk2 = (struct cmdline_token_ipaddr *)tk;
+       unsigned int token_len = 0;
+       char ip_str[INET6_ADDRSTRLEN+4+1]; /* '+4' is for prefixlen (if any) */
+       cmdline_ipaddr_t ipaddr;
+       char *prefix, *prefix_end;
+       long prefixlen;
+
+       if (! *buf)
+               return -1;
+
+       /* measure the token up to the next delimiter */
+       while (!cmdline_isendoftoken(buf[token_len]))
+               token_len++;
+
+       /* if token is too big... */
+       if (token_len >= INET6_ADDRSTRLEN+4)
+               return -1;
+
+       rte_snprintf(ip_str, token_len+1, "%s", buf);
+
+       /* convert the network prefix */
+       if (tk2->ipaddr_data.flags & CMDLINE_IPADDR_NETWORK) {
+               prefix = strrchr(ip_str, '/');
+               if (prefix == NULL)
+                       return -1;
+               /* split "addr/prefix" in place at the last '/' */
+               *prefix = '\0';
+               prefix ++;
+               errno = 0;
+               prefixlen = strtol(prefix, &prefix_end, 10);
+               if (errno || (*prefix_end != '\0') )
+                       return -1;
+               /* NOTE(review): prefixlen is not range-checked here
+                * (negative or > 128 values pass) -- confirm callers
+                * validate it */
+               ipaddr.prefixlen = prefixlen;
+       }
+       else {
+               ipaddr.prefixlen = 0;
+       }
+
+       /* convert the IP addr */
+       if ((tk2->ipaddr_data.flags & CMDLINE_IPADDR_V4) &&
+           my_inet_pton(AF_INET, ip_str, &ipaddr.addr.ipv4) == 1) {
+               ipaddr.family = AF_INET;
+               if (res != NULL)
+                       memcpy(res, &ipaddr, sizeof(ipaddr));
+               return token_len;
+       }
+       /* not a valid IPv4 address (or V4 not allowed): try IPv6 */
+       if ((tk2->ipaddr_data.flags & CMDLINE_IPADDR_V6) &&
+           my_inet_pton(AF_INET6, ip_str, &ipaddr.addr.ipv6) == 1) {
+               ipaddr.family = AF_INET6;
+               if (res != NULL)
+                       memcpy(res, &ipaddr, sizeof(ipaddr));
+               return token_len;
+       }
+       return -1;
+
+}
+
+/* Help callback: describe the accepted address kinds based on the
+ * exact combination of V4/V6/NETWORK flags on the token. Always
+ * returns 0. */
+int cmdline_get_help_ipaddr(cmdline_parse_token_hdr_t *tk, char *dstbuf,
+                           unsigned int size)
+{
+       struct cmdline_token_ipaddr *tk2 = (struct cmdline_token_ipaddr *)tk;
+
+       switch (tk2->ipaddr_data.flags) {
+       case CMDLINE_IPADDR_V4:
+               rte_snprintf(dstbuf, size, "IPv4");
+               break;
+       case CMDLINE_IPADDR_V6:
+               rte_snprintf(dstbuf, size, "IPv6");
+               break;
+       case CMDLINE_IPADDR_V4|CMDLINE_IPADDR_V6:
+               rte_snprintf(dstbuf, size, "IPv4/IPv6");
+               break;
+       case CMDLINE_IPADDR_NETWORK|CMDLINE_IPADDR_V4:
+               rte_snprintf(dstbuf, size, "IPv4 network");
+               break;
+       case CMDLINE_IPADDR_NETWORK|CMDLINE_IPADDR_V6:
+               rte_snprintf(dstbuf, size, "IPv6 network");
+               break;
+       case CMDLINE_IPADDR_NETWORK|CMDLINE_IPADDR_V4|CMDLINE_IPADDR_V6:
+               rte_snprintf(dstbuf, size, "IPv4/IPv6 network");
+               break;
+       default:
+               /* unexpected flag combination */
+               rte_snprintf(dstbuf, size, "IPaddr (bad flags)");
+               break;
+       }
+       return 0;
+}
diff --git a/lib/librte_cmdline/cmdline_parse_ipaddr.h b/lib/librte_cmdline/cmdline_parse_ipaddr.h
new file mode 100644 (file)
index 0000000..1a434e5
--- /dev/null
@@ -0,0 +1,194 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+/*
+ * Copyright (c) 2009, Olivier MATZ <zer0@droids-corp.org>
+ * All rights reserved.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of the University of California, Berkeley nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _PARSE_IPADDR_H_
+#define _PARSE_IPADDR_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define CMDLINE_IPADDR_V4      0x01 /* token accepts IPv4 addresses */
+#define CMDLINE_IPADDR_V6      0x02 /* token accepts IPv6 addresses */
+#define CMDLINE_IPADDR_NETWORK 0x04 /* token accepts a network (with prefixlen) */
+
+struct cmdline_ipaddr {
+       uint8_t family;         /* AF_INET or AF_INET6 */
+       union {
+               struct in_addr ipv4;
+               struct in6_addr ipv6;
+       } addr;                 /* valid member is selected by family */
+       unsigned int prefixlen; /* in case of network only */
+};
+typedef struct cmdline_ipaddr cmdline_ipaddr_t;
+
+struct cmdline_token_ipaddr_data {
+       uint8_t flags;          /* bitmask of CMDLINE_IPADDR_* */
+};
+
+struct cmdline_token_ipaddr {
+       struct cmdline_token_hdr hdr;                   /* generic token header */
+       struct cmdline_token_ipaddr_data ipaddr_data;   /* ipaddr-specific flags */
+};
+typedef struct cmdline_token_ipaddr cmdline_parse_token_ipaddr_t;
+
+extern struct cmdline_token_ops cmdline_token_ipaddr_ops;
+
+int cmdline_parse_ipaddr(cmdline_parse_token_hdr_t *tk, const char *srcbuf,
+                        void *res);
+int cmdline_get_help_ipaddr(cmdline_parse_token_hdr_t *tk, char *dstbuf,
+                           unsigned int size);
+
+/*
+ * Warning! Not compatible with C++!
+ */
+#define TOKEN_IPADDR_INITIALIZER(structure, field)         \
+{                                                          \
+       .hdr = {                                            \
+               .ops = &cmdline_token_ipaddr_ops,           \
+               .offset = offsetof(structure, field),       \
+       },                                                  \
+       .ipaddr_data = {                                    \
+               .flags = CMDLINE_IPADDR_V4 |                \
+               CMDLINE_IPADDR_V6,                          \
+       },                                                  \
+}
+
+/*
+ * Warning! Not compatible with C++!
+ */
+#define TOKEN_IPV4_INITIALIZER(structure, field)           \
+{                                                          \
+       .hdr = {                                            \
+               .ops = &cmdline_token_ipaddr_ops,           \
+               .offset = offsetof(structure, field),       \
+       },                                                  \
+       .ipaddr_data = {                                    \
+               .flags = CMDLINE_IPADDR_V4,                 \
+       },                                                  \
+}
+
+/*
+ * Warning! Not compatible with C++!
+ */
+#define TOKEN_IPV6_INITIALIZER(structure, field)           \
+{                                                          \
+       .hdr = {                                            \
+               .ops = &cmdline_token_ipaddr_ops,           \
+               .offset = offsetof(structure, field),       \
+       },                                                  \
+       .ipaddr_data = {                                    \
+               .flags = CMDLINE_IPADDR_V6,                 \
+       },                                                  \
+}
+
+/*
+ * Warning! Not compatible with C++!
+ */
+#define TOKEN_IPNET_INITIALIZER(structure, field)          \
+{                                                          \
+       .hdr = {                                            \
+               .ops = &cmdline_token_ipaddr_ops,           \
+               .offset = offsetof(structure, field),       \
+       },                                                  \
+       .ipaddr_data = {                                    \
+               .flags = CMDLINE_IPADDR_V4 |                \
+               CMDLINE_IPADDR_V6 |                         \
+               CMDLINE_IPADDR_NETWORK,                     \
+       },                                                  \
+}
+
+/*
+ * Warning! Not compatible with C++!
+ */
+#define TOKEN_IPV4NET_INITIALIZER(structure, field)        \
+{                                                          \
+       .hdr = {                                            \
+               .ops = &cmdline_token_ipaddr_ops,           \
+               .offset = offsetof(structure, field),       \
+       },                                                  \
+       .ipaddr_data = {                                    \
+               .flags = CMDLINE_IPADDR_V4 |                \
+               CMDLINE_IPADDR_NETWORK,                     \
+       },                                                  \
+}
+
+/*
+ * Warning! Not compatible with C++!
+ */
+#define TOKEN_IPV6NET_INITIALIZER(structure, field)        \
+{                                                          \
+       .hdr = {                                            \
+               .ops = &cmdline_token_ipaddr_ops,           \
+               .offset = offsetof(structure, field),       \
+       },                                                  \
+       .ipaddr_data = {                                    \
+               .flags = CMDLINE_IPADDR_V6 |                \
+               CMDLINE_IPADDR_NETWORK,                     \
+       },                                                  \
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _PARSE_IPADDR_H_ */
diff --git a/lib/librte_cmdline/cmdline_parse_num.c b/lib/librte_cmdline/cmdline_parse_num.c
new file mode 100644 (file)
index 0000000..087cf48
--- /dev/null
@@ -0,0 +1,493 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+/*
+ * Copyright (c) 2009, Olivier MATZ <zer0@droids-corp.org>
+ * All rights reserved.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of the University of California, Berkeley nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <ctype.h>
+#include <string.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <rte_string_fns.h>
+
+#include "cmdline_parse.h"
+#include "cmdline_parse_num.h"
+
+#ifdef RTE_LIBRTE_CMDLINE_DEBUG
+#define debug_printf(args...) printf(args)
+#else
+#define debug_printf(args...) do {} while(0)
+#endif
+
+struct cmdline_token_ops cmdline_token_num_ops = {
+       .parse = cmdline_parse_num,             /* token parser entry point */
+       .complete_get_nb = NULL,                /* no completion for numbers */
+       .complete_get_elt = NULL,
+       .get_help = cmdline_get_help_num,       /* help string provider */
+};
+
+
+enum num_parse_state_t {
+       START,          /* nothing consumed yet */
+       DEC_NEG,        /* '-' seen, no digit yet */
+       BIN,            /* "0b" seen, no binary digit yet */
+       HEX,            /* "0x" seen, no hex digit yet */
+       FLOAT_POS,      /* '.' seen, no fractional digit yet */
+       FLOAT_NEG,      /* '-' then '.' seen, no fractional digit yet */
+
+       ERROR,
+
+       /* states from FIRST_OK on are accepting states */
+       FIRST_OK, /* not used */
+       ZERO_OK,
+       HEX_OK,
+       OCTAL_OK,
+       BIN_OK,
+       DEC_NEG_OK,
+       DEC_POS_OK,
+       FLOAT_POS_OK,
+       FLOAT_NEG_OK
+};
+
+/* Keep this table in sync with enum cmdline_numtype in cmdline_parse_num.h */
+static const char * num_help[] = {
+       "UINT8", "UINT16", "UINT32", "UINT64",
+       "INT8", "INT16", "INT32", "INT64",
+#ifdef CMDLINE_HAVE_FLOAT
+       "FLOAT",
+#endif
+};
+
+static inline int
+add_to_res(unsigned int c, uint64_t *res, unsigned int base)
+{
+       /* refuse the digit if *res * base + c would overflow UINT64_MAX */
+       if ( (UINT64_MAX - c) / base < *res ) {
+               return -1;
+       }
+       /* shift the accumulated value one digit left (in base) and add c */
+       *res = (uint64_t) (*res * base + c);
+       return 0;
+}
+
+
+/* parse an int or a float */
+int
+cmdline_parse_num(cmdline_parse_token_hdr_t *tk, const char *srcbuf, void *res)
+{
+       struct cmdline_token_num_data nd;
+       enum num_parse_state_t st = START;
+       const char * buf = srcbuf;
+       char c = *buf;
+       uint64_t res1 = 0;
+#ifdef CMDLINE_HAVE_FLOAT
+       uint64_t res2 = 0, res3 = 1;
+#endif
+
+       memcpy(&nd, &((struct cmdline_token_num *)tk)->num_data, sizeof(nd));
+       /* res1 = integer part; res2/res3 = fraction numerator/denominator */
+       while ( st != ERROR && c && ! cmdline_isendoftoken(c) ) {
+               debug_printf("%c %x -> ", c, c);
+               switch (st) {
+               case START:
+                       if (c == '-') {
+                               st = DEC_NEG;
+                       }
+                       else if (c == '0') {
+                               st = ZERO_OK;
+                       }
+#ifdef CMDLINE_HAVE_FLOAT
+                       else if (c == '.') {
+                               st = FLOAT_POS;
+                               res1 = 0;
+                       }
+#endif
+                       else if (c >= '1' && c <= '9') {
+                               if (add_to_res(c - '0', &res1, 10) < 0)
+                                       st = ERROR;
+                               else
+                                       st = DEC_POS_OK;
+                       }
+                       else  {
+                               st = ERROR;
+                       }
+                       break;
+
+               case ZERO_OK:
+                       if (c == 'x') {
+                               st = HEX;
+                       }
+                       else if (c == 'b') {
+                               st = BIN;
+                       }
+#ifdef CMDLINE_HAVE_FLOAT
+                       else if (c == '.') {
+                               st = FLOAT_POS;
+                               res1 = 0;
+                       }
+#endif
+                       else if (c >= '0' && c <= '7') {
+                               if (add_to_res(c - '0', &res1, 10) < 0)
+                                       st = ERROR;
+                               else
+                                       st = OCTAL_OK;
+                       }
+                       else  {
+                               st = ERROR;
+                       }
+                       break;
+
+               case DEC_NEG:
+                       if (c >= '0' && c <= '9') {
+                               if (add_to_res(c - '0', &res1, 10) < 0)
+                                       st = ERROR;
+                               else
+                                       st = DEC_NEG_OK;
+                       }
+#ifdef CMDLINE_HAVE_FLOAT
+                       else if (c == '.') {
+                               res1 = 0;
+                               st = FLOAT_NEG;
+                       }
+#endif
+                       else {
+                               st = ERROR;
+                       }
+                       break;
+
+               case DEC_NEG_OK:
+                       if (c >= '0' && c <= '9') {
+                               if (add_to_res(c - '0', &res1, 10) < 0)
+                                       st = ERROR;
+                       }
+#ifdef CMDLINE_HAVE_FLOAT
+                       else if (c == '.') {
+                               st = FLOAT_NEG;
+                       }
+#endif
+                       else {
+                               st = ERROR;
+                       }
+                       break;
+
+               case DEC_POS_OK:
+                       if (c >= '0' && c <= '9') {
+                               if (add_to_res(c - '0', &res1, 10) < 0)
+                                       st = ERROR;
+                       }
+#ifdef CMDLINE_HAVE_FLOAT
+                       else if (c == '.') {
+                               st = FLOAT_POS;
+                       }
+#endif
+                       else {
+                               st = ERROR;
+                       }
+                       break;
+
+               case HEX:
+                       st = HEX_OK;
+                       /* no break */
+               case HEX_OK:
+                       if (c >= '0' && c <= '9') {
+                               if (add_to_res(c - '0', &res1, 16) < 0)
+                                       st = ERROR;
+                       }
+                       else if (c >= 'a' && c <= 'f') {
+                               if (add_to_res(c - 'a' + 10, &res1, 16) < 0)
+                                       st = ERROR;
+                       }
+                       else if (c >= 'A' && c <= 'F') {
+                               if (add_to_res(c - 'A' + 10, &res1, 16) < 0)
+                                       st = ERROR;
+                       }
+                       else {
+                               st = ERROR;
+                       }
+                       break;
+
+
+               case OCTAL_OK:
+                       if (c >= '0' && c <= '7') {
+                               if (add_to_res(c - '0', &res1, 8) < 0)
+                                       st = ERROR;
+                       }
+                       else {
+                               st = ERROR;
+                       }
+                       break;
+
+               case BIN:
+                       st = BIN_OK;
+                       /* no break */
+               case BIN_OK:
+                       if (c >= '0' && c <= '1') {
+                               if (add_to_res(c - '0', &res1, 2) < 0)
+                                       st = ERROR;
+                       }
+                       else {
+                               st = ERROR;
+                       }
+                       break;
+
+#ifdef CMDLINE_HAVE_FLOAT
+               case FLOAT_POS:
+                       if (c >= '0' && c <= '9') {
+                               if (add_to_res(c - '0', &res2, 10) < 0)
+                                       st = ERROR;
+                               else
+                                       st = FLOAT_POS_OK;
+                               res3 = 10;
+                       }
+                       else {
+                               st = ERROR;
+                       }
+                       break;
+
+               case FLOAT_NEG:
+                       if (c >= '0' && c <= '9') {
+                               if (add_to_res(c - '0', &res2, 10) < 0)
+                                       st = ERROR;
+                               else
+                                       st = FLOAT_NEG_OK;
+                               res3 = 10;
+                       }
+                       else {
+                               st = ERROR;
+                       }
+                       break;
+
+               case FLOAT_POS_OK:
+                       if (c >= '0' && c <= '9') {
+                               if (add_to_res(c - '0', &res2, 10) < 0)
+                                       st = ERROR;
+                               if (add_to_res(0, &res3, 10) < 0)
+                                       st = ERROR;
+                       }
+                       else {
+                               st = ERROR;
+                       }
+                       break;
+
+               case FLOAT_NEG_OK:
+                       if (c >= '0' && c <= '9') {
+                               if (add_to_res(c - '0', &res2, 10) < 0)
+                                       st = ERROR;
+                               if (add_to_res(0, &res3, 10) < 0)
+                                       st = ERROR;
+                       }
+                       else {
+                               st = ERROR;
+                       }
+                       break;
+#endif
+
+               default:
+                       debug_printf("not impl ");
+
+               }
+
+#ifdef CMDLINE_HAVE_FLOAT
+               debug_printf("(%"PRIu64")  (%"PRIu64")  (%"PRIu64")\n",
+                            res1, res2, res3);
+#else
+               debug_printf("(%"PRIu64")\n", res1);
+#endif
+
+               buf ++;
+               c = *buf;
+
+               /* token too long */
+               if (buf-srcbuf > 127)
+                       return -1;
+       }
+
+       switch (st) {
+       case ZERO_OK:
+       case DEC_POS_OK:
+       case HEX_OK:
+       case OCTAL_OK:
+       case BIN_OK:
+               if ( nd.type == INT8 && res1 <= INT8_MAX ) {
+                       if (res)
+                               *(int8_t *)res = (int8_t) res1;
+                       return (buf-srcbuf);
+               }
+               else if ( nd.type == INT16 && res1 <= INT16_MAX ) {
+                       if (res)
+                               *(int16_t *)res = (int16_t) res1;
+                       return (buf-srcbuf);
+               }
+               else if ( nd.type == INT32 && res1 <= INT32_MAX ) {
+                       if (res)
+                               *(int32_t *)res = (int32_t) res1;
+                       return (buf-srcbuf);
+               }
+               else if ( nd.type == UINT8 && res1 <= UINT8_MAX ) {
+                       if (res)
+                               *(uint8_t *)res = (uint8_t) res1;
+                       return (buf-srcbuf);
+               }
+               else if (nd.type == UINT16  && res1 <= UINT16_MAX ) {
+                       if (res)
+                               *(uint16_t *)res = (uint16_t) res1;
+                       return (buf-srcbuf);
+               }
+               else if ( nd.type == UINT32 ) {
+                       if (res)
+                               *(uint32_t *)res = (uint32_t) res1;
+                       return (buf-srcbuf);
+               }
+               else if ( nd.type == UINT64 ) { /* NOTE(review): no INT64 branch here or below, so INT64 tokens always fail -- confirm intended */
+                       if (res)
+                               *(uint64_t *)res = res1;
+                       return (buf-srcbuf);
+               }
+#ifdef CMDLINE_HAVE_FLOAT
+               else if ( nd.type == FLOAT ) {
+                       if (res)
+                               *(float *)res = (float)res1;
+                       return (buf-srcbuf);
+               }
+#endif
+               else {
+                       return -1;
+               }
+               break;
+
+       case DEC_NEG_OK:
+               if ( nd.type == INT8 && res1 <= INT8_MAX + 1 ) {
+                       if (res)
+                               *(int8_t *)res = (int8_t) (-res1);
+                       return (buf-srcbuf);
+               }
+               else if ( nd.type == INT16 && res1 <= (uint16_t)INT16_MAX + 1 ) {
+                       if (res)
+                               *(int16_t *)res = (int16_t) (-res1);
+                       return (buf-srcbuf);
+               }
+               else if ( nd.type == INT32 && res1 <= (uint32_t)INT32_MAX + 1 ) {
+                       if (res)
+                               *(int32_t *)res = (int32_t) (-res1);
+                       return (buf-srcbuf);
+               }
+#ifdef CMDLINE_HAVE_FLOAT
+               else if ( nd.type == FLOAT ) {
+                       if (res)
+                               *(float *)res = - (float)res1;
+                       return (buf-srcbuf);
+               }
+#endif
+               else {
+                       return -1;
+               }
+               break;
+
+#ifdef CMDLINE_HAVE_FLOAT
+       case FLOAT_POS:
+       case FLOAT_POS_OK:
+               if ( nd.type == FLOAT ) {
+                       if (res)
+                               *(float *)res = (float)res1 + ((float)res2 / (float)res3);
+                       return (buf-srcbuf);
+
+               }
+               else {
+                       return -1;
+               }
+               break;
+
+       case FLOAT_NEG:
+       case FLOAT_NEG_OK:
+               if ( nd.type == FLOAT ) {
+                       if (res)
+                               *(float *)res = - ((float)res1 + ((float)res2 / (float)res3));
+                       return (buf-srcbuf);
+
+               }
+               else {
+                       return -1;
+               }
+               break;
+#endif
+       default:
+               debug_printf("error\n");
+               return -1;
+       }
+}
+
+
+/* write the help string (the expected type name) for a num token */
+int
+cmdline_get_help_num(cmdline_parse_token_hdr_t *tk, char *dstbuf, unsigned int size)
+{
+       struct cmdline_token_num_data nd;
+
+       memcpy(&nd, &((struct cmdline_token_num *)tk)->num_data, sizeof(nd));
+
+       /* should not happen.... don't do this test */
+       /* if (nd.type >= (sizeof(num_help)/sizeof(const char *))) */
+       /* return -1; */
+
+       rte_snprintf(dstbuf, size, "%s", num_help[nd.type]);
+       dstbuf[size-1] = '\0'; /* force termination even on truncation */
+       return 0;
+}
diff --git a/lib/librte_cmdline/cmdline_parse_num.h b/lib/librte_cmdline/cmdline_parse_num.h
new file mode 100644 (file)
index 0000000..34aaa93
--- /dev/null
@@ -0,0 +1,119 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+/*
+ * Copyright (c) 2009, Olivier MATZ <zer0@droids-corp.org>
+ * All rights reserved.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of the University of California, Berkeley nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _PARSE_NUM_H_
+#define _PARSE_NUM_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+enum cmdline_numtype {
+       UINT8 = 0,
+       UINT16,
+       UINT32,
+       UINT64,
+       INT8,
+       INT16,
+       INT32,
+       INT64
+#ifdef CMDLINE_HAVE_FLOAT
+       ,FLOAT
+#endif
+};
+
+struct cmdline_token_num_data {
+       enum cmdline_numtype type;      /* expected width/signedness of the value */
+};
+
+struct cmdline_token_num {
+       struct cmdline_token_hdr hdr;           /* generic token header */
+       struct cmdline_token_num_data num_data; /* num-specific attributes */
+};
+typedef struct cmdline_token_num cmdline_parse_token_num_t;
+
+extern struct cmdline_token_ops cmdline_token_num_ops;
+
+int cmdline_parse_num(cmdline_parse_token_hdr_t *tk,
+                     const char *srcbuf, void *res);
+int cmdline_get_help_num(cmdline_parse_token_hdr_t *tk,
+                        char *dstbuf, unsigned int size);
+
+/*
+ * Warning! Not compatible with C++!
+ */
+#define TOKEN_NUM_INITIALIZER(structure, field, numtype)   \
+{                                                         \
+       .hdr = {                                           \
+               .ops = &cmdline_token_num_ops,             \
+               .offset = offsetof(structure, field),      \
+       },                                                 \
+       .num_data = {                                      \
+               .type = numtype,                           \
+       },                                                 \
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _PARSE_NUM_H_ */
diff --git a/lib/librte_cmdline/cmdline_parse_portlist.c b/lib/librte_cmdline/cmdline_parse_portlist.c
new file mode 100644 (file)
index 0000000..72b3b91
--- /dev/null
@@ -0,0 +1,172 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+/*
+ * Copyright (c) 2010, Keith Wiles <keith.wiles@windriver.com>
+ * All rights reserved.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of the University of California, Berkeley nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <inttypes.h>
+#include <ctype.h>
+#include <string.h>
+#include <errno.h>
+#include <stdarg.h>
+#include <netinet/in.h>
+#ifndef __linux__
+#include <net/socket.h>
+#endif
+
+#include <rte_string_fns.h>
+#include "cmdline_parse.h"
+#include "cmdline_parse_portlist.h"
+
+struct cmdline_token_ops cmdline_token_portlist_ops = {        /* vtable for portlist tokens */
+       .parse = cmdline_parse_portlist,        /* "3,4-6,8" -> 32-bit port bitmap */
+       .complete_get_nb = NULL,                /* completion not supported */
+       .complete_get_elt = NULL,
+       .get_help = cmdline_get_help_portlist,
+};
+
+static void
+parse_set_list(cmdline_portlist_t * pl, int low, int high)
+{
+       do {    /* callers (parse_ports) guarantee low <= high < 32 */
+               pl->map |= (1U << low++);       /* 1U: '1 << 31' on signed int is UB */
+       } while (low <= high);
+}
+
+static int
+parse_ports(cmdline_portlist_t * pl, const char * str)
+{
+       size_t ps, pe;                  /* start/end port numbers of one range */
+       const char *first, *last;
+       char *end;
+
+       for (first = str, last = first; /* walk comma-separated "N" or "N-M" fields */
+           first != NULL && last != NULL;
+           first = last + 1) {
+
+               last = strchr(first, ',');      /* NULL on the final field */
+
+               errno = 0;
+               ps = strtoul(first, &end, 10);
+               if (errno != 0 || end == first ||
+                   (end[0] != '-' && end[0] != 0 && end != last))
+                       return (-1);    /* not a number, or junk after it */
+
+               /* Support for N-M portlist format */
+               if (end[0] == '-') {
+                       errno = 0;
+                       first = end + 1;        /* reparse from just after the '-' */
+                       pe = strtoul(first, &end, 10);
+                       if (errno != 0 || end == first ||
+                           (end[0] != 0 && end != last))
+                               return (-1);
+               } else {
+                       pe = ps;        /* single port: degenerate range */
+               }
+
+               if (ps > pe || pe >= sizeof (pl->map) * 8)
+                       return (-1);    /* reversed range, or port beyond the 32-bit map */
+
+               parse_set_list(pl, ps, pe);     /* mark ports [ps..pe] in the bitmap */
+       }
+
+       return (0);
+}
+
+int
+cmdline_parse_portlist(__attribute__((unused)) cmdline_parse_token_hdr_t *tk,
+               const char *buf, void *res)
+{
+       unsigned int token_len = 0;
+       char portlist_str[PORTLIST_TOKEN_SIZE+1];
+       cmdline_portlist_t *pl = res;   /* output bitmap; may be NULL (dry-run parse) */
+
+       if (! *buf)
+               return (-1);    /* empty input: nothing to parse */
+
+       while (!cmdline_isendoftoken(buf[token_len]) &&
+           (token_len < PORTLIST_TOKEN_SIZE))
+               token_len++;    /* measure the token, bounded by the copy buffer */
+
+       if (token_len >= PORTLIST_TOKEN_SIZE)
+               return (-1);    /* token too long to copy out */
+
+       if (pl == NULL)
+               return (token_len);     /* validation-only call: just report length */
+
+       rte_snprintf(portlist_str, token_len+1, "%s", buf);     /* NUL-terminated copy of the token */
+
+       pl->map = 0;
+       if (strcmp("all", portlist_str) == 0)
+               pl->map = UINT32_MAX;   /* "all" selects every representable port */
+       else if (parse_ports(pl, portlist_str) != 0)
+               return (-1);
+
+       return token_len;       /* number of characters consumed from buf */
+}
+
+int cmdline_get_help_portlist(cmdline_parse_token_hdr_t *tk, char *dstbuf,
+                           unsigned int size)
+{
+       (void)tk;       /* help text is static; token carries no extra state */
+       rte_snprintf(dstbuf, size, "range of ports as 3,4-6,8-19,20");
+       return 0;       /* always succeeds */
+}
diff --git a/lib/librte_cmdline/cmdline_parse_portlist.h b/lib/librte_cmdline/cmdline_parse_portlist.h
new file mode 100644 (file)
index 0000000..6f481ca
--- /dev/null
@@ -0,0 +1,113 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+/*
+ * Copyright (c) 2010, Keith Wiles <keith.wiles@windriver.com>
+ * All rights reserved.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of the University of California, Berkeley nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _PARSE_PORTLIST_H_
+#define _PARSE_PORTLIST_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* max length of a portlist string accepted by the parser */
+#define PORTLIST_TOKEN_SIZE    128
+#define PORTLIST_MAX_TOKENS    32      /* NOTE(review): appears unused in this file - confirm */
+
+typedef struct cmdline_portlist {
+       uint32_t                map;    /* bit i set => port i selected (32 ports max) */
+} cmdline_portlist_t;
+
+struct cmdline_token_portlist_data {
+       uint8_t         flags;  /* reserved; initializer below always sets 0 */
+};
+
+struct cmdline_token_portlist {
+       struct cmdline_token_hdr hdr;   /* generic token header (ops + result offset) */
+       struct cmdline_token_portlist_data range_data;
+};
+typedef struct cmdline_token_portlist cmdline_parse_token_portlist_t;
+
+extern struct cmdline_token_ops cmdline_token_portlist_ops;    /* parse/help vtable */
+
+int cmdline_parse_portlist(cmdline_parse_token_hdr_t *tk,
+                     const char *srcbuf, void *res);   /* consumed length, or <0 on error */
+int cmdline_get_help_portlist(cmdline_parse_token_hdr_t *tk,
+                        char *dstbuf, unsigned int size);
+
+/*
+ * Warning! Uses C99 designated initializers - not compatible with C++!
+ */
+#define TOKEN_PORTLIST_INITIALIZER(structure, field)           \
+{                                                              \
+       .hdr = {                                                \
+               .ops = &cmdline_token_portlist_ops,             \
+               .offset = offsetof(structure, field),           \
+       },                                                      \
+       .range_data = {                                         \
+               .flags = 0,                                     \
+       },                                                      \
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _PARSE_PORTLIST_H_ */
diff --git a/lib/librte_cmdline/cmdline_parse_string.c b/lib/librte_cmdline/cmdline_parse_string.c
new file mode 100644 (file)
index 0000000..55cc4d5
--- /dev/null
@@ -0,0 +1,228 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+/*
+ * Copyright (c) 2009, Olivier MATZ <zer0@droids-corp.org>
+ * All rights reserved.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of the University of California, Berkeley nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <inttypes.h>
+#include <ctype.h>
+#include <string.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <rte_string_fns.h>
+
+#include "cmdline_parse.h"
+#include "cmdline_parse_string.h"
+
+struct cmdline_token_ops cmdline_token_string_ops = {  /* vtable for string tokens */
+       .parse = cmdline_parse_string,
+       .complete_get_nb = cmdline_complete_get_nb_string,      /* number of completions */
+       .complete_get_elt = cmdline_complete_get_elt_string,    /* i-th completion text */
+       .get_help = cmdline_get_help_string,
+};
+
+#define MULTISTRING_HELP "Mul-choice STRING"   /* fixed set of '#'-separated choices */
+#define ANYSTRING_HELP   "Any STRING"          /* free-form string */
+#define FIXEDSTRING_HELP "Fixed STRING"        /* single mandatory keyword */
+
+static unsigned int
+get_token_len(const char *s)
+{
+       unsigned int n;
+
+       /* length of the leading choice: stop at the '#' separator
+        * or at the end of the string, whichever comes first */
+       for (n = 0; s[n] != '\0'; n++) {
+               if (s[n] == '#')
+                       break;
+       }
+       return n;
+}
+
+static const char *
+get_next_token(const char *s)
+{
+       unsigned int skip;
+
+       /* a '#' after the current choice means another one follows */
+       skip = get_token_len(s);
+       return (s[skip] == '#') ? s + skip + 1 : NULL;
+}
+
+int
+cmdline_parse_string(cmdline_parse_token_hdr_t *tk, const char *buf, void *res)
+{
+       struct cmdline_token_string *tk2 = (struct cmdline_token_string *)tk;
+       struct cmdline_token_string_data *sd = &tk2->string_data;
+       unsigned int token_len;
+       const char *str;
+
+       if (! *buf)
+               return -1;      /* empty input cannot match anything */
+
+       /* fixed string */
+       if (sd->str) {
+               str = sd->str;  /* try each '#'-separated choice in turn */
+               do {
+                       token_len = get_token_len(str);
+
+                       /* if token is too big... */
+                       if (token_len >= STR_TOKEN_SIZE - 1) {
+                               continue;       /* skip this choice, try the next */
+                       }
+
+                       if ( strncmp(buf, str, token_len) ) {
+                               continue;       /* prefix mismatch */
+                       }
+
+                       if ( !cmdline_isendoftoken(*(buf+token_len)) ) {
+                               continue;       /* input continues past the choice */
+                       }
+
+                       break;  /* full match: str points at the winning choice */
+               } while ( (str = get_next_token(str)) != NULL );
+
+               if (!str)
+                       return -1;      /* every choice was rejected */
+       }
+       /* unspecified string */
+       else {
+               token_len=0;
+               while(!cmdline_isendoftoken(buf[token_len]) &&
+                     token_len < (STR_TOKEN_SIZE-1))
+                       token_len++;
+
+               /* return if token too long */
+               if (token_len >= STR_TOKEN_SIZE - 1) {
+                       return -1;
+               }
+       }
+
+       if (res) {
+               /* we are sure that token_len is < STR_TOKEN_SIZE-1 */
+               rte_snprintf(res, STR_TOKEN_SIZE, "%s", buf);
+               *((char *)res + token_len) = 0; /* cut at the end of the matched token */
+       }
+
+       return token_len;       /* number of characters consumed */
+}
+
+int cmdline_complete_get_nb_string(cmdline_parse_token_hdr_t *tk)
+{
+       struct cmdline_token_string *tk2 = (struct cmdline_token_string *)tk;
+       struct cmdline_token_string_data *sd = &tk2->string_data; /* was ';;': empty statement before a declaration, invalid C89 */
+       int ret=1;
+       const char *str;
+
+       if (!sd->str)
+               return 0;       /* free-form token: nothing to complete */
+
+       str = sd->str;
+       while( (str = get_next_token(str)) != NULL ) {
+               ret++;          /* one completion per '#'-separated choice */
+       }
+       return ret;
+}
+
+int cmdline_complete_get_elt_string(cmdline_parse_token_hdr_t *tk, int idx,
+                                   char *dstbuf, unsigned int size)
+{
+       struct cmdline_token_string *tk2 = (struct cmdline_token_string *)tk;
+       struct cmdline_token_string_data *sd = &tk2->string_data; /* was ';;' */
+       const char *s;
+       unsigned int len;
+
+       s = sd->str;    /* NULL for free-form tokens: no completion list */
+
+       while (idx-- && s)
+               s = get_next_token(s);  /* advance to the idx-th '#'-separated choice */
+
+       if (!s)
+               return -1;      /* idx out of range, or free-form token */
+
+       len = get_token_len(s);
+       if (len >= size)        /* was 'len > size - 1': underflowed when size == 0 */
+               return -1;
+
+       memcpy(dstbuf, s, len);
+       dstbuf[len] = '\0';
+       return 0;
+}
+
+
+int cmdline_get_help_string(cmdline_parse_token_hdr_t *tk, char *dstbuf,
+                           unsigned int size)
+{
+       struct cmdline_token_string *tk2 = (struct cmdline_token_string *)tk;
+       struct cmdline_token_string_data *sd = &tk2->string_data; /* was ';;' */
+       const char *s;
+
+       s = sd->str;
+
+       if (s) {
+               if (get_next_token(s))  /* a '#' separator means several choices */
+                       rte_snprintf(dstbuf, size, MULTISTRING_HELP);
+               else
+                       rte_snprintf(dstbuf, size, FIXEDSTRING_HELP);
+       } else
+               rte_snprintf(dstbuf, size, ANYSTRING_HELP);     /* free-form token */
+
+       return 0;
+}
diff --git a/lib/librte_cmdline/cmdline_parse_string.h b/lib/librte_cmdline/cmdline_parse_string.h
new file mode 100644 (file)
index 0000000..35239b9
--- /dev/null
@@ -0,0 +1,113 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+/*
+ * Copyright (c) 2009, Olivier MATZ <zer0@droids-corp.org>
+ * All rights reserved.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of the University of California, Berkeley nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _PARSE_STRING_H_
+#define _PARSE_STRING_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* max size of a string token, including the terminating NUL */
+#define STR_TOKEN_SIZE 128
+
+typedef char cmdline_fixed_string_t[STR_TOKEN_SIZE];   /* result buffer type for parsed strings */
+
+struct cmdline_token_string_data {
+       const char *str;        /* NULL: accept any string; else '#'-separated choices */
+};
+
+struct cmdline_token_string {
+       struct cmdline_token_hdr hdr;   /* generic token header (ops + result offset) */
+       struct cmdline_token_string_data string_data;
+};
+typedef struct cmdline_token_string cmdline_parse_token_string_t;
+
+extern struct cmdline_token_ops cmdline_token_string_ops;      /* parse/complete/help vtable */
+
+int cmdline_parse_string(cmdline_parse_token_hdr_t *tk, const char *srcbuf,
+                        void *res);    /* consumed length, or -1 on mismatch */
+int cmdline_complete_get_nb_string(cmdline_parse_token_hdr_t *tk);
+int cmdline_complete_get_elt_string(cmdline_parse_token_hdr_t *tk, int idx,
+                                   char *dstbuf, unsigned int size);
+int cmdline_get_help_string(cmdline_parse_token_hdr_t *tk, char *dstbuf,
+                           unsigned int size);
+
+/*
+ * Warning! Uses C99 designated initializers - not compatible with C++!
+ */
+#define TOKEN_STRING_INITIALIZER(structure, field, string)  \
+{                                                          \
+       .hdr = {                                            \
+               .ops = &cmdline_token_string_ops,           \
+               .offset = offsetof(structure, field),       \
+       },                                                  \
+       .string_data = {                                    \
+               .str = string,                              \
+       },                                                  \
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _PARSE_STRING_H_ */
diff --git a/lib/librte_cmdline/cmdline_rdline.c b/lib/librte_cmdline/cmdline_rdline.c
new file mode 100644 (file)
index 0000000..edc479c
--- /dev/null
@@ -0,0 +1,675 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+/*
+ * Copyright (c) 2009, Olivier MATZ <zer0@droids-corp.org>
+ * All rights reserved.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of the University of California, Berkeley nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <string.h>
+#include <stdarg.h>
+#include <ctype.h>
+
+#include "cmdline_cirbuf.h"
+#include "cmdline_rdline.h"
+
+static void rdline_puts(struct rdline *rdl, const char *buf);  /* write a raw string via write_char */
+static void rdline_miniprintf(struct rdline *rdl,
+                             const char *buf, unsigned int val);       /* minimal printf-like helper; defined below */
+
+#ifndef NO_RDLINE_HISTORY
+static void rdline_remove_old_history_item(struct rdline *rdl);
+static void rdline_remove_first_history_item(struct rdline *rdl);
+static unsigned int rdline_get_history_size(struct rdline *rdl);
+#endif /* !NO_RDLINE_HISTORY */
+
+
+/* isblank() needs _XOPEN_SOURCE >= 600 || _ISOC99_SOURCE, so use our
+ * own. */
+static int
+isblank2(char c)
+{
+       /* recognize the two horizontal-whitespace characters only;
+        * avoids isblank(), which needs _XOPEN_SOURCE >= 600
+        * (see the comment above this function).
+        */
+       return (c == ' ' || c == '\t') ? 1 : 0;
+}
+
+void
+rdline_init(struct rdline *rdl,
+                rdline_write_char_t *write_char,
+                rdline_validate_t *validate,
+                rdline_complete_t *complete)
+{
+       memset(rdl, 0, sizeof(*rdl));
+       rdl->validate = validate;
+       rdl->complete = complete;
+       rdl->write_char = write_char;
+       rdl->status = RDLINE_INIT;
+#ifndef NO_RDLINE_HISTORY
+       cirbuf_init(&rdl->history, rdl->history_buf, 0, RDLINE_HISTORY_BUF_SIZE);
+#endif /* !NO_RDLINE_HISTORY */
+}
+
+void
+rdline_newline(struct rdline *rdl, const char *prompt)
+{
+       unsigned int i;
+
+       vt100_init(&rdl->vt100);
+       cirbuf_init(&rdl->left, rdl->left_buf, 0, RDLINE_BUF_SIZE);
+       cirbuf_init(&rdl->right, rdl->right_buf, 0, RDLINE_BUF_SIZE);
+
+       if (prompt != rdl->prompt)
+               memcpy(rdl->prompt, prompt, sizeof(rdl->prompt)-1);
+       rdl->prompt_size = strnlen(prompt, RDLINE_PROMPT_SIZE);
+
+       for (i=0 ; i<rdl->prompt_size ; i++)
+               rdl->write_char(rdl, rdl->prompt[i]);
+       rdl->status = RDLINE_RUNNING;
+
+#ifndef NO_RDLINE_HISTORY
+       rdl->history_cur_line = -1;
+#endif /* !NO_RDLINE_HISTORY */
+}
+
+void
+rdline_stop(struct rdline *rdl)
+{
+       rdl->status = RDLINE_INIT;      /* rdline_char_in() then returns RDLINE_RES_NOT_RUNNING */
+}
+
+void
+rdline_quit(struct rdline *rdl)
+{
+       rdl->status = RDLINE_EXITED;    /* rdline_char_in() then returns RDLINE_RES_EXITED */
+}
+
+void
+rdline_restart(struct rdline *rdl)
+{
+       rdl->status = RDLINE_RUNNING;   /* resume accepting input characters */
+}
+
+void
+rdline_reset(struct rdline *rdl)
+{
+       vt100_init(&rdl->vt100);        /* drop any pending escape-sequence state */
+       cirbuf_init(&rdl->left, rdl->left_buf, 0, RDLINE_BUF_SIZE);     /* discard the edited line */
+       cirbuf_init(&rdl->right, rdl->right_buf, 0, RDLINE_BUF_SIZE);
+
+       rdl->status = RDLINE_RUNNING;
+
+#ifndef NO_RDLINE_HISTORY
+       rdl->history_cur_line = -1;     /* restart history browsing from scratch */
+#endif /* !NO_RDLINE_HISTORY */
+}
+
+const char *
+rdline_get_buffer(struct rdline *rdl)
+{
+       unsigned int len_l, len_r;
+       cirbuf_align_left(&rdl->left);  /* make both circular buffers contiguous */
+       cirbuf_align_left(&rdl->right);
+
+       len_l = CIRBUF_GET_LEN(&rdl->left);
+       len_r = CIRBUF_GET_LEN(&rdl->right);
+       memcpy(rdl->left_buf+len_l, rdl->right_buf, len_r);     /* splice right half after left */
+
+       rdl->left_buf[len_l + len_r] = '\n';    /* NOTE(review): assumes left_buf can hold both halves plus "\n\0" - confirm its declared size */
+       rdl->left_buf[len_l + len_r + 1] = '\0';
+       return rdl->left_buf;   /* valid until the next edit touches the buffers */
+}
+
+static void
+display_right_buffer(struct rdline *rdl, int force)
+{
+       unsigned int i;
+       char tmp;
+
+       if (!force && CIRBUF_IS_EMPTY(&rdl->right))
+               return;         /* nothing right of the cursor: no repaint needed */
+
+       rdline_puts(rdl, vt100_clear_right);    /* erase to end of line first */
+       CIRBUF_FOREACH(&rdl->right, i, tmp) {
+               rdl->write_char(rdl, tmp);
+       }
+       if (!CIRBUF_IS_EMPTY(&rdl->right))
+               rdline_miniprintf(rdl, vt100_multi_left,
+                                 CIRBUF_GET_LEN(&rdl->right)); /* move cursor back where it was */
+}
+
+/*
+ * Redraw the whole line: go back to the start of the line
+ * (vt100_home), then reprint the prompt, the left buffer and the
+ * right buffer.
+ */
+void
+rdline_redisplay(struct rdline *rdl)
+{
+       unsigned int i;
+       char tmp;
+
+       rdline_puts(rdl, vt100_home);
+       for (i=0 ; i<rdl->prompt_size ; i++)
+               rdl->write_char(rdl, rdl->prompt[i]);
+       CIRBUF_FOREACH(&rdl->left, i, tmp) {
+               rdl->write_char(rdl, tmp);
+       }
+       display_right_buffer(rdl, 1);
+}
+
+/*
+ * Feed one input char to the line editor.
+ *
+ * The char first goes through the vt100 parser so multi-char escape
+ * sequences (arrows, etc.) are decoded into CMDLINE_KEY_* commands,
+ * dispatched in the switch below.  Plain printable chars are appended
+ * to the left-of-cursor buffer at the end of the function.  Return
+ * values are the RDLINE_RES_* constants from cmdline_rdline.h.
+ */
+int
+rdline_char_in(struct rdline *rdl, char c)
+{
+       unsigned int i;
+       int cmd;
+       char tmp;
+#ifndef NO_RDLINE_HISTORY
+       char *buf;
+#endif
+
+       if (rdl->status == RDLINE_EXITED)
+               return RDLINE_RES_EXITED;
+       if (rdl->status != RDLINE_RUNNING)
+               return RDLINE_RES_NOT_RUNNING;
+
+       /* NOTE(review): -2 appears to mean "char consumed by a still
+        * incomplete escape sequence" -- confirm in cmdline_vt100.h */
+       cmd = vt100_parser(&rdl->vt100, c);
+       if (cmd == -2)
+               return RDLINE_RES_SUCCESS;
+
+       if (cmd >= 0) {
+               switch (cmd) {
+               /* cursor one char left: tail of "left" becomes head of
+                * "right" */
+               case CMDLINE_KEY_CTRL_B:
+               case CMDLINE_KEY_LEFT_ARR:
+                       if (CIRBUF_IS_EMPTY(&rdl->left))
+                               break;
+                       tmp = cirbuf_get_tail(&rdl->left);
+                       cirbuf_del_tail(&rdl->left);
+                       cirbuf_add_head(&rdl->right, tmp);
+                       rdline_puts(rdl, vt100_left_arr);
+                       break;
+
+               /* cursor one char right */
+               case CMDLINE_KEY_CTRL_F:
+               case CMDLINE_KEY_RIGHT_ARR:
+                       if (CIRBUF_IS_EMPTY(&rdl->right))
+                               break;
+                       tmp = cirbuf_get_head(&rdl->right);
+                       cirbuf_del_head(&rdl->right);
+                       cirbuf_add_tail(&rdl->left, tmp);
+                       rdline_puts(rdl, vt100_right_arr);
+                       break;
+
+               /* cursor one word left: first skip blanks, then skip
+                * the word itself */
+               case CMDLINE_KEY_WLEFT:
+                       while (! CIRBUF_IS_EMPTY(&rdl->left) &&
+                              (tmp = cirbuf_get_tail(&rdl->left)) &&
+                              isblank2(tmp)) {
+                               rdline_puts(rdl, vt100_left_arr);
+                               cirbuf_del_tail(&rdl->left);
+                               cirbuf_add_head(&rdl->right, tmp);
+                       }
+                       while (! CIRBUF_IS_EMPTY(&rdl->left) &&
+                              (tmp = cirbuf_get_tail(&rdl->left)) &&
+                              !isblank2(tmp)) {
+                               rdline_puts(rdl, vt100_left_arr);
+                               cirbuf_del_tail(&rdl->left);
+                               cirbuf_add_head(&rdl->right, tmp);
+                       }
+                       break;
+
+               /* cursor one word right */
+               case CMDLINE_KEY_WRIGHT:
+                       while (! CIRBUF_IS_EMPTY(&rdl->right) &&
+                              (tmp = cirbuf_get_head(&rdl->right)) &&
+                              isblank2(tmp)) {
+                               rdline_puts(rdl, vt100_right_arr);
+                               cirbuf_del_head(&rdl->right);
+                               cirbuf_add_tail(&rdl->left, tmp);
+                       }
+                       while (! CIRBUF_IS_EMPTY(&rdl->right) &&
+                              (tmp = cirbuf_get_head(&rdl->right)) &&
+                              !isblank2(tmp)) {
+                               rdline_puts(rdl, vt100_right_arr);
+                               cirbuf_del_head(&rdl->right);
+                               cirbuf_add_tail(&rdl->left, tmp);
+                       }
+                       break;
+
+               /* delete the char left of the cursor */
+               case CMDLINE_KEY_BKSPACE:
+                       if(!cirbuf_del_tail_safe(&rdl->left)) {
+                               rdline_puts(rdl, vt100_bs);
+                               display_right_buffer(rdl, 1);
+                       }
+                       break;
+
+               /* delete the word left of the cursor */
+               case CMDLINE_KEY_META_BKSPACE:
+               case CMDLINE_KEY_CTRL_W:
+                       while (! CIRBUF_IS_EMPTY(&rdl->left) && isblank2(cirbuf_get_tail(&rdl->left))) {
+                               rdline_puts(rdl, vt100_bs);
+                               cirbuf_del_tail(&rdl->left);
+                       }
+                       while (! CIRBUF_IS_EMPTY(&rdl->left) && !isblank2(cirbuf_get_tail(&rdl->left))) {
+                               rdline_puts(rdl, vt100_bs);
+                               cirbuf_del_tail(&rdl->left);
+                       }
+                       display_right_buffer(rdl, 1);
+                       break;
+
+               /* delete the word right of the cursor */
+               case CMDLINE_KEY_META_D:
+                       while (! CIRBUF_IS_EMPTY(&rdl->right) && isblank2(cirbuf_get_head(&rdl->right)))
+                               cirbuf_del_head(&rdl->right);
+                       while (! CIRBUF_IS_EMPTY(&rdl->right) && !isblank2(cirbuf_get_head(&rdl->right)))
+                               cirbuf_del_head(&rdl->right);
+                       display_right_buffer(rdl, 1);
+                       break;
+
+               /* delete the char under the cursor; ctrl-d on an empty
+                * line means end-of-file */
+               case CMDLINE_KEY_SUPPR:
+               case CMDLINE_KEY_CTRL_D:
+                       if (cmd == CMDLINE_KEY_CTRL_D &&
+                           CIRBUF_IS_EMPTY(&rdl->left) &&
+                           CIRBUF_IS_EMPTY(&rdl->right)) {
+                               return RDLINE_RES_EOF;
+                       }
+                       if (!cirbuf_del_head_safe(&rdl->right)) {
+                               display_right_buffer(rdl, 1);
+                       }
+                       break;
+
+               /* go to beginning of line */
+               case CMDLINE_KEY_CTRL_A:
+                       if (CIRBUF_IS_EMPTY(&rdl->left))
+                               break;
+                       rdline_miniprintf(rdl, vt100_multi_left,
+                                           CIRBUF_GET_LEN(&rdl->left));
+                       while (! CIRBUF_IS_EMPTY(&rdl->left)) {
+                               tmp = cirbuf_get_tail(&rdl->left);
+                               cirbuf_del_tail(&rdl->left);
+                               cirbuf_add_head(&rdl->right, tmp);
+                       }
+                       break;
+
+               /* go to end of line */
+               case CMDLINE_KEY_CTRL_E:
+                       if (CIRBUF_IS_EMPTY(&rdl->right))
+                               break;
+                       rdline_miniprintf(rdl, vt100_multi_right,
+                                           CIRBUF_GET_LEN(&rdl->right));
+                       while (! CIRBUF_IS_EMPTY(&rdl->right)) {
+                               tmp = cirbuf_get_head(&rdl->right);
+                               cirbuf_del_head(&rdl->right);
+                               cirbuf_add_tail(&rdl->left, tmp);
+                       }
+                       break;
+
+#ifndef NO_RDLINE_KILL_BUF
+               /* kill (cut) from cursor to end of line into kill_buf */
+               case CMDLINE_KEY_CTRL_K:
+                       cirbuf_get_buf_head(&rdl->right, rdl->kill_buf, RDLINE_BUF_SIZE);
+                       rdl->kill_size = CIRBUF_GET_LEN(&rdl->right);
+                       cirbuf_del_buf_head(&rdl->right, rdl->kill_size);
+                       rdline_puts(rdl, vt100_clear_right);
+                       break;
+
+               /* yank (paste) kill_buf at the cursor, as far as it
+                * fits in the line buffer */
+               case CMDLINE_KEY_CTRL_Y:
+                       i=0;
+                       while(CIRBUF_GET_LEN(&rdl->right) + CIRBUF_GET_LEN(&rdl->left) <
+                             RDLINE_BUF_SIZE &&
+                             i < rdl->kill_size) {
+                               cirbuf_add_tail(&rdl->left, rdl->kill_buf[i]);
+                               rdl->write_char(rdl, rdl->kill_buf[i]);
+                               i++;
+                       }
+                       display_right_buffer(rdl, 0);
+                       break;
+#endif /* !NO_RDLINE_KILL_BUF */
+
+               /* cancel the current line and start a fresh one */
+               case CMDLINE_KEY_CTRL_C:
+                       rdline_puts(rdl, "\r\n");
+                       rdline_newline(rdl, rdl->prompt);
+                       break;
+
+               /* redraw the whole line */
+               case CMDLINE_KEY_CTRL_L:
+                       rdline_redisplay(rdl);
+                       break;
+
+               /* completion / help: the left buffer is NUL-terminated
+                * in place and handed to the user complete() callback */
+               case CMDLINE_KEY_TAB:
+               case CMDLINE_KEY_HELP:
+                       cirbuf_align_left(&rdl->left);
+                       rdl->left_buf[CIRBUF_GET_LEN(&rdl->left)] = '\0';
+                       if (rdl->complete) {
+                               char tmp_buf[BUFSIZ];
+                               int complete_state;
+                               int ret;
+                               unsigned int tmp_size;
+
+                               if (cmd == CMDLINE_KEY_TAB)
+                                       complete_state = 0;
+                               else
+                                       complete_state = -1;
+
+                               /* see in parse.h for help on complete() */
+                               ret = rdl->complete(rdl, rdl->left_buf,
+                                                   tmp_buf, sizeof(tmp_buf),
+                                                   &complete_state);
+                               /* no completion or error */
+                               if (ret <= 0) {
+                                       return RDLINE_RES_COMPLETE;
+                               }
+
+                               tmp_size = strnlen(tmp_buf, sizeof(tmp_buf));
+                               /* add chars */
+                               if (ret == RDLINE_RES_COMPLETE) {
+                                       i=0;
+                                       while(CIRBUF_GET_LEN(&rdl->right) + CIRBUF_GET_LEN(&rdl->left) <
+                                             RDLINE_BUF_SIZE &&
+                                             i < tmp_size) {
+                                               cirbuf_add_tail(&rdl->left, tmp_buf[i]);
+                                               rdl->write_char(rdl, tmp_buf[i]);
+                                               i++;
+                                       }
+                                       display_right_buffer(rdl, 1);
+                                       return RDLINE_RES_COMPLETE; /* ?? */
+                               }
+
+                               /* choice: keep calling complete() and
+                                * print each candidate on its own line */
+                               rdline_puts(rdl, "\r\n");
+                               while (ret) {
+                                       rdl->write_char(rdl, ' ');
+                                       for (i=0 ; tmp_buf[i] ; i++)
+                                               rdl->write_char(rdl, tmp_buf[i]);
+                                       rdline_puts(rdl, "\r\n");
+                                       ret = rdl->complete(rdl, rdl->left_buf,
+                                                           tmp_buf, sizeof(tmp_buf),
+                                                           &complete_state);
+                               }
+
+                               rdline_redisplay(rdl);
+                       }
+                       return RDLINE_RES_COMPLETE;
+
+               /* validate the line through the user callback */
+               case CMDLINE_KEY_RETURN:
+               case CMDLINE_KEY_RETURN2:
+                       rdline_get_buffer(rdl);
+                       rdl->status = RDLINE_INIT;
+                       rdline_puts(rdl, "\r\n");
+#ifndef NO_RDLINE_HISTORY
+                       /* drop the temporary entry saved while browsing
+                        * history (see UP_ARR below) */
+                       if (rdl->history_cur_line != -1)
+                               rdline_remove_first_history_item(rdl);
+#endif
+
+                       /* +2 accounts for the "\n\0" appended by
+                        * rdline_get_buffer().  NOTE(review): if the
+                        * cursor is not at end of line, this size does
+                        * not include the right-buffer chars merged
+                        * into left_buf -- looks suspicious, confirm */
+                       if (rdl->validate)
+                               rdl->validate(rdl, rdl->left_buf, CIRBUF_GET_LEN(&rdl->left)+2);
+                       /* user may have stopped rdline */
+                       if (rdl->status == RDLINE_EXITED)
+                               return RDLINE_RES_EXITED;
+                       return RDLINE_RES_VALIDATED;
+
+#ifndef NO_RDLINE_HISTORY
+               /* history: save the in-progress line as entry 0 the
+                * first time, then load the previous (older) entry */
+               case CMDLINE_KEY_UP_ARR:
+               case CMDLINE_KEY_CTRL_P:
+                       if (rdl->history_cur_line == 0) {
+                               rdline_remove_first_history_item(rdl);
+                       }
+                       if (rdl->history_cur_line <= 0) {
+                               rdline_add_history(rdl, rdline_get_buffer(rdl));
+                               rdl->history_cur_line = 0;
+                       }
+
+                       buf = rdline_get_history_item(rdl, rdl->history_cur_line + 1);
+                       if (!buf)
+                               break;
+
+                       rdl->history_cur_line ++;
+                       vt100_init(&rdl->vt100);
+                       cirbuf_init(&rdl->left, rdl->left_buf, 0, RDLINE_BUF_SIZE);
+                       cirbuf_init(&rdl->right, rdl->right_buf, 0, RDLINE_BUF_SIZE);
+                       cirbuf_add_buf_tail(&rdl->left, buf, strnlen(buf, RDLINE_BUF_SIZE));
+                       rdline_redisplay(rdl);
+                       break;
+
+               /* history: load the next (more recent) entry */
+               case CMDLINE_KEY_DOWN_ARR:
+               case CMDLINE_KEY_CTRL_N:
+                       if (rdl->history_cur_line - 1 < 0)
+                               break;
+
+                       rdl->history_cur_line --;
+                       buf = rdline_get_history_item(rdl, rdl->history_cur_line);
+                       if (!buf)
+                               break;
+                       vt100_init(&rdl->vt100);
+                       cirbuf_init(&rdl->left, rdl->left_buf, 0, RDLINE_BUF_SIZE);
+                       cirbuf_init(&rdl->right, rdl->right_buf, 0, RDLINE_BUF_SIZE);
+                       cirbuf_add_buf_tail(&rdl->left, buf, strnlen(buf, RDLINE_BUF_SIZE));
+                       rdline_redisplay(rdl);
+
+                       break;
+#endif /* !NO_RDLINE_HISTORY */
+
+
+               default:
+                       break;
+               }
+
+               return RDLINE_RES_SUCCESS;
+       }
+
+       if (!isprint((int)c))
+               return RDLINE_RES_SUCCESS;
+
+       /* standard chars */
+       if (CIRBUF_GET_LEN(&rdl->left) + CIRBUF_GET_LEN(&rdl->right) >= RDLINE_BUF_SIZE)
+               return RDLINE_RES_SUCCESS;
+
+       if (cirbuf_add_tail_safe(&rdl->left, c))
+               return RDLINE_RES_SUCCESS;
+
+       rdl->write_char(rdl, c);
+       display_right_buffer(rdl, 0);
+
+       return RDLINE_RES_SUCCESS;
+}
+
+
+/* HISTORY */
+
+#ifndef NO_RDLINE_HISTORY
+/* Drop the oldest history entry: pop chars from the head of the
+ * history circular buffer up to and including its 0 terminator. */
+static void
+rdline_remove_old_history_item(struct rdline * rdl)
+{
+       char tmp;
+
+       while (! CIRBUF_IS_EMPTY(&rdl->history) ) {
+               tmp = cirbuf_get_head(&rdl->history);
+               cirbuf_del_head(&rdl->history);
+               if (!tmp)
+                       break;
+       }
+}
+
+/* Drop the newest history entry (stored at the tail): delete its 0
+ * terminator first, then its chars, stopping at -- and keeping -- the
+ * previous entry's terminator. */
+static void
+rdline_remove_first_history_item(struct rdline * rdl)
+{
+       char tmp;
+
+       if ( CIRBUF_IS_EMPTY(&rdl->history) ) {
+               return;
+       }
+       else {
+               cirbuf_del_tail(&rdl->history);
+       }
+
+       while (! CIRBUF_IS_EMPTY(&rdl->history) ) {
+               tmp = cirbuf_get_tail(&rdl->history);
+               if (!tmp)
+                       break;
+               cirbuf_del_tail(&rdl->history);
+       }
+}
+
+/* Number of entries currently stored: each entry ends with a 0
+ * terminator, so count those. */
+static unsigned int
+rdline_get_history_size(struct rdline * rdl)
+{
+       unsigned int i, tmp, ret=0;
+
+       CIRBUF_FOREACH(&rdl->history, i, tmp) {
+               if (tmp == 0)
+                       ret ++;
+       }
+
+       return ret;
+}
+
+/*
+ * Return a pointer to history entry `idx`, or NULL if out of range.
+ * Entries are stored oldest-first in the buffer, and `len` counts
+ * down as terminators are passed, so idx 0 is the most recently added
+ * entry (this matches the up-arrow handler, which fetches
+ * history_cur_line + 1 to go back in time).  The buffer is
+ * left-aligned first so the returned pointer is a plain C string.
+ */
+char *
+rdline_get_history_item(struct rdline * rdl, unsigned int idx)
+{
+       unsigned int len, i, tmp;
+
+       len = rdline_get_history_size(rdl);
+       if ( idx >= len ) {
+               return NULL;
+       }
+
+       cirbuf_align_left(&rdl->history);
+
+       CIRBUF_FOREACH(&rdl->history, i, tmp) {
+               if ( idx == len - 1) {
+                       return rdl->history_buf + i;
+               }
+               if (tmp == 0)
+                       len --;
+       }
+
+       return NULL;
+}
+
+/*
+ * Append `buf` to the history, truncated at the first '\n' (callers
+ * pass rdline_get_buffer() output, which ends in "\n").  Returns -1
+ * if the line alone would not fit in the history buffer; otherwise
+ * evicts oldest entries until there is room, then stores the chars
+ * followed by a 0 terminator.
+ */
+int
+rdline_add_history(struct rdline * rdl, const char * buf)
+{
+       unsigned int len, i;
+
+       len = strnlen(buf, RDLINE_BUF_SIZE);
+       for (i=0; i<len ; i++) {
+               if (buf[i] == '\n') {
+                       len = i;
+                       break;
+               }
+       }
+
+       if ( len >= RDLINE_HISTORY_BUF_SIZE )
+               return -1;
+
+       /* >= keeps one byte free for the 0 terminator below */
+       while ( len >= CIRBUF_GET_FREELEN(&rdl->history) ) {
+               rdline_remove_old_history_item(rdl);
+       }
+
+       cirbuf_add_buf_tail(&rdl->history, buf, len);
+       cirbuf_add_tail(&rdl->history, 0);
+
+       return 0;
+}
+
+/* Discard all history entries by reinitializing the circular buffer. */
+void
+rdline_clear_history(struct rdline * rdl)
+{
+       cirbuf_init(&rdl->history, rdl->history_buf, 0, RDLINE_HISTORY_BUF_SIZE);
+}
+
+#else /* !NO_RDLINE_HISTORY */
+
+/* History support compiled out: keep the public API with no-op stubs
+ * so callers still link. */
+int rdline_add_history(struct rdline * rdl, const char * buf) {return -1;}
+void rdline_clear_history(struct rdline * rdl) {}
+char * rdline_get_history_item(struct rdline * rdl, unsigned int i) {return NULL;}
+
+
+#endif /* !NO_RDLINE_HISTORY */
+
+
+/* STATIC USEFUL FUNCS */
+
+/* Write a NUL-terminated string through the write_char callback. */
+static void
+rdline_puts(struct rdline * rdl, const char * buf)
+{
+       char c;
+       while ( (c = *(buf++)) != '\0' ) {
+               rdl->write_char(rdl, c);
+       }
+}
+
+/* A very, very basic printf understanding only the 'u' format, used
+ * to emit vt100 cursor-movement sequences.  `div` is a char starting
+ * at 100, so only values below 1000 print correctly -- callers pass
+ * buffer lengths, which never exceed 255. */
+static void
+rdline_miniprintf(struct rdline *rdl, const char * buf, unsigned int val)
+{
+       char c, started=0, div=100;
+
+       while ( (c=*(buf++)) ) {
+               if (c != '%') {
+                       rdl->write_char(rdl, c);
+                       continue;
+               }
+               c = *(buf++);
+               if (c != 'u') {
+                       /* unknown format: echo it verbatim */
+                       rdl->write_char(rdl, '%');
+                       rdl->write_char(rdl, c);
+                       continue;
+               }
+               /* val is never more than 255 */
+               while (div) {
+                       c = (char)(val / div);
+                       if (c || started) {
+                               rdl->write_char(rdl, (char)(c+'0'));
+                               started = 1;
+                       }
+                       val %= div;
+                       div /= 10;
+               }
+       }
+}
+
diff --git a/lib/librte_cmdline/cmdline_rdline.h b/lib/librte_cmdline/cmdline_rdline.h
new file mode 100644 (file)
index 0000000..1ff9845
--- /dev/null
@@ -0,0 +1,260 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+/*
+ * Copyright (c) 2009, Olivier MATZ <zer0@droids-corp.org>
+ * All rights reserved.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of the University of California, Berkeley nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RDLINE_H_
+#define _RDLINE_H_
+
+/**
+ * This file is a small equivalent to the GNU readline library, but it
+ * was originally designed for small systems, like Atmel AVR
+ * microcontrollers (8 bits). Indeed, we don't use any malloc that is
+ * sometimes not implemented (or just not recommended) on such
+ * systems.
+ *
+ * Obviously, it does not support as many things as the GNU readline,
+ * but at least it supports some interesting features like a kill
+ * buffer and a command history.
+ *
+ * It also has a feature that the GNU readline does not have (as far
+ * as I know): several instances of it can run at the same time, even
+ * in a monothread program, since it works with callbacks.
+ *
+ * The lib is designed for a client-side or a server-side use:
+ * - server-side: the server receives all data from a socket, including
+ *   control chars, like arrows, tabulations, ... The client is
+ *   very simple, it can be a telnet or a minicom through a serial line.
+ * - client-side: the client receives its data through its stdin for
+ *   instance.
+ */
+
+#include <cmdline_cirbuf.h>
+#include <cmdline_vt100.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* configuration */
+#define RDLINE_BUF_SIZE 256
+#define RDLINE_PROMPT_SIZE  32
+#define RDLINE_VT100_BUF_SIZE  8
+#define RDLINE_HISTORY_BUF_SIZE BUFSIZ
+#define RDLINE_HISTORY_MAX_LINE 64
+
+enum rdline_status {
+       RDLINE_INIT,    /* stopped / not started: input is ignored */
+       RDLINE_RUNNING, /* actively editing a line */
+       RDLINE_EXITED   /* quit: rdline_char_in() returns RDLINE_RES_EXITED */
+};
+
+struct rdline;
+
+typedef int (rdline_write_char_t)(struct rdline *rdl, char);
+typedef void (rdline_validate_t)(struct rdline *rdl,
+                                const char *buf, unsigned int size);
+typedef int (rdline_complete_t)(struct rdline *rdl, const char *buf,
+                               char *dstbuf, unsigned int dstsize,
+                               int *state);
+
+struct rdline {
+       enum rdline_status status;
+       /* rdline bufs: the edited line is split at the cursor */
+       struct cirbuf left;     /* chars left of the cursor */
+       struct cirbuf right;    /* chars right of the cursor */
+       char left_buf[RDLINE_BUF_SIZE+2]; /* reserve 2 chars for the \n\0 */
+       char right_buf[RDLINE_BUF_SIZE];
+
+       char prompt[RDLINE_PROMPT_SIZE];
+       unsigned int prompt_size;
+
+#ifndef NO_RDLINE_KILL_BUF
+       /* text cut with ctrl-k, pasted back with ctrl-y */
+       char kill_buf[RDLINE_BUF_SIZE];
+       unsigned int kill_size;
+#endif
+
+#ifndef NO_RDLINE_HISTORY
+       /* history: 0-terminated entries packed in one circular buffer */
+       struct cirbuf history;
+       char history_buf[RDLINE_HISTORY_BUF_SIZE];
+       int history_cur_line;   /* -1 when not browsing history */
+#endif
+
+       /* callbacks and func pointers */
+       rdline_write_char_t *write_char;
+       rdline_validate_t *validate;
+       rdline_complete_t *complete;
+
+       /* vt100 parser */
+       struct cmdline_vt100 vt100;
+
+       /* opaque pointer */
+       void *opaque;
+};
+
+/**
+ * Init fields for a struct rdline. Call this only once at the beginning
+ * of your program.
+ * \param rdl A pointer to an uninitialized struct rdline
+ * \param write_char The function used by rdline to write a character
+ * \param validate A pointer to the function to execute when the
+ *                 user validates the buffer.
+ * \param complete A pointer to the function to execute when the
+ *                 user completes the buffer.
+ */
+void rdline_init(struct rdline *rdl,
+                rdline_write_char_t *write_char,
+                rdline_validate_t *validate,
+                rdline_complete_t *complete);
+
+
+/**
+ * Init the current buffer, and display a prompt.
+ * \param rdl A pointer to a struct rdline
+ * \param prompt A string containing the prompt
+ */
+void rdline_newline(struct rdline *rdl, const char *prompt);
+
+/**
+ * Call it and all received chars will be ignored.
+ * \param rdl A pointer to a struct rdline
+ */
+void rdline_stop(struct rdline *rdl);
+
+/**
+ * Same as rdline_stop() except that next calls to rdline_char_in()
+ * will return RDLINE_RES_EXITED.
+ * \param rdl A pointer to a struct rdline
+ */
+void rdline_quit(struct rdline *rdl);
+
+/**
+ * Restart after a call to rdline_stop() or rdline_quit()
+ * \param rdl A pointer to a struct rdline
+ */
+void rdline_restart(struct rdline *rdl);
+
+/**
+ * Redisplay the current buffer
+ * \param rdl A pointer to a struct rdline
+ */
+void rdline_redisplay(struct rdline *rdl);
+
+/**
+ * Reset the current buffer and setup for a new line.
+ *  \param rdl A pointer to a struct rdline
+ */
+void rdline_reset(struct rdline *rdl);
+
+
+/* return status for rdline_char_in() */
+#define RDLINE_RES_SUCCESS       0
+#define RDLINE_RES_VALIDATED     1
+#define RDLINE_RES_COMPLETE      2
+#define RDLINE_RES_NOT_RUNNING  -1
+#define RDLINE_RES_EOF          -2
+#define RDLINE_RES_EXITED       -3
+
+/**
+ * append a char to the readline buffer.
+ * Return RDLINE_RES_VALIDATED when the line has been validated.
+ * Return RDLINE_RES_COMPLETE when the user asked to complete the buffer.
+ * Return RDLINE_RES_NOT_RUNNING if it is not running.
+ * Return RDLINE_RES_EOF if EOF (ctrl-d on an empty line).
+ * Else return RDLINE_RES_SUCCESS.
+ * XXX error case when the buffer is full ?
+ *
+ * \param rdl A pointer to a struct rdline
+ * \param c The character to append
+ */
+int rdline_char_in(struct rdline *rdl, char c);
+
+/**
+ * Return the current buffer, terminated by '\0'.
+ * \param rdl A pointer to a struct rdline
+ */
+const char *rdline_get_buffer(struct rdline *rdl);
+
+
+/**
+ * Add the buffer to history.
+ * return < 0 on error.
+ * \param rdl A pointer to a struct rdline
+ * \param buf A buffer that is terminated by '\0'
+ */
+int rdline_add_history(struct rdline *rdl, const char *buf);
+
+/**
+ * Clear current history
+ * \param rdl A pointer to a struct rdline
+ */
+void rdline_clear_history(struct rdline *rdl);
+
+/**
+ * Get the i-th history item
+ */
+char *rdline_get_history_item(struct rdline *rdl, unsigned int i);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RDLINE_H_ */
diff --git a/lib/librte_cmdline/cmdline_socket.c b/lib/librte_cmdline/cmdline_socket.c
new file mode 100644 (file)
index 0000000..21d32d9
--- /dev/null
@@ -0,0 +1,120 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+/*
+ * Copyright (c) 2009, Olivier MATZ <zer0@droids-corp.org>
+ * All rights reserved.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of the University of California, Berkeley nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <inttypes.h>
+#include <fcntl.h>
+#include <termios.h>
+
+#include "cmdline_parse.h"
+#include "cmdline_rdline.h"
+#include "cmdline_socket.h"
+#include "cmdline.h"
+
+/*
+ * Build a cmdline instance reading commands from the file at `path`
+ * (opened read-only).  The output fd is -1 -- presumably meaning "no
+ * output"; confirm against cmdline_new().  Returns NULL if the file
+ * cannot be opened.
+ */
+struct cmdline *
+cmdline_file_new(cmdline_parse_ctx_t *ctx, const char *prompt, const char *path)
+{
+       int fd;
+       fd = open(path, O_RDONLY, 0);
+       if (fd < 0) {
+               dprintf("open() failed\n");
+               return NULL;
+       }
+       return (cmdline_new(ctx, prompt, fd, -1));
+}
+
+/*
+ * Build a cmdline instance reading from stdin (fd 0) and writing to
+ * stdout (fd 1).  On Linux the terminal is first switched to
+ * non-canonical mode (no line buffering, no echo, no signal chars)
+ * and the previous settings are saved in cl->oldterm so that
+ * cmdline_stdin_exit() can restore them.  Returns NULL on failure,
+ * with the terminal settings restored.
+ */
+struct cmdline *
+cmdline_stdin_new(cmdline_parse_ctx_t *ctx, const char *prompt)
+{
+       struct cmdline *cl;
+#ifdef RTE_EXEC_ENV_LINUXAPP
+       struct termios oldterm, term;
+
+       tcgetattr(0, &oldterm);
+       memcpy(&term, &oldterm, sizeof(term));
+       term.c_lflag &= ~(ICANON | ECHO | ISIG);
+       tcsetattr(0, TCSANOW, &term);
+       setbuf(stdin, NULL);
+#endif
+
+       cl = cmdline_new(ctx, prompt, 0, 1);
+
+#ifdef RTE_EXEC_ENV_LINUXAPP
+       if (cl == NULL) {
+               /* cmdline_new() failed: restore the terminal instead of
+                * dereferencing NULL and leaving the tty in raw mode */
+               tcsetattr(fileno(stdin), TCSANOW, &oldterm);
+               return NULL;
+       }
+       memcpy(&cl->oldterm, &oldterm, sizeof(term));
+#endif
+       return cl;
+}
+
+/*
+ * Restore the terminal attributes saved by cmdline_stdin_new()
+ * (Linux only; a no-op elsewhere).
+ */
+void
+cmdline_stdin_exit(struct cmdline *cl)
+{
+#ifdef RTE_EXEC_ENV_LINUXAPP
+       tcsetattr(fileno(stdin), TCSANOW, &cl->oldterm);
+#else
+       /* silent the compiler */
+       (void)cl;
+#endif
+}
diff --git a/lib/librte_cmdline/cmdline_socket.h b/lib/librte_cmdline/cmdline_socket.h
new file mode 100644 (file)
index 0000000..368836e
--- /dev/null
@@ -0,0 +1,78 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+/*
+ * Copyright (c) 2009, Olivier MATZ <zer0@droids-corp.org>
+ * All rights reserved.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of the University of California, Berkeley nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _CMDLINE_SOCKET_H_
+#define _CMDLINE_SOCKET_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Create a cmdline instance replaying commands from a file (output fd is -1,
+ * so nothing is echoed). Returns NULL if the file cannot be opened. */
+struct cmdline *cmdline_file_new(cmdline_parse_ctx_t *ctx, const char *prompt, const char *path);
+/* Create a cmdline instance on stdin/stdout; on Linux, switches the terminal
+ * to raw mode and saves the previous settings in cl->oldterm. */
+struct cmdline *cmdline_stdin_new(cmdline_parse_ctx_t *ctx, const char *prompt);
+/* Restore the terminal settings saved by cmdline_stdin_new(). */
+void cmdline_stdin_exit(struct cmdline *cl);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _CMDLINE_SOCKET_H_ */
diff --git a/lib/librte_cmdline/cmdline_vt100.c b/lib/librte_cmdline/cmdline_vt100.c
new file mode 100644 (file)
index 0000000..ebdc538
--- /dev/null
@@ -0,0 +1,182 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+/*
+ * Copyright (c) 2009, Olivier MATZ <zer0@droids-corp.org>
+ * All rights reserved.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of the University of California, Berkeley nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdlib.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdarg.h>
+#include <ctype.h>
+#include <termios.h>
+
+#include "cmdline_vt100.h"
+
+/* Input sequences recognised by the parser.  The position of each entry
+ * must stay in sync with the CMDLINE_KEY_* indices in cmdline_vt100.h:
+ * vt100_parser() returns the index of the matched entry. */
+const char *cmdline_vt100_commands[] = {
+       vt100_up_arr,           /* CMDLINE_KEY_UP_ARR */
+       vt100_down_arr,         /* CMDLINE_KEY_DOWN_ARR */
+       vt100_right_arr,        /* CMDLINE_KEY_RIGHT_ARR */
+       vt100_left_arr,         /* CMDLINE_KEY_LEFT_ARR */
+       "\177",                 /* DEL -> CMDLINE_KEY_BKSPACE */
+       "\n",                   /* CMDLINE_KEY_RETURN */
+       "\001",                 /* Ctrl-A -> CMDLINE_KEY_CTRL_A */
+       "\005",                 /* Ctrl-E -> CMDLINE_KEY_CTRL_E */
+       "\013",                 /* Ctrl-K -> CMDLINE_KEY_CTRL_K */
+       "\031",                 /* Ctrl-Y -> CMDLINE_KEY_CTRL_Y */
+       "\003",                 /* Ctrl-C -> CMDLINE_KEY_CTRL_C */
+       "\006",                 /* Ctrl-F -> CMDLINE_KEY_CTRL_F */
+       "\002",                 /* Ctrl-B -> CMDLINE_KEY_CTRL_B */
+       vt100_suppr,            /* CMDLINE_KEY_SUPPR */
+       vt100_tab,              /* CMDLINE_KEY_TAB */
+       "\004",                 /* Ctrl-D -> CMDLINE_KEY_CTRL_D */
+       "\014",                 /* Ctrl-L -> CMDLINE_KEY_CTRL_L */
+       "\r",                   /* CMDLINE_KEY_RETURN2 */
+       "\033\177",             /* ESC DEL -> CMDLINE_KEY_META_BKSPACE */
+       vt100_word_left,        /* CMDLINE_KEY_WLEFT */
+       vt100_word_right,       /* CMDLINE_KEY_WRIGHT */
+       "?",                    /* CMDLINE_KEY_HELP */
+       "\027",                 /* Ctrl-W -> CMDLINE_KEY_CTRL_W */
+       "\020",                 /* Ctrl-P -> CMDLINE_KEY_CTRL_P */
+       "\016",                 /* Ctrl-N -> CMDLINE_KEY_CTRL_N */
+       "\033\144",             /* ESC 'd' -> CMDLINE_KEY_META_D */
+};
+
+/* Reset the parser to its initial state (no escape sequence pending). */
+void
+vt100_init(struct cmdline_vt100 *vt)
+{
+       vt->state = CMDLINE_VT100_INIT;
+}
+
+
+/*
+ * Look up the buffered input sequence in cmdline_vt100_commands[].
+ * A match requires both identical length and identical bytes.
+ * Returns the table index on success, -1 if nothing matches.
+ */
+static int
+match_command(char *buf, unsigned int size)
+{
+       const unsigned int ncmd =
+               sizeof(cmdline_vt100_commands) / sizeof(const char *);
+       unsigned int idx;
+
+       for (idx = 0; idx < ncmd; idx++) {
+               const char *seq = cmdline_vt100_commands[idx];
+               size_t seqlen = strnlen(seq, CMDLINE_VT100_BUF_SIZE);
+
+               if (seqlen == size && strncmp(buf, seq, seqlen) == 0)
+                       return idx;
+       }
+
+       return -1;
+}
+
+/**
+ * Feed one input character into the vt100 state machine.
+ *
+ * Returns:
+ *   -2 if the character may be part of a sequence that is not finished yet
+ *   -1 if the completed sequence matches nothing
+ *   otherwise the index of the matched entry in cmdline_vt100_commands[]
+ */
+int
+vt100_parser(struct cmdline_vt100 *vt, char ch)
+{
+       unsigned int size;
+       uint8_t c = (uint8_t) ch;
+
+       /* buffered sequence too long to be valid: drop it and restart */
+       if (vt->bufpos >= CMDLINE_VT100_BUF_SIZE) {
+               vt->state = CMDLINE_VT100_INIT;
+               vt->bufpos = 0;
+       }
+
+       vt->buf[vt->bufpos++] = c;
+       size = vt->bufpos;
+
+       switch (vt->state) {
+       case CMDLINE_VT100_INIT:
+               if (c == 033) { /* ESC: start of an escape sequence */
+                       vt->state = CMDLINE_VT100_ESCAPE;
+               }
+               else {
+                       /* plain character: match it on its own */
+                       vt->bufpos = 0;
+                       goto match_command;
+               }
+               break;
+
+       case CMDLINE_VT100_ESCAPE:
+               if (c == 0133) { /* '[': CSI introducer, sequence continues */
+                       vt->state = CMDLINE_VT100_ESCAPE_CSI;
+               }
+               else if (c >= 060 && c <= 0177) { /* XXX 0177 ? */
+                       /* two-byte ESC sequence is complete */
+                       vt->bufpos = 0;
+                       vt->state = CMDLINE_VT100_INIT;
+                       goto match_command;
+               }
+               break;
+
+       case CMDLINE_VT100_ESCAPE_CSI:
+               /* 0100..0176 ('@'..'~') terminates a CSI sequence */
+               if (c >= 0100 && c <= 0176) {
+                       vt->bufpos = 0;
+                       vt->state = CMDLINE_VT100_INIT;
+                       goto match_command;
+               }
+               break;
+
+       default:
+               vt->bufpos = 0;
+               break;
+       }
+
+       /* sequence not complete yet */
+       return -2;
+
+ match_command:
+       return match_command(vt->buf, size);
+}
diff --git a/lib/librte_cmdline/cmdline_vt100.h b/lib/librte_cmdline/cmdline_vt100.h
new file mode 100644 (file)
index 0000000..28d048a
--- /dev/null
@@ -0,0 +1,153 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+/*
+ * Copyright (c) 2009, Olivier MATZ <zer0@droids-corp.org>
+ * All rights reserved.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of the University of California, Berkeley nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _CMDLINE_VT100_H_
+#define _CMDLINE_VT100_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define vt100_bell         "\007"
+#define vt100_bs           "\010"
+#define vt100_bs_clear     "\010 \010"
+#define vt100_tab          "\011"
+#define vt100_crnl         "\012\015"
+#define vt100_clear_right  "\033[0K"
+#define vt100_clear_left   "\033[1K"
+#define vt100_clear_down   "\033[0J"
+#define vt100_clear_up     "\033[1J"
+#define vt100_clear_line   "\033[2K"
+#define vt100_clear_screen "\033[2J"
+#define vt100_up_arr       "\033\133\101"
+#define vt100_down_arr     "\033\133\102"
+#define vt100_right_arr    "\033\133\103"
+#define vt100_left_arr     "\033\133\104"
+#define vt100_multi_right  "\033\133%uC"
+#define vt100_multi_left   "\033\133%uD"
+#define vt100_suppr        "\033\133\063\176"
+#define vt100_home         "\033M\033E"
+#define vt100_word_left    "\033\142"
+#define vt100_word_right   "\033\146"
+
+/* Result of parsing : it must be synchronized with
+ * cmdline_vt100_commands[] in vt100.c */
+#define CMDLINE_KEY_UP_ARR 0
+#define CMDLINE_KEY_DOWN_ARR 1
+#define CMDLINE_KEY_RIGHT_ARR 2
+#define CMDLINE_KEY_LEFT_ARR 3
+#define CMDLINE_KEY_BKSPACE 4
+#define CMDLINE_KEY_RETURN 5
+#define CMDLINE_KEY_CTRL_A 6
+#define CMDLINE_KEY_CTRL_E 7
+#define CMDLINE_KEY_CTRL_K 8
+#define CMDLINE_KEY_CTRL_Y 9
+#define CMDLINE_KEY_CTRL_C 10
+#define CMDLINE_KEY_CTRL_F 11
+#define CMDLINE_KEY_CTRL_B 12
+#define CMDLINE_KEY_SUPPR 13
+#define CMDLINE_KEY_TAB 14
+#define CMDLINE_KEY_CTRL_D 15
+#define CMDLINE_KEY_CTRL_L 16
+#define CMDLINE_KEY_RETURN2 17
+#define CMDLINE_KEY_META_BKSPACE 18
+#define CMDLINE_KEY_WLEFT 19
+#define CMDLINE_KEY_WRIGHT 20
+#define CMDLINE_KEY_HELP 21
+#define CMDLINE_KEY_CTRL_W 22
+#define CMDLINE_KEY_CTRL_P 23
+#define CMDLINE_KEY_CTRL_N 24
+#define CMDLINE_KEY_META_D 25
+
+extern const char *cmdline_vt100_commands[];
+
+enum cmdline_vt100_parser_state {
+       CMDLINE_VT100_INIT,
+       CMDLINE_VT100_ESCAPE,
+       CMDLINE_VT100_ESCAPE_CSI
+};
+
+/* Maximum length of a buffered control sequence */
+#define CMDLINE_VT100_BUF_SIZE 8
+struct cmdline_vt100 {
+       uint8_t bufpos;                        /* number of bytes currently buffered */
+       char buf[CMDLINE_VT100_BUF_SIZE];      /* bytes of the pending sequence */
+       enum cmdline_vt100_parser_state state; /* current parser state */
+};
+
+/**
+ * Init
+ */
+void vt100_init(struct cmdline_vt100 *vt);
+
+/**
+ * Input a new character.
+ * Return -1 if the character is not part of a control sequence
+ * Return -2 if c is not the last char of a control sequence
+ * Else return the index in vt100_commands[]
+ */
+int vt100_parser(struct cmdline_vt100 *vt, char c);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/lib/librte_eal/Makefile b/lib/librte_eal/Makefile
new file mode 100644 (file)
index 0000000..d061060
--- /dev/null
@@ -0,0 +1,41 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# Descend into the common EAL code plus the implementation matching the
+# configured execution environment (Linux userspace or baremetal).
+DIRS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += common
+DIRS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += linuxapp
+DIRS-$(CONFIG_RTE_LIBRTE_EAL_BAREMETAL) += baremetal
+DIRS-$(CONFIG_RTE_LIBRTE_EAL_BAREMETAL) += common
+
+include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/lib/librte_eal/common/Makefile b/lib/librte_eal/common/Makefile
new file mode 100644 (file)
index 0000000..9a42bc7
--- /dev/null
@@ -0,0 +1,56 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# Public EAL headers to be symlinked into the build's include directory.
+INC := rte_atomic.h rte_branch_prediction.h rte_byteorder.h rte_common.h
+INC += rte_cycles.h rte_debug.h rte_eal.h rte_errno.h rte_launch.h rte_lcore.h
+INC += rte_log.h rte_memcpy.h rte_memory.h rte_memzone.h rte_pci.h
+INC += rte_pci_dev_ids.h rte_per_lcore.h rte_prefetch.h rte_random.h
+INC += rte_rwlock.h rte_spinlock.h rte_tailq.h rte_interrupts.h rte_alarm.h
+INC += rte_string_fns.h rte_cpuflags.h rte_version.h
+
+# Warnings about insecure libc functions are opt-in.
+ifeq ($(CONFIG_RTE_INSECURE_FUNCTION_WARNING),y)
+INC += rte_warnings.h
+endif
+
+# Architecture-specific headers, taken from include/$(RTE_ARCH)/arch.
+ARCH_INC := rte_atomic.h
+
+SYMLINK-$(CONFIG_RTE_LIBRTE_EAL)-include := $(addprefix include/,$(INC))
+SYMLINK-$(CONFIG_RTE_LIBRTE_EAL)-include/arch := \
+       $(addprefix include/$(RTE_ARCH)/arch/,$(ARCH_INC))
+
+# add libc if configured
+DEPDIRS-$(CONFIG_RTE_LIBC) += lib/libc
+
+include $(RTE_SDK)/mk/rte.install.mk
diff --git a/lib/librte_eal/common/eal_common_cpuflags.c b/lib/librte_eal/common/eal_common_cpuflags.c
new file mode 100644 (file)
index 0000000..54293e5
--- /dev/null
@@ -0,0 +1,265 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+#include <stdlib.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <rte_cpuflags.h>
+
+/*
+ * This should prevent use of advanced instruction sets in this file. Otherwise
+ * the check function itself could cause a crash.
+ */
+#ifdef __INTEL_COMPILER
+#pragma optimize ("", off)
+#else
+#define GCC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
+#if GCC_VERSION > 404000
+#pragma GCC optimize ("O0")
+#endif
+#endif
+
+/**
+ * Enumeration of CPU registers
+ */
+enum cpu_register_t {
+       REG_EAX = 0,
+       REG_EBX,
+       REG_ECX,
+       REG_EDX,
+};
+
+/**
+ * Parameters for CPUID instruction
+ */
+struct cpuid_parameters_t {
+       uint32_t eax;
+       uint32_t ebx;
+       uint32_t ecx;
+       uint32_t edx;
+       enum cpu_register_t return_register;
+};
+
+#define CPU_FLAG_NAME_MAX_LEN 64
+
+/**
+ * Struct to hold a processor feature entry
+ */
+struct feature_entry {
+       enum rte_cpu_flag_t feature;            /**< feature name */
+       char name[CPU_FLAG_NAME_MAX_LEN];       /**< String for printing */
+       struct cpuid_parameters_t params;       /**< cpuid parameters */
+       uint32_t feature_mask;                  /**< bitmask for feature */
+};
+
+#define FEAT_DEF(f) RTE_CPUFLAG_##f, #f
+
+/**
+ * An array that holds feature entries
+ */
+/* NOTE(review): the table is indexed directly by enum rte_cpu_flag_t in
+ * rte_cpu_get_flag_enabled(), so entry order must match the enum order. */
+static const struct feature_entry cpu_feature_table[] = {
+       /* CPUID leaf 0x1, ECX */
+       {FEAT_DEF(SSE3),              {0x1, 0, 0, 0, REG_ECX}, 0x00000001},
+       {FEAT_DEF(PCLMULQDQ),         {0x1, 0, 0, 0, REG_ECX}, 0x00000002},
+       {FEAT_DEF(DTES64),            {0x1, 0, 0, 0, REG_ECX}, 0x00000004},
+       {FEAT_DEF(MONITOR),           {0x1, 0, 0, 0, REG_ECX}, 0x00000008},
+       {FEAT_DEF(DS_CPL),            {0x1, 0, 0, 0, REG_ECX}, 0x00000010},
+       {FEAT_DEF(VMX),               {0x1, 0, 0, 0, REG_ECX}, 0x00000020},
+       {FEAT_DEF(SMX),               {0x1, 0, 0, 0, REG_ECX}, 0x00000040},
+       {FEAT_DEF(EIST),              {0x1, 0, 0, 0, REG_ECX}, 0x00000080},
+       {FEAT_DEF(TM2),               {0x1, 0, 0, 0, REG_ECX}, 0x00000100},
+       {FEAT_DEF(SSSE3),             {0x1, 0, 0, 0, REG_ECX}, 0x00000200},
+       {FEAT_DEF(CNXT_ID),           {0x1, 0, 0, 0, REG_ECX}, 0x00000400},
+       {FEAT_DEF(FMA),               {0x1, 0, 0, 0, REG_ECX}, 0x00001000},
+       {FEAT_DEF(CMPXCHG16B),        {0x1, 0, 0, 0, REG_ECX}, 0x00002000},
+       {FEAT_DEF(XTPR),              {0x1, 0, 0, 0, REG_ECX}, 0x00004000},
+       {FEAT_DEF(PDCM),              {0x1, 0, 0, 0, REG_ECX}, 0x00008000},
+       {FEAT_DEF(PCID),              {0x1, 0, 0, 0, REG_ECX}, 0x00020000},
+       {FEAT_DEF(DCA),               {0x1, 0, 0, 0, REG_ECX}, 0x00040000},
+       {FEAT_DEF(SSE4_1),            {0x1, 0, 0, 0, REG_ECX}, 0x00080000},
+       {FEAT_DEF(SSE4_2),            {0x1, 0, 0, 0, REG_ECX}, 0x00100000},
+       {FEAT_DEF(X2APIC),            {0x1, 0, 0, 0, REG_ECX}, 0x00200000},
+       {FEAT_DEF(MOVBE),             {0x1, 0, 0, 0, REG_ECX}, 0x00400000},
+       {FEAT_DEF(POPCNT),            {0x1, 0, 0, 0, REG_ECX}, 0x00800000},
+       {FEAT_DEF(TSC_DEADLINE),      {0x1, 0, 0, 0, REG_ECX}, 0x01000000},
+       {FEAT_DEF(AES),               {0x1, 0, 0, 0, REG_ECX}, 0x02000000},
+       {FEAT_DEF(XSAVE),             {0x1, 0, 0, 0, REG_ECX}, 0x04000000},
+       {FEAT_DEF(OSXSAVE),           {0x1, 0, 0, 0, REG_ECX}, 0x08000000},
+       {FEAT_DEF(AVX),               {0x1, 0, 0, 0, REG_ECX}, 0x10000000},
+       {FEAT_DEF(F16C),              {0x1, 0, 0, 0, REG_ECX}, 0x20000000},
+       {FEAT_DEF(RDRAND),            {0x1, 0, 0, 0, REG_ECX}, 0x40000000},
+
+       /* CPUID leaf 0x1, EDX */
+       {FEAT_DEF(FPU),               {0x1, 0, 0, 0, REG_EDX}, 0x00000001},
+       {FEAT_DEF(VME),               {0x1, 0, 0, 0, REG_EDX}, 0x00000002},
+       {FEAT_DEF(DE),                {0x1, 0, 0, 0, REG_EDX}, 0x00000004},
+       {FEAT_DEF(PSE),               {0x1, 0, 0, 0, REG_EDX}, 0x00000008},
+       {FEAT_DEF(TSC),               {0x1, 0, 0, 0, REG_EDX}, 0x00000010},
+       {FEAT_DEF(MSR),               {0x1, 0, 0, 0, REG_EDX}, 0x00000020},
+       {FEAT_DEF(PAE),               {0x1, 0, 0, 0, REG_EDX}, 0x00000040},
+       {FEAT_DEF(MCE),               {0x1, 0, 0, 0, REG_EDX}, 0x00000080},
+       {FEAT_DEF(CX8),               {0x1, 0, 0, 0, REG_EDX}, 0x00000100},
+       {FEAT_DEF(APIC),              {0x1, 0, 0, 0, REG_EDX}, 0x00000200},
+       {FEAT_DEF(SEP),               {0x1, 0, 0, 0, REG_EDX}, 0x00000800},
+       {FEAT_DEF(MTRR),              {0x1, 0, 0, 0, REG_EDX}, 0x00001000},
+       {FEAT_DEF(PGE),               {0x1, 0, 0, 0, REG_EDX}, 0x00002000},
+       {FEAT_DEF(MCA),               {0x1, 0, 0, 0, REG_EDX}, 0x00004000},
+       {FEAT_DEF(CMOV),              {0x1, 0, 0, 0, REG_EDX}, 0x00008000},
+       {FEAT_DEF(PAT),               {0x1, 0, 0, 0, REG_EDX}, 0x00010000},
+       {FEAT_DEF(PSE36),             {0x1, 0, 0, 0, REG_EDX}, 0x00020000},
+       {FEAT_DEF(PSN),               {0x1, 0, 0, 0, REG_EDX}, 0x00040000},
+       {FEAT_DEF(CLFSH),             {0x1, 0, 0, 0, REG_EDX}, 0x00080000},
+       {FEAT_DEF(DS),                {0x1, 0, 0, 0, REG_EDX}, 0x00200000},
+       {FEAT_DEF(ACPI),              {0x1, 0, 0, 0, REG_EDX}, 0x00400000},
+       {FEAT_DEF(MMX),               {0x1, 0, 0, 0, REG_EDX}, 0x00800000},
+       {FEAT_DEF(FXSR),              {0x1, 0, 0, 0, REG_EDX}, 0x01000000},
+       {FEAT_DEF(SSE),               {0x1, 0, 0, 0, REG_EDX}, 0x02000000},
+       {FEAT_DEF(SSE2),              {0x1, 0, 0, 0, REG_EDX}, 0x04000000},
+       {FEAT_DEF(SS),                {0x1, 0, 0, 0, REG_EDX}, 0x08000000},
+       {FEAT_DEF(HTT),               {0x1, 0, 0, 0, REG_EDX}, 0x10000000},
+       {FEAT_DEF(TM),                {0x1, 0, 0, 0, REG_EDX}, 0x20000000},
+       {FEAT_DEF(PBE),               {0x1, 0, 0, 0, REG_EDX}, 0x80000000},
+
+       /* CPUID leaf 0x6 (thermal/power management), EAX */
+       {FEAT_DEF(DIGTEMP),           {0x6, 0, 0, 0, REG_EAX}, 0x00000001},
+       {FEAT_DEF(TRBOBST),           {0x6, 0, 0, 0, REG_EAX}, 0x00000002},
+       {FEAT_DEF(ARAT),              {0x6, 0, 0, 0, REG_EAX}, 0x00000004},
+       {FEAT_DEF(PLN),               {0x6, 0, 0, 0, REG_EAX}, 0x00000010},
+       {FEAT_DEF(ECMD),              {0x6, 0, 0, 0, REG_EAX}, 0x00000020},
+       {FEAT_DEF(PTM),               {0x6, 0, 0, 0, REG_EAX}, 0x00000040},
+
+       /* CPUID leaf 0x6, ECX */
+       {FEAT_DEF(MPERF_APERF_MSR),   {0x6, 0, 0, 0, REG_ECX}, 0x00000001},
+       {FEAT_DEF(ACNT2),             {0x6, 0, 0, 0, REG_ECX}, 0x00000002},
+       {FEAT_DEF(ENERGY_EFF),        {0x6, 0, 0, 0, REG_ECX}, 0x00000008},
+
+       /* CPUID leaf 0x7 (structured extended features), EBX */
+       {FEAT_DEF(FSGSBASE),          {0x7, 0, 0, 0, REG_EBX}, 0x00000001},
+       {FEAT_DEF(BMI1),              {0x7, 0, 0, 0, REG_EBX}, 0x00000004},
+       {FEAT_DEF(AVX2),              {0x7, 0, 0, 0, REG_EBX}, 0x00000010},
+       {FEAT_DEF(SMEP),              {0x7, 0, 0, 0, REG_EBX}, 0x00000040},
+       {FEAT_DEF(BMI2),              {0x7, 0, 0, 0, REG_EBX}, 0x00000080},
+       {FEAT_DEF(ERMS),              {0x7, 0, 0, 0, REG_EBX}, 0x00000100},
+       {FEAT_DEF(INVPCID),           {0x7, 0, 0, 0, REG_EBX}, 0x00000400},
+
+       /* CPUID extended leaf 0x80000001, ECX */
+       {FEAT_DEF(LAHF_SAHF),  {0x80000001, 0, 0, 0, REG_ECX}, 0x00000001},
+       {FEAT_DEF(LZCNT),      {0x80000001, 0, 0, 0, REG_ECX}, 0x00000010},
+
+       /* CPUID extended leaf 0x80000001, EDX */
+       {FEAT_DEF(SYSCALL),    {0x80000001, 0, 0, 0, REG_EDX}, 0x00000800},
+       {FEAT_DEF(XD),         {0x80000001, 0, 0, 0, REG_EDX}, 0x00100000},
+       {FEAT_DEF(1GB_PG),     {0x80000001, 0, 0, 0, REG_EDX}, 0x04000000},
+       {FEAT_DEF(RDTSCP),     {0x80000001, 0, 0, 0, REG_EDX}, 0x08000000},
+       {FEAT_DEF(EM64T),      {0x80000001, 0, 0, 0, REG_EDX}, 0x20000000},
+
+       /* CPUID extended leaf 0x80000007, EDX */
+       {FEAT_DEF(INVTSC),     {0x80000007, 0, 0, 0, REG_EDX}, 0x00000100},
+};
+
+/*
+ * Execute CPUID instruction and get contents of a specific register
+ *
+ * This function, when compiled with GCC, will generate architecture-neutral
+ * code, as per GCC manual.
+ */
+static inline int
+rte_cpu_get_features(struct cpuid_parameters_t params)
+{
+       int eax, ebx, ecx, edx;            /* registers */
+
+       /* issue CPUID with the requested leaf/subleaf values loaded into
+        * the input registers; all four output registers are written */
+       asm volatile ("cpuid"
+                     /* output */
+                     : "=a" (eax),
+                       "=b" (ebx),
+                       "=c" (ecx),
+                       "=d" (edx)
+                     /* input */
+                     : "a" (params.eax),
+                       "b" (params.ebx),
+                       "c" (params.ecx),
+                       "d" (params.edx));
+
+       /* return only the register the caller asked for */
+       switch (params.return_register) {
+       case REG_EAX:
+               return eax;
+       case REG_EBX:
+               return ebx;
+       case REG_ECX:
+               return ecx;
+       case REG_EDX:
+               return edx;
+       default:
+               return 0;
+       }
+}
+
+/*
+ * Checks if a particular flag is available on current machine.
+ */
+/*
+ * Checks if a particular flag is available on current machine.
+ *
+ * Returns 1 if the feature is present, 0 if it is absent, and -ENOENT
+ * when 'feature' is outside the known feature table.
+ */
+int
+rte_cpu_get_flag_enabled(enum rte_cpu_flag_t feature)
+{
+       int value;
+
+       /* 'feature' indexes cpu_feature_table[] directly, and a C enum
+        * can hold a negative value, so bound-check both ends */
+       if ((int)feature < 0 || feature >= RTE_CPUFLAG_NUMFLAGS)
+               /* Flag does not match anything in the feature tables */
+               return -ENOENT;
+
+       /* get value of the register containing the desired feature */
+       value = rte_cpu_get_features(cpu_feature_table[feature].params);
+
+       /* check if the feature is enabled */
+       return (cpu_feature_table[feature].feature_mask & value) > 0;
+}
+
+/**
+ * Checks if the machine is adequate for running the binary. If it is not, the
+ * program exits with status 1.
+ * The function attribute forces this function to be called before main(). But
+ * with ICC, the check is generated by the compiler.
+ */
+#ifndef __INTEL_COMPILER
+static void __attribute__ ((__constructor__))
+rte_cpu_check_supported(void)
+{
+       /* This is generated at compile-time by the build system */
+       static const enum rte_cpu_flag_t compile_time_flags[] = {
+                       RTE_COMPILE_TIME_CPUFLAGS
+       };
+       unsigned i;
+
+       /* abort before main() if the running CPU lacks any instruction-set
+        * feature this binary was built to rely on */
+       for (i = 0; i < sizeof(compile_time_flags)/sizeof(compile_time_flags[0]); i++)
+               if (rte_cpu_get_flag_enabled(compile_time_flags[i]) < 1) {
+                       fprintf(stderr,
+                               "ERROR: This system does not support \"%s\".\n"
+                               "Please check that RTE_MACHINE is set correctly.\n",
+                               cpu_feature_table[compile_time_flags[i]].name);
+                       exit(1);
+               }
+}
+#endif
diff --git a/lib/librte_eal/common/eal_common_errno.c b/lib/librte_eal/common/eal_common_errno.c
new file mode 100644 (file)
index 0000000..9ed45e5
--- /dev/null
@@ -0,0 +1,72 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdarg.h>
+#include <errno.h>
+
+#include <rte_per_lcore.h>
+#include <rte_errno.h>
+#include <rte_string_fns.h>
+
+/* per-lcore errno variable, accessed through the rte_errno macro */
+RTE_DEFINE_PER_LCORE(int, _rte_errno);
+
+/*
+ * Return a human-readable string for an error number.
+ *
+ * RTE-specific error codes are handled explicitly; everything else
+ * falls through to strerror_r(). Non-constant results live in a
+ * per-lcore static buffer, so the returned pointer is only valid
+ * until the next rte_strerror() call on the same lcore.
+ *
+ * NOTE(review): with _GNU_SOURCE, strerror_r() returns a char* and may
+ * not write into the supplied buffer at all — confirm which variant
+ * the build selects, otherwise retval can be returned unfilled.
+ */
+const char *
+rte_strerror(int errnum)
+{
+#define RETVAL_SZ 256
+       static RTE_DEFINE_PER_LCORE(char[RETVAL_SZ], retval);
+
+       /* since some implementations of strerror_r throw an error
+        * themselves if errnum is too big, we handle that case here */
+       if (errnum > RTE_MAX_ERRNO)
+               rte_snprintf(RTE_PER_LCORE(retval), RETVAL_SZ,
+                               "Unknown error %d", errnum);
+       else
+               switch (errnum){
+               case E_RTE_SECONDARY:
+                       return "Invalid call in secondary process";
+               case E_RTE_NO_CONFIG:
+                       return "Missing rte_config structure";
+               case E_RTE_NO_TAILQ:
+                       return "No TAILQ initialised";
+               default:
+                       strerror_r(errnum, RTE_PER_LCORE(retval), RETVAL_SZ);
+               }
+
+       return RTE_PER_LCORE(retval);
+}
diff --git a/lib/librte_eal/common/eal_common_launch.c b/lib/librte_eal/common/eal_common_launch.c
new file mode 100644 (file)
index 0000000..deef8e8
--- /dev/null
@@ -0,0 +1,122 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <errno.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <sys/queue.h>
+
+#include <rte_launch.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_atomic.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+
+/*
+ * Wait until a lcore finished its job.
+ *
+ * slave_id: id of the lcore to wait for.
+ * Returns 0 if the lcore was already in WAIT state, otherwise the
+ * return value of the function last executed on that lcore.
+ */
+int
+rte_eal_wait_lcore(unsigned slave_id)
+{
+       if (lcore_config[slave_id].state == WAIT)
+               return 0;
+
+       /* busy-wait (spin) until the remote lcore leaves RUNNING state */
+       while (lcore_config[slave_id].state != WAIT &&
+              lcore_config[slave_id].state != FINISHED);
+
+       /* make the remote lcore's write of .ret visible before reading it */
+       rte_rmb();
+
+       /* we are in finished state, go to wait state */
+       lcore_config[slave_id].state = WAIT;
+       return lcore_config[slave_id].ret;
+}
+
+/*
+ * Check that every SLAVE lcores are in WAIT state, then call
+ * rte_eal_remote_launch() for all of them. If call_master is true
+ * (set to CALL_MASTER), also call the function on the master lcore.
+ *
+ * Returns 0 on success, -EBUSY if any slave is not in WAIT state
+ * (in which case nothing is launched).
+ */
+int
+rte_eal_mp_remote_launch(int (*f)(void *), void *arg,
+                        enum rte_rmt_call_master_t call_master)
+{
+       int lcore_id;
+       int master = rte_get_master_lcore();
+
+       /* check state of lcores */
+       RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+               if (lcore_config[lcore_id].state != WAIT)
+                       return -EBUSY;
+       }
+
+       /* send messages to cores */
+       RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+               rte_eal_remote_launch(f, arg, lcore_id);
+       }
+
+       /* the master runs f() synchronously; its state is set to FINISHED
+        * so a later rte_eal_wait_lcore() can collect the return value */
+       if (call_master == CALL_MASTER) {
+               lcore_config[master].ret = f(arg);
+               lcore_config[master].state = FINISHED;
+       }
+
+       return 0;
+}
+
+/*
+ * Return the state of the lcore identified by slave_id.
+ * (Despite the parameter name, any lcore id is accepted.)
+ */
+enum rte_lcore_state_t
+rte_eal_get_lcore_state(unsigned lcore_id)
+{
+       return lcore_config[lcore_id].state;
+}
+
+/*
+ * Do a rte_eal_wait_lcore() for every lcore. The return values are
+ * ignored.  Blocks until all slave lcores are back in WAIT state.
+ */
+void
+rte_eal_mp_wait_lcore(void)
+{
+       unsigned lcore_id;
+
+       RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+               rte_eal_wait_lcore(lcore_id);
+       }
+}
+
diff --git a/lib/librte_eal/common/eal_common_log.c b/lib/librte_eal/common/eal_common_log.c
new file mode 100644 (file)
index 0000000..1362109
--- /dev/null
@@ -0,0 +1,390 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <string.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <stdarg.h>
+#include <sys/types.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <inttypes.h>
+#include <errno.h>
+#include <sys/queue.h>
+
+#include <rte_log.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_launch.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_atomic.h>
+#include <rte_debug.h>
+#include <rte_spinlock.h>
+#include <rte_branch_prediction.h>
+#include <rte_ring.h>
+#include <rte_mempool.h>
+
+#include "eal_private.h"
+
+/* size of one log-history mempool element (header + message text) */
+#define LOG_ELT_SIZE     2048
+
+/* name of the shared mempool holding the history entries */
+#define LOG_HISTORY_MP_NAME "log_history"
+
+STAILQ_HEAD(log_history_list, log_history);
+
+/**
+ * The structure of a message log in the log history.
+ */
+struct log_history {
+       STAILQ_ENTRY(log_history) next;
+       unsigned size;   /**< number of valid bytes in buf */
+       char buf[0];     /**< message text (flexible array member) */
+};
+
+/* mempool providing log_history entries, and the history list itself */
+static struct rte_mempool *log_history_mp = NULL;
+static unsigned log_history_size = 0;
+static struct log_history_list log_history;
+
+/* global log structure */
+struct rte_logs rte_logs = {
+       .type = ~0,
+       .level = RTE_LOG_DEBUG,
+       .file = NULL,
+};
+
+/* log_dump_lock serialises history dumps;
+ * log_list_lock protects log_history / log_history_size */
+static rte_spinlock_t log_dump_lock = RTE_SPINLOCK_INITIALIZER;
+static rte_spinlock_t log_list_lock = RTE_SPINLOCK_INITIALIZER;
+static FILE *default_log_stream;
+static int history_enabled = 1;
+
+/**
+ * This global structure stores some information about the message
+ * that is currently being processed by one lcore
+ */
+struct log_cur_msg {
+       uint32_t loglevel; /**< log level - see rte_log.h */
+       uint32_t logtype;  /**< log type  - see rte_log.h */
+} __rte_cache_aligned;
+static struct log_cur_msg log_cur_msg[RTE_MAX_LCORE]; /**< per core log */
+
+/* early logs */
+
+/*
+ * early log function, used during boot when mempool (hence log
+ * history) is not available
+ *
+ * Cookie-write callback for fopencookie(): must return the number of
+ * BYTES consumed from 'buf', or -1 on error.
+ */
+static ssize_t
+early_log_write(__attribute__((unused)) void *c, const char *buf, size_t size)
+{
+       ssize_t ret;
+
+       /* write byte-wise so the return value counts bytes:
+        * fwrite(buf, size, 1, ...) returns an ITEM count (at most 1),
+        * which stdio would treat as a short write of 1 byte and flag
+        * an error on the cookie stream for any multi-byte message */
+       ret = fwrite(buf, 1, size, stdout);
+       fflush(stdout);
+       if (ret == 0)
+               return -1;
+       return ret;
+}
+
+/* cookie-read callback: the early log stream is write-only,
+ * always report end-of-file (0 bytes read) */
+static ssize_t
+early_log_read(__attribute__((unused)) void *c,
+              __attribute__((unused)) char *buf,
+              __attribute__((unused)) size_t size)
+{
+       return 0;
+}
+
+/*
+ * this is needed because cookies_io_functions_t has a different
+ * prototype between newlib and glibc
+ *
+ * cookie-seek callback: the early log stream is not seekable,
+ * always fail with -1.
+ */
+#ifdef RTE_EXEC_ENV_LINUXAPP
+static int
+early_log_seek(__attribute__((unused)) void *c,
+              __attribute__((unused)) off64_t *offset,
+              __attribute__((unused)) int whence)
+{
+       return -1;
+}
+#else
+static int
+early_log_seek(__attribute__((unused)) void *c,
+              __attribute__((unused)) _off_t *offset,
+              __attribute__((unused)) int whence)
+{
+       return -1;
+}
+#endif
+
+/* cookie-close callback: nothing to release, report success */
+static int
+early_log_close(__attribute__((unused)) void *c)
+{
+       return 0;
+}
+
+/* cookie callbacks used by fopencookie() to build the early stream */
+static cookie_io_functions_t early_log_func = {
+       .read  = early_log_read,
+       .write = early_log_write,
+       .seek  = early_log_seek,
+       .close = early_log_close
+};
+/* stream registered with rte_openlog_stream() during early boot */
+static FILE *early_log_stream;
+
+/* default logs */
+
+/*
+ * Add the message in 'buf' (of 'size' bytes) to the log history.
+ *
+ * Returns 0 on success (or when history is disabled), -ENOBUFS when no
+ * buffer could be obtained or the message does not fit in one element.
+ */
+int
+rte_log_add_in_history(const char *buf, size_t size)
+{
+       struct log_history *hist_buf = NULL;
+       void *obj;
+
+       if (history_enabled == 0)
+               return 0;
+
+       rte_spinlock_lock(&log_list_lock);
+
+       /* when the history is full, recycle the oldest entry instead of
+        * taking a new one (the previous code never updated
+        * log_history_size, so this branch could never trigger and the
+        * history silently exhausted the mempool) */
+       if (log_history_size > RTE_LOG_HISTORY) {
+               hist_buf = STAILQ_FIRST(&log_history);
+               if (hist_buf != NULL) {
+                       STAILQ_REMOVE_HEAD(&log_history, next);
+                       log_history_size--;
+               }
+       }
+       if (hist_buf == NULL) {
+               if (rte_mempool_mc_get(log_history_mp, &obj) < 0)
+                       obj = NULL;
+               hist_buf = obj;
+       }
+
+       /* no buffer */
+       if (hist_buf == NULL) {
+               rte_spinlock_unlock(&log_list_lock);
+               return -ENOBUFS;
+       }
+
+       /* not enough room for msg, buffer go back in mempool */
+       if (size >= (LOG_ELT_SIZE - sizeof(*hist_buf))) {
+               rte_mempool_mp_put(log_history_mp, hist_buf);
+               rte_spinlock_unlock(&log_list_lock);
+               return -ENOBUFS;
+       }
+
+       /* add in history; terminate at buf[size] — the element is only
+        * LOG_ELT_SIZE bytes INCLUDING the header, so writing
+        * buf[LOG_ELT_SIZE-1] (as before) overran the mempool element */
+       memcpy(hist_buf->buf, buf, size);
+       hist_buf->buf[size] = '\0';
+       hist_buf->size = size;
+       STAILQ_INSERT_TAIL(&log_history, hist_buf, next);
+       log_history_size++;
+       rte_spinlock_unlock(&log_list_lock);
+
+       return 0;
+}
+
+/* Enable (non-zero) or disable (0) recording of messages in the
+ * log history */
+void
+rte_log_set_history(int enable)
+{
+       history_enabled = enable;
+}
+
+/* Change the stream that will be used by logging system.
+ * A NULL argument restores the default stream registered by
+ * rte_eal_common_log_init().  Always returns 0. */
+int
+rte_openlog_stream(FILE *f)
+{
+       if (f == NULL)
+               rte_logs.file = default_log_stream;
+       else
+               rte_logs.file = f;
+       return 0;
+}
+
+/* Set global log level: messages with a higher (less severe) level
+ * are expected to be filtered by the RTE_LOG macro */
+void
+rte_set_log_level(uint32_t level)
+{
+       rte_logs.level = (uint32_t)level;
+}
+
+/* Set global log type: 'type' is a bitmask of RTE_LOGTYPE_* bits
+ * that is ORed into (enable) or cleared from (disable) the mask */
+void
+rte_set_log_type(uint32_t type, int enable)
+{
+       if (enable)
+               rte_logs.type |= type;
+       else
+               rte_logs.type &= (~type);
+}
+
+/* get the current loglevel for the message being processed
+ * on this lcore (recorded by the last rte_vlog() call) */
+int rte_log_cur_msg_loglevel(void)
+{
+       unsigned lcore_id;
+       lcore_id = rte_lcore_id();
+       return log_cur_msg[lcore_id].loglevel;
+}
+
+/* get the current logtype for the message being processed
+ * on this lcore (recorded by the last rte_vlog() call) */
+int rte_log_cur_msg_logtype(void)
+{
+       unsigned lcore_id;
+       lcore_id = rte_lcore_id();
+       return log_cur_msg[lcore_id].logtype;
+}
+
+/* Dump log history on console (stdout) and return the dumped
+ * entries to the mempool.  Logging can continue during the dump
+ * because the list is detached under log_list_lock first. */
+void
+rte_log_dump_history(void)
+{
+       struct log_history_list tmp_log_history;
+       struct log_history *hist_buf;
+       unsigned i;
+
+       /* only one dump at a time */
+       rte_spinlock_lock(&log_dump_lock);
+
+       /* save list, and re-init to allow logging during dump */
+       rte_spinlock_lock(&log_list_lock);
+       tmp_log_history = log_history;
+       STAILQ_INIT(&log_history);
+       log_history_size = 0;   /* keep the counter in sync with the list */
+       rte_spinlock_unlock(&log_list_lock);
+
+       for (i=0; i<RTE_LOG_HISTORY; i++) {
+
+               /* remove one message from history list */
+               hist_buf = STAILQ_FIRST(&tmp_log_history);
+
+               if (hist_buf == NULL)
+                       break;
+
+               STAILQ_REMOVE_HEAD(&tmp_log_history, next);
+
+               /* write on stdout */
+               if (fwrite(hist_buf->buf, hist_buf->size, 1, stdout) == 0) {
+                       rte_mempool_mp_put(log_history_mp, hist_buf);
+                       break;
+               }
+
+               /* put back message structure in pool */
+               rte_mempool_mp_put(log_history_mp, hist_buf);
+       }
+
+       /* on early exit (write error) the loop above left entries on the
+        * detached local list; return them to the mempool instead of
+        * leaking them when the list goes out of scope */
+       while ((hist_buf = STAILQ_FIRST(&tmp_log_history)) != NULL) {
+               STAILQ_REMOVE_HEAD(&tmp_log_history, next);
+               rte_mempool_mp_put(log_history_mp, hist_buf);
+       }
+       fflush(stdout);
+
+       rte_spinlock_unlock(&log_dump_lock);
+}
+
+/*
+ * Generates a log message. The message will be sent in the stream
+ * defined by the previous call to rte_openlog_stream().
+ *
+ * 'level' and 'logtype' are recorded in a per-lcore slot so that
+ * rte_log_cur_msg_loglevel()/rte_log_cur_msg_logtype() can report
+ * them while the message is being processed.  (They were previously
+ * tagged __attribute__((unused)) although they ARE used below —
+ * the misleading annotation is removed.)
+ *
+ * Returns the vfprintf() result: number of characters written,
+ * or a negative value on output error.
+ */
+int
+rte_vlog(uint32_t level, uint32_t logtype,
+          const char *format, va_list ap)
+{
+       int ret;
+       FILE *f = rte_logs.file;
+       unsigned lcore_id;
+
+       /* save loglevel and logtype in a global per-lcore variable */
+       lcore_id = rte_lcore_id();
+       log_cur_msg[lcore_id].loglevel = level;
+       log_cur_msg[lcore_id].logtype = logtype;
+
+       ret = vfprintf(f, format, ap);
+       fflush(f);
+       return ret;
+}
+
+/*
+ * Generates a log message. The message will be sent in the stream
+ * defined by the previous call to rte_openlog_stream().
+ * Variadic wrapper around rte_vlog(); returns its result.
+ */
+int
+rte_log(uint32_t level, uint32_t logtype, const char *format, ...)
+{
+       va_list ap;
+       int ret;
+
+       va_start(ap, format);
+       ret = rte_vlog(level, logtype, format, ap);
+       va_end(ap);
+       return ret;
+}
+
+/*
+ * init the log library, called by rte_eal_init() to enable early
+ * logs.  Builds a cookie stream over stdout via fopencookie() and
+ * registers it as the active log stream.
+ * Returns 0 on success, -1 if the stream cannot be created.
+ */
+int
+rte_eal_log_early_init(void)
+{
+       early_log_stream = fopencookie(NULL, "w+", early_log_func);
+       if (early_log_stream == NULL) {
+               printf("Cannot configure early_log_stream\n");
+               return -1;
+       }
+       rte_openlog_stream(early_log_stream);
+       return 0;
+}
+
+/*
+ * called by environment-specific log init function to initialize log
+ * history.
+ *
+ * default_log: stream installed as the default log output (the one
+ * restored by rte_openlog_stream(NULL)).
+ * The primary process creates the history mempool; secondary
+ * processes look up the one created by the primary.
+ * Returns 0 on success, -1 if the mempool is unavailable.
+ */
+int
+rte_eal_common_log_init(FILE *default_log)
+{
+       STAILQ_INIT(&log_history);
+
+       /* reserve RTE_LOG_HISTORY*2 elements, so we can dump and
+        * keep logging during this time */
+       if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+               log_history_mp = rte_mempool_create(LOG_HISTORY_MP_NAME, RTE_LOG_HISTORY*2,
+                               LOG_ELT_SIZE, 0, 0,
+                               NULL, NULL,
+                               NULL, NULL,
+                               SOCKET_ID_ANY, 0);
+       else
+               log_history_mp = rte_mempool_lookup(LOG_HISTORY_MP_NAME);
+       if (log_history_mp == NULL) {
+               RTE_LOG(ERR, EAL, "%s(): cannot create log_history mempool\n",
+                       __func__);
+               return -1;
+       }
+
+       default_log_stream = default_log;
+       rte_openlog_stream(default_log);
+       return 0;
+}
+
diff --git a/lib/librte_eal/common/eal_common_memory.c b/lib/librte_eal/common/eal_common_memory.c
new file mode 100644 (file)
index 0000000..448639e
--- /dev/null
@@ -0,0 +1,116 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <inttypes.h>
+#include <sys/queue.h>
+
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_log.h>
+
+#include "eal_private.h"
+
+/*
+ * Return a pointer to a read-only table of struct rte_physmem_desc
+ * elements, containing the layout of all addressable physical
+ * memory. The last element of the table contains a NULL address.
+ */
+const struct rte_memseg *
+rte_eal_get_physmem_layout(void)
+{
+       return rte_eal_get_configuration()->mem_config->memseg;
+}
+
+
+/* get the total size of memory: sum of the lengths of all memory
+ * segments up to the first NULL-address (terminator) entry */
+uint64_t
+rte_eal_get_physmem_size(void)
+{
+       const struct rte_mem_config *mcfg;
+       unsigned i = 0;
+       uint64_t total_len = 0;
+
+       /* get pointer to global configuration */
+       mcfg = rte_eal_get_configuration()->mem_config;
+
+       for (i=0; i<RTE_MAX_MEMSEG; i++) {
+               /* a NULL address marks the end of the segment table */
+               if (mcfg->memseg[i].addr == NULL)
+                       break;
+
+               total_len += mcfg->memseg[i].len;
+       }
+
+       return total_len;
+}
+
+/* Dump the physical memory layout on console (stdout): one line per
+ * memory segment with its physical address, length, virtual address
+ * and NUMA socket id */
+void
+rte_dump_physmem_layout(void)
+{
+       const struct rte_mem_config *mcfg;
+       unsigned i = 0;
+
+       /* get pointer to global configuration */
+       mcfg = rte_eal_get_configuration()->mem_config;
+
+       for (i=0; i<RTE_MAX_MEMSEG; i++) {
+               /* a NULL address marks the end of the segment table */
+               if (mcfg->memseg[i].addr == NULL)
+                       break;
+               printf("phys:0x%"PRIx64", len:0x%"PRIx64", virt:%p, "
+                      "socket_id:%"PRId32"\n",
+                      mcfg->memseg[i].phys_addr,
+                      mcfg->memseg[i].len,
+                      mcfg->memseg[i].addr,
+                      mcfg->memseg[i].socket_id);
+       }
+}
+
+/* return the number of memory channels (as detected or forced at
+ * EAL init time) */
+unsigned rte_memory_get_nchannel(void)
+{
+       return rte_eal_get_configuration()->mem_config->nchannel;
+}
+
+/* return the number of memory ranks (as detected or forced at
+ * EAL init time) */
+unsigned rte_memory_get_nrank(void)
+{
+       return rte_eal_get_configuration()->mem_config->nrank;
+}
diff --git a/lib/librte_eal/common/eal_common_memzone.c b/lib/librte_eal/common/eal_common_memzone.c
new file mode 100644 (file)
index 0000000..dae4ea0
--- /dev/null
@@ -0,0 +1,376 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <stdarg.h>
+#include <inttypes.h>
+#include <string.h>
+#include <errno.h>
+#include <sys/queue.h>
+
+#include <rte_log.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_errno.h>
+#include <rte_string_fns.h>
+#include <rte_common.h>
+
+#include "eal_private.h"
+
+/* internal copy of free memory segments; consumed (shrunk) as
+ * memzones are reserved from them */
+static struct rte_memseg free_memseg[RTE_MAX_MEMSEG];
+
+/* index of the next memzone slot in the shared config
+ * (i.e. number of memzones reserved so far) */
+static unsigned memzone_idx;
+
+/*
+ * Return a pointer to a correctly filled memzone descriptor. If the
+ * allocation cannot be done, return NULL.
+ * Convenience wrapper: same as rte_memzone_reserve_aligned() with
+ * cache-line alignment.
+ */
+const struct rte_memzone *
+rte_memzone_reserve(const char *name, uint64_t len, int socket_id,
+                     unsigned flags)
+{
+       return rte_memzone_reserve_aligned(name,
+                       len, socket_id, flags, CACHE_LINE_SIZE);
+}
+
+/*
+ * Return a pointer to a correctly filled memzone descriptor (with a
+ * specified alignment). If the allocation cannot be done, return NULL
+ * and set rte_errno (E_RTE_SECONDARY, EINVAL, ENOSPC, EEXIST, ENOMEM).
+ *
+ * A 'len' of zero requests the biggest contiguous zone available.
+ * Only the primary process may reserve memzones.
+ */
+const struct rte_memzone *
+rte_memzone_reserve_aligned(const char *name, uint64_t len,
+               int socket_id, unsigned flags, unsigned align)
+{
+       struct rte_config *config;
+       unsigned i = 0;
+       int memseg_idx = -1;
+       uint64_t requested_len;
+       uint64_t memseg_len = 0;
+       phys_addr_t memseg_physaddr;
+       void *memseg_addr;
+       uintptr_t addr_offset;
+
+       /* if secondary processes return error */
+       if (rte_eal_process_type() == RTE_PROC_SECONDARY){
+               RTE_LOG(ERR, EAL, "%s(): Not allowed in secondary process\n", __func__);
+               rte_errno = E_RTE_SECONDARY;
+               return NULL;
+       }
+
+       /* if alignment is not a power of two */
+       if (!rte_is_power_of_2(align)) {
+               RTE_LOG(ERR, EAL, "%s(): Invalid alignment: %u\n", __func__,
+                               align);
+               rte_errno = EINVAL;
+               return NULL;
+       }
+
+       /* alignment less than cache size is not allowed */
+       if (align < CACHE_LINE_SIZE)
+               align = CACHE_LINE_SIZE;
+
+       /* get pointer to global configuration */
+       config = rte_eal_get_configuration();
+
+       /* no more room in config */
+       if (memzone_idx >= RTE_MAX_MEMZONE) {
+               RTE_LOG(ERR, EAL, "%s(): No more room in config\n", __func__);
+               rte_errno = ENOSPC;
+               return NULL;
+       }
+
+       /* both sizes cannot be explicitly called for */
+       if ((flags & RTE_MEMZONE_1GB) && (flags & RTE_MEMZONE_2MB)) {
+               rte_errno = EINVAL;
+               return NULL;
+       }
+
+       /* zone already exist */
+       if (rte_memzone_lookup(name) != NULL) {
+               RTE_LOG(DEBUG, EAL, "%s(): memzone <%s> already exists\n",
+                       __func__, name);
+               rte_errno = EEXIST;
+               return NULL;
+       }
+
+       /* align length on cache boundary */
+       len += CACHE_LINE_MASK;
+       len &= ~((uint64_t)CACHE_LINE_MASK);
+
+       /* save requested length */
+       requested_len = len;
+
+       /* reserve extra space for future alignment; skipped for len == 0
+        * ("biggest zone" request) */
+       if (len)
+               len += align;
+
+       /* find the smallest segment matching requirements */
+       for (i = 0; i < RTE_MAX_MEMSEG; i++) {
+
+               /* last segment */
+               if (free_memseg[i].addr == NULL)
+                       break;
+
+               /* empty segment, skip it */
+               if (free_memseg[i].len == 0)
+                       continue;
+
+               /* bad socket ID */
+               if (socket_id != SOCKET_ID_ANY &&
+                   socket_id != free_memseg[i].socket_id)
+                       continue;
+
+               /* check len */
+               if (len != 0 && len > free_memseg[i].len)
+                       continue;
+
+               /* check flags for hugepage sizes */
+               if ((flags & RTE_MEMZONE_2MB) &&
+                               free_memseg[i].hugepage_sz == RTE_PGSIZE_1G )
+                       continue;
+               if ((flags & RTE_MEMZONE_1GB) &&
+                               free_memseg[i].hugepage_sz == RTE_PGSIZE_2M )
+                       continue;
+
+               /* this segment is the best until now */
+               if (memseg_idx == -1) {
+                       memseg_idx = i;
+                       memseg_len = free_memseg[i].len;
+               }
+               /* find the biggest contiguous zone */
+               else if (len == 0) {
+                       if (free_memseg[i].len > memseg_len) {
+                               memseg_idx = i;
+                               memseg_len = free_memseg[i].len;
+                       }
+               }
+               /*
+                * find the smallest (we already checked that current
+                * zone length is > len
+                */
+               else if (free_memseg[i].len < memseg_len) {
+                       memseg_idx = i;
+                       memseg_len = free_memseg[i].len;
+               }
+       }
+
+       /* no segment found */
+       if (memseg_idx == -1) {
+               /*
+                * If RTE_MEMZONE_SIZE_HINT_ONLY flag is specified,
+                * try allocating again without the size parameter,
+                * otherwise fail.
+                *
+                * Retry with requested_len rather than 'len - align':
+                * when the caller passed len == 0, no alignment pad was
+                * added above, so 'len - align' would underflow to a
+                * huge uint64_t; for len != 0 both are identical.
+                */
+               if ((flags & RTE_MEMZONE_SIZE_HINT_ONLY)  &&
+                ((flags & RTE_MEMZONE_1GB) || (flags & RTE_MEMZONE_2MB)))
+                       return rte_memzone_reserve_aligned(name, requested_len,
+                                       socket_id, 0, align);
+
+               RTE_LOG(ERR, EAL, "%s(): No appropriate segment found\n", __func__);
+               rte_errno = ENOMEM;
+               return NULL;
+       }
+
+       /* get offset needed to adjust alignment */
+       addr_offset = (uintptr_t) RTE_PTR_SUB(
+                       RTE_ALIGN_CEIL(free_memseg[memseg_idx].addr, (uintptr_t) align),
+                       (uintptr_t) free_memseg[memseg_idx].addr);
+
+       /* save aligned physical and virtual addresses */
+       memseg_physaddr = free_memseg[memseg_idx].phys_addr + addr_offset;
+       memseg_addr = RTE_PTR_ADD(free_memseg[memseg_idx].addr, addr_offset);
+
+       /* if we are looking for a biggest memzone */
+       if (requested_len == 0)
+               requested_len = memseg_len - addr_offset;
+
+       /* set length to correct value */
+       len = addr_offset + requested_len;
+
+       /* update our internal state: shrink the free segment by the
+        * amount consumed */
+       free_memseg[memseg_idx].len -= len;
+       free_memseg[memseg_idx].phys_addr += len;
+       free_memseg[memseg_idx].addr =
+               (char *)free_memseg[memseg_idx].addr + len;
+
+       /* fill the zone in config */
+       struct rte_memzone *mz = &config->mem_config->memzone[memzone_idx++];
+       rte_snprintf(mz->name, sizeof(mz->name), "%s", name);
+       mz->phys_addr = memseg_physaddr;
+       mz->addr = memseg_addr;
+       mz->len = requested_len;
+       mz->hugepage_sz = free_memseg[memseg_idx].hugepage_sz;
+       mz->socket_id = free_memseg[memseg_idx].socket_id;
+       mz->flags = 0;
+
+       return mz;
+}
+
+/*
+ * Lookup for the memzone identified by the given name.
+ * Returns the zone descriptor, or NULL if no zone with that name
+ * has been reserved.
+ */
+const struct rte_memzone *
+rte_memzone_lookup(const char *name)
+{
+       const struct rte_mem_config *mcfg;
+       unsigned i = 0;
+
+       /* get pointer to global configuration */
+       mcfg = rte_eal_get_configuration()->mem_config;
+
+       /*
+        * the algorithm is not optimal (linear), but there are few
+        * zones and this function should be called at init only
+        */
+       for (i = 0; i < RTE_MAX_MEMZONE && mcfg->memzone[i].addr != NULL; i++) {
+               if (!strncmp(name, mcfg->memzone[i].name, RTE_MEMZONE_NAMESIZE))
+                       return &mcfg->memzone[i];
+       }
+       return NULL;
+}
+
+/* Dump all reserved memory zones on console (stdout): one line per
+ * zone with its name, physical address, length, virtual address and
+ * NUMA socket id */
+void
+rte_memzone_dump(void)
+{
+       const struct rte_mem_config *mcfg;
+       unsigned i = 0;
+
+       /* get pointer to global configuration */
+       mcfg = rte_eal_get_configuration()->mem_config;
+
+       /* dump all zones; a NULL address marks the end of the table */
+       for (i=0; i<RTE_MAX_MEMZONE; i++) {
+               if (mcfg->memzone[i].addr == NULL)
+                       break;
+               printf("name:<%s>, phys:0x%"PRIx64", len:0x%"PRIx64""
+                      ", virt:%p, socket_id:%"PRId32"\n",
+                      mcfg->memzone[i].name,
+                      mcfg->memzone[i].phys_addr,
+                      mcfg->memzone[i].len,
+                      mcfg->memzone[i].addr,
+                      mcfg->memzone[i].socket_id);
+       }
+}
+
+/*
+ * called by init: modify the free memseg list to have cache-aligned
+ * addresses and cache-aligned lengths.
+ *
+ * Returns 0 on success (possibly marking a too-small segment empty),
+ * -1 if the physical and virtual addresses have different alignment
+ * (in which case alignment cannot be fixed consistently for both).
+ */
+static int
+memseg_sanitize(struct rte_memseg *memseg)
+{
+       unsigned phys_align;
+       unsigned virt_align;
+       unsigned off;
+
+       phys_align = memseg->phys_addr & CACHE_LINE_MASK;
+       virt_align = (unsigned long)memseg->addr & CACHE_LINE_MASK;
+
+       /*
+        * sanity check: phys_addr and addr must have the same
+        * alignment
+        */
+       if (phys_align != virt_align)
+               return -1;
+
+       /* memseg is really too small, don't bother with it */
+       if (memseg->len < (2 * CACHE_LINE_SIZE)) {
+               memseg->len = 0;
+               return 0;
+       }
+
+       /* align start address (round up to the next cache line) */
+       off = (CACHE_LINE_SIZE - phys_align) & CACHE_LINE_MASK;
+       memseg->phys_addr += off;
+       memseg->addr = (char *)memseg->addr + off;
+       memseg->len -= off;
+
+       /* align end address (truncate length to a cache-line multiple) */
+       memseg->len &= ~((uint64_t)CACHE_LINE_MASK);
+
+       return 0;
+}
+
+/*
+ * Init the memzone subsystem: copy the physmem layout into the
+ * internal free_memseg table, cache-align every segment and clear
+ * the memzone table in the shared configuration.
+ * Secondary processes skip initialisation (the primary owns it).
+ * Returns 0 on success, -1 on error.
+ */
+int
+rte_eal_memzone_init(void)
+{
+       struct rte_config *config;
+       const struct rte_memseg *memseg;
+       unsigned i = 0;
+
+       /* secondary processes don't need to initialise anything */
+       if (rte_eal_process_type() == RTE_PROC_SECONDARY)
+               return 0;
+
+       /* get pointer to global configuration */
+       config = rte_eal_get_configuration();
+
+       memseg = rte_eal_get_physmem_layout();
+       if (memseg == NULL) {
+               RTE_LOG(ERR, EAL, "%s(): Cannot get physical layout\n", __func__);
+               return -1;
+       }
+
+       /* duplicate the memsegs from config */
+       memcpy(free_memseg, memseg, sizeof(free_memseg));
+
+       /* make all zones cache-aligned */
+       for (i=0; i<RTE_MAX_MEMSEG; i++) {
+               if (free_memseg[i].addr == NULL)
+                       break;
+               if (memseg_sanitize(&free_memseg[i]) < 0) {
+                       RTE_LOG(ERR, EAL, "%s(): Sanity check failed\n", __func__);
+                       return -1;
+               }
+       }
+
+       /* delete all zones */
+       memzone_idx = 0;
+       memset(config->mem_config->memzone, 0, sizeof(config->mem_config->memzone));
+
+       return 0;
+}
diff --git a/lib/librte_eal/common/eal_common_pci.c b/lib/librte_eal/common/eal_common_pci.c
new file mode 100644 (file)
index 0000000..fe24265
--- /dev/null
@@ -0,0 +1,145 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <inttypes.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <sys/queue.h>
+
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_per_lcore.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+
+#include "eal_private.h"
+
+struct pci_driver_list driver_list;
+struct pci_device_list device_list;
+
+static struct rte_pci_addr *dev_blacklist = NULL;
+static unsigned dev_blacklist_size = 0;
+
+/* Return 1 if the device's PCI address (domain/bus/devid/function) appears in
+ * the user-supplied blacklist set via rte_eal_pci_set_blacklist(), else 0. */
+static int is_blacklisted(struct rte_pci_device *dev)
+{
+       struct rte_pci_addr *loc = &dev->addr;
+       unsigned i;
+
+       /* linear scan; the blacklist is expected to be small and this runs
+        * only at probe time */
+       for (i = 0; i < dev_blacklist_size; i++) {
+               if ((loc->domain == dev_blacklist[i].domain) &&
+                               (loc->bus == dev_blacklist[i].bus) &&
+                               (loc->devid == dev_blacklist[i].devid) &&
+                               (loc->function == dev_blacklist[i].function)) {
+                       return 1;
+               }
+       }
+
+       return 0;           /* not in blacklist */
+}
+
+/*
+ * If vendor/device ID match, call the devinit() function of all
+ * registered driver for the given device. Return -1 if no driver is
+ * found for this device.
+ */
+/*
+ * Try all registered drivers on one device.
+ *
+ * Returns 0 as soon as one driver accepts the device, -1 if the device is
+ * blacklisted or no registered driver matches.
+ */
+static int
+pci_probe_all_drivers(struct rte_pci_device *dev)
+{
+       struct rte_pci_driver *dr = NULL;
+
+       /* blacklist membership depends only on the device, not the driver,
+        * so check it once instead of on every loop iteration */
+       if (is_blacklisted(dev))
+               return -1;
+
+       TAILQ_FOREACH(dr, &driver_list, next) {
+               if (rte_eal_pci_probe_one_driver(dr, dev) == 0)
+                       return 0;
+       }
+       return -1;
+}
+
+/*
+ * Scan the content of the PCI bus, and call the devinit() function for
+ * all registered drivers that have a matching entry in its id_table
+ * for discovered devices.
+ */
+/* Walk the scanned device list and try to bind a driver to each device.
+ * Always returns 0: devices with no matching driver are silently skipped
+ * (pci_probe_all_drivers' -1 is ignored). */
+int
+rte_eal_pci_probe(void)
+{
+       struct rte_pci_device *dev = NULL;
+
+       TAILQ_FOREACH(dev, &device_list, next)
+               pci_probe_all_drivers(dev);
+
+       return 0;
+}
+
+/* dump one device */
+/* Print one device to stdout: PCI address, vendor/device IDs, and the
+ * physical address and length of its first memory resource. Always returns 0. */
+static int
+pci_dump_one_device(struct rte_pci_device *dev)
+{
+       printf(PCI_PRI_FMT, dev->addr.domain, dev->addr.bus,
+              dev->addr.devid, dev->addr.function);
+       printf(" - vendor:%x device:%x\n", dev->id.vendor_id,
+              dev->id.device_id);
+       printf("   %16.16"PRIx64" %16.16"PRIx64"\n",
+              dev->mem_resource.phys_addr, dev->mem_resource.len);
+       return 0;
+}
+
+/* dump devices on the bus */
+/* Dump every device on the scanned device list to stdout. */
+void
+rte_eal_pci_dump(void)
+{
+       struct rte_pci_device *dev = NULL;
+
+       TAILQ_FOREACH(dev, &device_list, next) {
+               pci_dump_one_device(dev);
+       }
+}
+
+/* register a driver */
+/* Append a driver to the global driver list used at probe time.
+ * NOTE(review): no locking here — presumably called only from single-threaded
+ * init; confirm before calling from other contexts. */
+void
+rte_eal_pci_register(struct rte_pci_driver *driver)
+{
+       TAILQ_INSERT_TAIL(&driver_list, driver, next);
+}
+
+/* Install the device blacklist consulted during probe. The array is NOT
+ * copied — only the pointer is stored — so the caller must keep the array
+ * alive for as long as probing may run. */
+void
+rte_eal_pci_set_blacklist(struct rte_pci_addr *blacklist, unsigned size)
+{
+       dev_blacklist = blacklist;
+       dev_blacklist_size = size;
+}
diff --git a/lib/librte_eal/common/eal_common_tailqs.c b/lib/librte_eal/common/eal_common_tailqs.c
new file mode 100644 (file)
index 0000000..7702b1f
--- /dev/null
@@ -0,0 +1,113 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <sys/queue.h>
+#include <stdint.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_launch.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_memory.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_log.h>
+#include <rte_string_fns.h>
+#include "eal_private.h"
+
+static unsigned tailq_idx = 0;
+
+/* Find a named tailq head in the shared mem config; NULL if not found.
+ * NOTE(review): unused slots presumably have an empty qname, so looking up
+ * an empty name would match the first free slot — callers must pass real
+ * names; confirm mem_config is zero-initialized. */
+struct rte_tailq_head *
+rte_eal_tailq_lookup(const char *name)
+{
+       unsigned i;
+       struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+
+       /*
+        * the algorithm is not optimal (linear), but there are few
+        * tailq's and this function should be called at init only
+        */
+       for (i = 0; i < RTE_MAX_TAILQ; i++) {
+               if (!strncmp(name, mcfg->tailq_head[i].qname, RTE_TAILQ_NAMESIZE-1))
+                       return &mcfg->tailq_head[i];
+       }
+       return NULL;
+}
+
+/* Reserve a named tailq slot in the shared config (primary process), or look
+ * up the existing one (secondary process). Returns NULL when the table is
+ * full or the name is already taken. */
+struct rte_tailq_head *
+rte_eal_tailq_reserve(const char *name)
+{
+       struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+
+       /* secondaries only attach to what the primary created */
+       if (rte_eal_process_type() == RTE_PROC_SECONDARY)
+               return rte_eal_tailq_lookup(name);
+
+       /* NOTE(review): full-table check precedes the duplicate-name check,
+        * so a duplicate name on a full table reports "No more room" */
+       if (tailq_idx == RTE_MAX_TAILQ){
+               RTE_LOG(ERR, EAL, "%s(): No more room in config\n", __func__);
+               return NULL;
+       }
+
+       /* tailq with this name already exists */
+       if (rte_eal_tailq_lookup(name) != NULL) {
+               RTE_LOG(DEBUG, EAL, "%s(): tailq <%s> already exists\n",
+                       __func__, name);
+               return NULL;
+       }
+
+       /* claim the slot: store the (truncated) name, then bump the index */
+       rte_snprintf(mcfg->tailq_head[tailq_idx].qname, RTE_TAILQ_NAMESIZE,
+                       "%.*s", (int)(RTE_TAILQ_NAMESIZE - 1), name);
+
+       return &mcfg->tailq_head[tailq_idx++];
+}
+
+/* Initialize all shared tailq heads; only the primary process does the
+ * TAILQ_INIT — secondaries reuse the already-initialized shared memory.
+ * Always returns 0. */
+int
+rte_eal_tailqs_init(void)
+{
+       unsigned i;
+       struct rte_config *cfg = rte_eal_get_configuration();
+
+       if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+               for (i = 0; i < RTE_MAX_TAILQ; i++)
+                       TAILQ_INIT(&cfg->mem_config->tailq_head[i].tailq_head);
+
+       return 0;
+}
diff --git a/lib/librte_eal/common/include/eal_private.h b/lib/librte_eal/common/include/eal_private.h
new file mode 100644 (file)
index 0000000..023e418
--- /dev/null
@@ -0,0 +1,176 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#ifndef _EAL_PRIVATE_H_
+#define _EAL_PRIVATE_H_
+
+/**
+ * Initialize the memzone subsystem (private to eal).
+ *
+ * @return
+ *   - 0 on success
+ *   - Negative on error
+ */
+int rte_eal_memzone_init(void);
+
+/**
+ * Common log initialization function (private to eal).
+ *
+ * Called by environment-specific log initialization function to initialize
+ * log history.
+ *
+ * @param default_log
+ *   The default log stream to be used.
+ * @return
+ *   - 0 on success
+ *   - Negative on error
+ */
+int rte_eal_common_log_init(FILE *default_log);
+
+/**
+ * Fill configuration with number of physical and logical processors
+ *
+ * This function is private to EAL.
+ *
+ * Parse /proc/cpuinfo to get the number of physical and logical
+ * processors on the machine.
+ *
+ * @return
+ *   0 on success, negative on error
+ */
+int rte_eal_cpu_init(void);
+
+/**
+ * Map memory
+ *
+ * This function is private to EAL.
+ *
+ * Fill configuration structure with these infos, and return 0 on success.
+ *
+ * @return
+ *   0 on success, negative on error
+ */
+int rte_eal_memory_init(void);
+
+/**
+ * Configure HPET
+ *
+ * This function is private to EAL.
+ *
+ * Mmap memory areas used by HPET (high precision event timer) that will
+ * provide our time reference.
+ *
+ * @return
+ *   0 on success, negative on error
+ */
+int rte_eal_hpet_init(void);
+
+/**
+ * Init early logs
+ *
+ * This function is private to EAL.
+ *
+ * @return
+ *   0 on success, negative on error
+ */
+int rte_eal_log_early_init(void);
+
+/**
+ * Init the default log stream
+ *
+ * This function is private to EAL.
+ *
+ * @return
+ *   0 on success, negative on error
+ */
+int rte_eal_log_init(void);
+
+/**
+ * Init the PCI infrastructure (environment-specific PCI bus setup).
+ *
+ * This function is private to EAL.
+ *
+ * @return
+ *   0 on success, negative on error
+ */
+int rte_eal_pci_init(void);
+
+struct rte_pci_driver;
+struct rte_pci_device;
+
+/**
+ * Match one registered driver against a single PCI device and, on an
+ * ID-table match, invoke the driver for that device.
+ *
+ * This function is private to EAL.
+ *
+ * @return
+ *   0 on success, negative on error
+ */
+int rte_eal_pci_probe_one_driver(struct rte_pci_driver *dr,
+               struct rte_pci_device *dev);
+
+/**
+ * Init tail queues for non-EAL library structures. This is to allow
+ * the rings, mempools, etc. lists to be shared among multiple processes
+ *
+ * This function is private to EAL
+ *
+ * @return
+ *    0 on success, negative on error
+ */
+int rte_eal_tailqs_init(void);
+
+/**
+ * Init interrupt handling.
+ *
+ * This function is private to EAL.
+ *
+ * @return
+ *  0 on success, negative on error
+ */
+int rte_eal_intr_init(void);
+
+/**
+ * Init alarm mechanism. This is to allow a callback be called after
+ * specific time.
+ *
+ * This function is private to EAL.
+ *
+ * @return
+ *  0 on success, negative on error
+ */
+int rte_eal_alarm_init(void);
+
+#endif /* _EAL_PRIVATE_H_ */
diff --git a/lib/librte_eal/common/include/i686/arch/rte_atomic.h b/lib/librte_eal/common/include/i686/arch/rte_atomic.h
new file mode 100644 (file)
index 0000000..c834290
--- /dev/null
@@ -0,0 +1,959 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+/*
+ * Inspired from FreeBSD src/sys/i386/include/atomic.h
+ * Copyright (c) 1998 Doug Rabson
+ * All rights reserved.
+ */
+
+#ifndef _RTE_ATOMIC_H_
+#error "don't include this file directly, please include generic <rte_atomic.h>"
+#endif
+
+#ifndef _RTE_I686_ATOMIC_H_
+#define _RTE_I686_ATOMIC_H_
+
+/**
+ * @file
+ * Atomic Operations on i686
+ */
+
+#if RTE_MAX_LCORE == 1
+#define MPLOCKED                        /**< No need to insert MP lock prefix. */
+#else
+#define MPLOCKED        "lock ; "       /**< Insert MP lock prefix. */
+#endif
+
+/**
+ * General memory barrier.
+ *
+ * Guarantees that the LOAD and STORE operations generated before the
+ * barrier occur before the LOAD and STORE operations generated after.
+ */
+#define        rte_mb()  asm volatile(MPLOCKED "addl $0,(%%esp)" : : : "memory")
+
+/**
+ * Write memory barrier.
+ *
+ * Guarantees that the STORE operations generated before the barrier
+ * occur before the STORE operations generated after.
+ */
+#define        rte_wmb() asm volatile(MPLOCKED "addl $0,(%%esp)" : : : "memory")
+
+/**
+ * Read memory barrier.
+ *
+ * Guarantees that the LOAD operations generated before the barrier
+ * occur before the LOAD operations generated after.
+ */
+#define        rte_rmb() asm volatile(MPLOCKED "addl $0,(%%esp)" : : : "memory")
+
+/*------------------------- 16 bit atomic operations -------------------------*/
+
+/**
+ * Atomic compare and set.
+ *
+ * (atomic) equivalent to:
+ *   if (*dst == exp)
+ *     *dst = src (all 16-bit words)
+ *
+ * @param dst
+ *   The destination location into which the value will be written.
+ * @param exp
+ *   The expected value.
+ * @param src
+ *   The new value.
+ * @return
+ *   Non-zero on success; 0 on failure.
+ */
+static inline int
+rte_atomic16_cmpset(volatile uint16_t *dst, uint16_t exp, uint16_t src)
+{
+       uint8_t res;
+
+       /* lock cmpxchgw compares ax (exp) with *dst; on equality it stores
+        * src and sets ZF. sete captures ZF into al, which is also where the
+        * "=a" output constraint reads res from. */
+       asm volatile(
+                       MPLOCKED
+                       "cmpxchgw %[src], %[dst];"
+                       "sete %[res];"
+                       : [res] "=a" (res),     /* output */
+                         [dst] "=m" (*dst)
+                       : [src] "r" (src),      /* input */
+                         "a" (exp),
+                         "m" (*dst)
+                       : "memory");            /* compiler barrier */
+       return res;
+}
+
+/**
+ * The atomic counter structure.
+ */
+typedef struct {
+       volatile int16_t cnt; /**< An internal counter value. */
+} rte_atomic16_t;
+
+/**
+ * Static initializer for an atomic counter.
+ */
+#define RTE_ATOMIC16_INIT(val) { (val) }
+
+/**
+ * Initialize an atomic counter.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ */
+static inline void
+rte_atomic16_init(rte_atomic16_t *v)
+{
+       /* plain volatile store; no fence or lock needed for initialization */
+       v->cnt = 0;
+}
+
+/**
+ * Atomically read a 16-bit value from a counter.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @return
+ *   The value of the counter.
+ */
+static inline int16_t
+rte_atomic16_read(const rte_atomic16_t *v)
+{
+       /* plain volatile load; NOTE(review): presumably relies on aligned
+        * 16-bit loads being single accesses on x86 — confirm */
+       return v->cnt;
+}
+
+/**
+ * Atomically set a counter to a 16-bit value.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @param new_value
+ *   The new value for the counter.
+ */
+static inline void
+rte_atomic16_set(rte_atomic16_t *v, int16_t new_value)
+{
+       /* plain volatile store; see note on rte_atomic16_read */
+       v->cnt = new_value;
+}
+
+/**
+ * Atomically add a 16-bit value to an atomic counter.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @param inc
+ *   The value to be added to the counter.
+ */
+static inline void
+rte_atomic16_add(rte_atomic16_t *v, int16_t inc)
+{
+       /* lock addw directly on the counter in memory */
+       asm volatile(
+                       MPLOCKED
+                       "addw %[inc], %[cnt]"
+                       : [cnt] "=m" (v->cnt)   /* output */
+                       : [inc] "ir" (inc),     /* input */
+                         "m" (v->cnt)
+                       );
+}
+
+/**
+ * Atomically subtract a 16-bit value from an atomic counter.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @param dec
+ *   The value to be subtracted from the counter.
+ */
+static inline void
+rte_atomic16_sub(rte_atomic16_t *v, int16_t dec)
+{
+       /* lock subw directly on the counter in memory */
+       asm volatile(
+                       MPLOCKED
+                       "subw %[dec], %[cnt]"
+                       : [cnt] "=m" (v->cnt)   /* output */
+                       : [dec] "ir" (dec),     /* input */
+                         "m" (v->cnt)
+                       );
+}
+
+/**
+ * Atomically increment a counter by one.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ */
+static inline void
+rte_atomic16_inc(rte_atomic16_t *v)
+{
+       /* lock incw on the counter in memory */
+       asm volatile(
+                       MPLOCKED
+                       "incw %[cnt]"
+                       : [cnt] "=m" (v->cnt)   /* output */
+                       : "m" (v->cnt)          /* input */
+                       );
+}
+
+/**
+ * Atomically decrement a counter by one.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ */
+static inline void
+rte_atomic16_dec(rte_atomic16_t *v)
+{
+       /* lock decw on the counter in memory */
+       asm volatile(
+                       MPLOCKED
+                       "decw %[cnt]"
+                       : [cnt] "=m" (v->cnt)   /* output */
+                       : "m" (v->cnt)          /* input */
+                       );
+}
+
+/**
+ * Atomically add a 16-bit value to a counter and return the result.
+ *
+ * Atomically adds the 16-bits value (inc) to the atomic counter (v) and
+ * returns the value of v after addition.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @param inc
+ *   The value to be added to the counter.
+ * @return
+ *   The value of v after the addition.
+ */
+static inline int16_t
+rte_atomic16_add_return(rte_atomic16_t *v, int16_t inc)
+{
+       int16_t prev = inc;
+
+       /* lock xaddw leaves the counter's PREVIOUS value in prev, so the
+        * post-add result is prev + inc */
+       asm volatile(
+                       MPLOCKED
+                       "xaddw %[prev], %[cnt]"
+                       : [prev] "+r" (prev),   /* output */
+                         [cnt] "=m" (v->cnt)
+                       : "m" (v->cnt)          /* input */
+                       );
+       return (int16_t)(prev + inc);
+}
+
+/**
+ * Atomically subtract a 16-bit value from a counter and return
+ * the result.
+ *
+ * Atomically subtracts the 16-bit value (inc) from the atomic counter
+ * (v) and returns the value of v after the subtraction.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @param dec
+ *   The value to be subtracted from the counter.
+ * @return
+ *   The value of v after the subtraction.
+ */
+static inline int16_t
+rte_atomic16_sub_return(rte_atomic16_t *v, int16_t dec)
+{
+       /* implemented as add of the negated value via xadd */
+       return rte_atomic16_add_return(v, (int16_t)-dec);
+}
+
+/**
+ * Atomically increment a 16-bit counter by one and test.
+ *
+ * Atomically increments the atomic counter (v) by one and returns true if
+ * the result is 0, or false in all other cases.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @return
+ *   True if the result after the increment operation is 0; false otherwise.
+ */
+static inline int rte_atomic16_inc_and_test(rte_atomic16_t *v)
+{
+       uint8_t ret;
+
+       /* lock incw sets ZF when the result is 0; sete captures it.
+        * NOTE(review): unlike cmpset, there is no "memory" clobber here, so
+        * the compiler may keep unrelated memory cached across this op —
+        * confirm that is intended. */
+       asm volatile(
+                       MPLOCKED
+                       "incw %[cnt] ; "
+                       "sete %[ret]"
+                       : [cnt] "+m" (v->cnt),  /* output */
+                         [ret] "=qm" (ret)
+                       );
+       return (ret != 0);
+}
+
+/**
+ * Atomically decrement a 16-bit counter by one and test.
+ *
+ * Atomically decrements the atomic counter (v) by one and returns true if
+ * the result is 0, or false in all other cases.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @return
+ *   True if the result after the decrement operation is 0; false otherwise.
+ */
+static inline int rte_atomic16_dec_and_test(rte_atomic16_t *v)
+{
+       uint8_t ret;
+
+       /* lock decw sets ZF when the result is 0; sete captures it.
+        * NOTE(review): no "memory" clobber — see rte_atomic16_inc_and_test. */
+       asm volatile(MPLOCKED
+                       "decw %[cnt] ; "
+                       "sete %[ret]"
+                       : [cnt] "+m" (v->cnt),  /* output */
+                         [ret] "=qm" (ret)
+                       );
+       return (ret != 0);
+}
+
+/**
+ * Atomically test and set a 16-bit atomic counter.
+ *
+ * If the counter value is already set, return 0 (failed). Otherwise, set
+ * the counter value to 1 and return 1 (success).
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @return
+ *   0 if failed; else 1, success.
+ */
+static inline int rte_atomic16_test_and_set(rte_atomic16_t *v)
+{
+       /* CAS 0 -> 1: succeeds (returns 1) only if the counter was 0 */
+       return rte_atomic16_cmpset((volatile uint16_t *)&v->cnt, 0, 1);
+}
+
+/**
+ * Atomically set a 16-bit counter to 0.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ */
+static inline void rte_atomic16_clear(rte_atomic16_t *v)
+{
+       /* plain volatile store of 0; no lock prefix */
+       v->cnt = 0;
+}
+
+/*------------------------- 32 bit atomic operations -------------------------*/
+
+/**
+ * Atomic compare and set.
+ *
+ * (atomic) equivalent to:
+ *   if (*dst == exp)
+ *     *dst = src (all 32-bit words)
+ *
+ * @param dst
+ *   The destination location into which the value will be written.
+ * @param exp
+ *   The expected value.
+ * @param src
+ *   The new value.
+ * @return
+ *   Non-zero on success; 0 on failure.
+ */
+static inline int
+rte_atomic32_cmpset(volatile uint32_t *dst, uint32_t exp, uint32_t src)
+{
+       uint8_t res;
+
+       /* lock cmpxchgl compares eax (exp) with *dst; on equality it stores
+        * src and sets ZF, which sete captures into al ("=a" res). */
+       asm volatile(
+                       MPLOCKED
+                       "cmpxchgl %[src], %[dst];"
+                       "sete %[res];"
+                       : [res] "=a" (res),     /* output */
+                         [dst] "=m" (*dst)
+                       : [src] "r" (src),      /* input */
+                         "a" (exp),
+                         "m" (*dst)
+                       : "memory");            /* compiler barrier */
+       return res;
+}
+
+/**
+ * The atomic counter structure.
+ */
+typedef struct {
+       volatile int32_t cnt; /**< An internal counter value. */
+} rte_atomic32_t;
+
+/**
+ * Static initializer for an atomic counter.
+ */
+#define RTE_ATOMIC32_INIT(val) { (val) }
+
+/**
+ * Initialize an atomic counter.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ */
+static inline void
+rte_atomic32_init(rte_atomic32_t *v)
+{
+       /* plain volatile store; no fence or lock needed for initialization */
+       v->cnt = 0;
+}
+
+/**
+ * Atomically read a 32-bit value from a counter.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @return
+ *   The value of the counter.
+ */
+static inline int32_t
+rte_atomic32_read(const rte_atomic32_t *v)
+{
+       /* plain volatile load; NOTE(review): presumably relies on aligned
+        * 32-bit loads being single accesses on x86 — confirm */
+       return v->cnt;
+}
+
+/**
+ * Atomically set a counter to a 32-bit value.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @param new_value
+ *   The new value for the counter.
+ */
+static inline void
+rte_atomic32_set(rte_atomic32_t *v, int32_t new_value)
+{
+       /* plain volatile store; see note on rte_atomic32_read */
+       v->cnt = new_value;
+}
+
+/**
+ * Atomically add a 32-bit value to an atomic counter.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @param inc
+ *   The value to be added to the counter.
+ */
+static inline void
+rte_atomic32_add(rte_atomic32_t *v, int32_t inc)
+{
+       /* lock addl directly on the counter in memory */
+       asm volatile(
+                       MPLOCKED
+                       "addl %[inc], %[cnt]"
+                       : [cnt] "=m" (v->cnt)   /* output */
+                       : [inc] "ir" (inc),     /* input */
+                         "m" (v->cnt)
+                       );
+}
+
+/**
+ * Atomically subtract a 32-bit value from an atomic counter.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @param dec
+ *   The value to be subtracted from the counter.
+ */
+static inline void
+rte_atomic32_sub(rte_atomic32_t *v, int32_t dec)
+{
+       /* lock subl directly on the counter in memory */
+       asm volatile(
+                       MPLOCKED
+                       "subl %[dec], %[cnt]"
+                       : [cnt] "=m" (v->cnt)   /* output */
+                       : [dec] "ir" (dec),     /* input */
+                         "m" (v->cnt)
+                       );
+}
+
+/**
+ * Atomically increment a counter by one.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ */
+static inline void
+rte_atomic32_inc(rte_atomic32_t *v)
+{
+       /* lock incl on the counter in memory */
+       asm volatile(
+                       MPLOCKED
+                       "incl %[cnt]"
+                       : [cnt] "=m" (v->cnt)   /* output */
+                       : "m" (v->cnt)          /* input */
+                       );
+}
+
+/**
+ * Atomically decrement a counter by one.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ */
+static inline void
+rte_atomic32_dec(rte_atomic32_t *v)
+{
+       /* lock decl on the counter in memory */
+       asm volatile(
+                       MPLOCKED
+                       "decl %[cnt]"
+                       : [cnt] "=m" (v->cnt)   /* output */
+                       : "m" (v->cnt)          /* input */
+                       );
+}
+
+/**
+ * Atomically add a 32-bit value to a counter and return the result.
+ *
+ * Atomically adds the 32-bits value (inc) to the atomic counter (v) and
+ * returns the value of v after addition.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @param inc
+ *   The value to be added to the counter.
+ * @return
+ *   The value of v after the addition.
+ */
+static inline int32_t
+rte_atomic32_add_return(rte_atomic32_t *v, int32_t inc)
+{
+       int32_t prev = inc;
+
+       /* lock xaddl leaves the counter's PREVIOUS value in prev, so the
+        * post-add result is prev + inc */
+       asm volatile(
+                       MPLOCKED
+                       "xaddl %[prev], %[cnt]"
+                       : [prev] "+r" (prev),   /* output */
+                         [cnt] "=m" (v->cnt)
+                       : "m" (v->cnt)          /* input */
+                       );
+       return (int32_t)(prev + inc);
+}
+
+/**
+ * Atomically subtract a 32-bit value from a counter and return
+ * the result.
+ *
+ * Atomically subtracts the 32-bit value (inc) from the atomic counter
+ * (v) and returns the value of v after the subtraction.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @param dec
+ *   The value to be subtracted from the counter.
+ * @return
+ *   The value of v after the subtraction.
+ */
+static inline int32_t
+rte_atomic32_sub_return(rte_atomic32_t *v, int32_t dec)
+{
+       /* implemented as add of the negated value via xadd */
+       return rte_atomic32_add_return(v, -dec);
+}
+
+/**
+ * Atomically increment a 32-bit counter by one and test.
+ *
+ * Atomically increments the atomic counter (v) by one and returns true if
+ * the result is 0, or false in all other cases.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @return
+ *   True if the result after the increment operation is 0; false otherwise.
+ */
+static inline int rte_atomic32_inc_and_test(rte_atomic32_t *v)
+{
+       uint8_t ret;
+
+       /* lock incl sets ZF when the result is 0; sete captures it.
+        * NOTE(review): no "memory" clobber here, unlike cmpset — the
+        * compiler may keep unrelated memory cached across this op; confirm
+        * that is intended. */
+       asm volatile(
+                       MPLOCKED
+                       "incl %[cnt] ; "
+                       "sete %[ret]"
+                       : [cnt] "+m" (v->cnt),  /* output */
+                         [ret] "=qm" (ret)
+                       );
+       return (ret != 0);
+}
+
+/**
+ * Atomically decrement a 32-bit counter by one and test.
+ *
+ * Atomically decrements the atomic counter (v) by one and returns true if
+ * the result is 0, or false in all other cases.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @return
+ *   True if the result after the decrement operation is 0; false otherwise.
+ */
+static inline int rte_atomic32_dec_and_test(rte_atomic32_t *v)
+{
+       uint8_t ret;
+
+       /* lock decl sets ZF when the result is 0; sete captures it.
+        * NOTE(review): no "memory" clobber — see rte_atomic32_inc_and_test. */
+       asm volatile(MPLOCKED
+                       "decl %[cnt] ; "
+                       "sete %[ret]"
+                       : [cnt] "+m" (v->cnt),  /* output */
+                         [ret] "=qm" (ret)
+                       );
+       return (ret != 0);
+}
+
+/**
+ * Atomically test and set a 32-bit atomic counter.
+ *
+ * If the counter value is already set, return 0 (failed). Otherwise, set
+ * the counter value to 1 and return 1 (success).
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @return
+ *   0 if failed; else 1, success.
+ */
+static inline int rte_atomic32_test_and_set(rte_atomic32_t *v)
+{
+       /* CAS 0 -> 1: succeeds (returns 1) only if the counter was 0 */
+       return rte_atomic32_cmpset((volatile uint32_t *)&v->cnt, 0, 1);
+}
+
+/**
+ * Atomically set a 32-bit counter to 0.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ */
+static inline void rte_atomic32_clear(rte_atomic32_t *v)
+{
+       /* plain volatile store of 0; no lock prefix */
+       v->cnt = 0;
+}
+
+/*------------------------- 64 bit atomic operations -------------------------*/
+
+/**
+ * An atomic compare and set function used by the mutex functions.
+ * (atomic) equivalent to:
+ *   if (*dst == exp)
+ *     *dst = src (all 64-bit words)
+ *
+ * @param dst
+ *   The destination into which the value will be written.
+ * @param exp
+ *   The expected value.
+ * @param src
+ *   The new value.
+ * @return
+ *   Non-zero on success; 0 on failure.
+ */
+static inline int
+rte_atomic64_cmpset(volatile uint64_t *dst, uint64_t exp, uint64_t src)
+{
+       uint8_t res;
+       /* cmpxchg8b takes its 64-bit operands as 32-bit register pairs,
+        * so split expected and new values into low/high halves. */
+       union {
+               struct {
+                       uint32_t l32;
+                       uint32_t h32;
+               };
+               uint64_t u64;
+       } _exp, _src;
+
+       _exp.u64 = exp;
+       _src.u64 = src;
+
+       /* cmpxchg8b compares edx:eax with the 8 bytes at (dst); on a match
+        * it stores ecx:ebx there and sets ZF, which setz turns into the
+        * boolean result.  The register constraints below encode exactly
+        * this contract. */
+       asm volatile (
+                       MPLOCKED
+                       "cmpxchg8b (%[dst]);"
+                       "setz %[res];"
+                       : [res] "=a" (res)      /* result in eax */
+                       : [dst] "S" (dst),      /* esi */
+                         "b" (_src.l32),       /* ebx */
+                         "c" (_src.h32),       /* ecx */
+                         "a" (_exp.l32),       /* eax */
+                         "d" (_exp.h32)        /* edx */
+                       : "memory" );           /* clobber list: compiler barrier */
+
+       return res;
+}
+
+/**
+ * The atomic counter structure.
+ */
+typedef struct {
+       volatile int64_t cnt;  /**< Internal counter value. */
+} rte_atomic64_t;
+
+/**
+ * Static initializer for an atomic counter.
+ *
+ * Example: static rte_atomic64_t counter = RTE_ATOMIC64_INIT(0);
+ */
+#define RTE_ATOMIC64_INIT(val) { (val) }
+
+/**
+ * Initialize the atomic counter.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ */
+static inline void
+rte_atomic64_init(rte_atomic64_t *v)
+{
+       uint64_t old;
+
+       /* Install 0 via the cmpxchg8b-based compare-and-set, retrying
+        * until no concurrent update intervenes between read and swap —
+        * presumably because a plain 64-bit store is not a single atomic
+        * access on this 32-bit target. */
+       do {
+               old = v->cnt;
+       } while (rte_atomic64_cmpset((volatile uint64_t *)&v->cnt,
+                                    old, 0) == 0);
+}
+
+/**
+ * Atomically read a 64-bit counter.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @return
+ *   The value of the counter.
+ */
+static inline int64_t
+rte_atomic64_read(rte_atomic64_t *v)
+{
+       uint64_t val;
+
+       /* Swap the value for itself: the compare-and-set only succeeds
+        * when 'val' really was the full 64-bit contents, which yields a
+        * torn-read-free load on a 32-bit CPU. */
+       do {
+               val = v->cnt;
+       } while (rte_atomic64_cmpset((volatile uint64_t *)&v->cnt,
+                                    val, val) == 0);
+
+       return val;
+}
+
+/**
+ * Atomically set a 64-bit counter.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @param new_value
+ *   The new value of the counter.
+ */
+static inline void
+rte_atomic64_set(rte_atomic64_t *v, int64_t new_value)
+{
+       uint64_t old;
+
+       /* Retry the compare-and-set until no other writer slipped in
+        * between the read and the swap. */
+       do {
+               old = v->cnt;
+       } while (rte_atomic64_cmpset((volatile uint64_t *)&v->cnt,
+                                    old, new_value) == 0);
+}
+
+/**
+ * Atomically add a 64-bit value to a counter.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @param inc
+ *   The value to be added to the counter.
+ */
+static inline void
+rte_atomic64_add(rte_atomic64_t *v, int64_t inc)
+{
+       uint64_t old;
+
+       /* Classic CAS loop: re-read and retry whenever another thread
+        * updated the counter between the read and the swap. */
+       do {
+               old = v->cnt;
+       } while (rte_atomic64_cmpset((volatile uint64_t *)&v->cnt,
+                                    old, old + inc) == 0);
+}
+
+/**
+ * Atomically subtract a 64-bit value from a counter.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @param dec
+ *   The value to be subtracted from the counter.
+ */
+static inline void
+rte_atomic64_sub(rte_atomic64_t *v, int64_t dec)
+{
+       uint64_t old;
+
+       /* CAS loop: retry until the swap is applied against an
+        * unmodified counter value. */
+       do {
+               old = v->cnt;
+       } while (rte_atomic64_cmpset((volatile uint64_t *)&v->cnt,
+                                    old, old - dec) == 0);
+}
+
+/**
+ * Atomically increment a 64-bit counter by one.
+ *
+ * (No result is returned; use rte_atomic64_inc_and_test() when the
+ * post-increment value must be tested.)
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ */
+static inline void
+rte_atomic64_inc(rte_atomic64_t *v)
+{
+       rte_atomic64_add(v, 1);
+}
+
+/**
+ * Atomically decrement a 64-bit counter by one.
+ *
+ * (No result is returned; use rte_atomic64_dec_and_test() when the
+ * post-decrement value must be tested.)
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ */
+static inline void
+rte_atomic64_dec(rte_atomic64_t *v)
+{
+       rte_atomic64_sub(v, 1);
+}
+
+/**
+ * Add a 64-bit value to an atomic counter and return the result.
+ *
+ * Atomically adds the 64-bit value (inc) to the atomic counter (v) and
+ * returns the value of v after the addition.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @param inc
+ *   The value to be added to the counter.
+ * @return
+ *   The value of v after the addition.
+ */
+static inline int64_t
+rte_atomic64_add_return(rte_atomic64_t *v, int64_t inc)
+{
+       uint64_t old;
+       uint64_t updated;
+
+       /* CAS loop; 'updated' is the value this thread installed, so it
+        * is exactly the post-addition counter value to return. */
+       do {
+               old = v->cnt;
+               updated = old + inc;
+       } while (rte_atomic64_cmpset((volatile uint64_t *)&v->cnt,
+                                    old, updated) == 0);
+
+       return updated;
+}
+
+/**
+ * Subtract a 64-bit value from an atomic counter and return the result.
+ *
+ * Atomically subtracts the 64-bit value (dec) from the atomic counter (v)
+ * and returns the value of v after the subtraction.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @param dec
+ *   The value to be subtracted from the counter.
+ * @return
+ *   The value of v after the subtraction.
+ */
+static inline int64_t
+rte_atomic64_sub_return(rte_atomic64_t *v, int64_t dec)
+{
+       uint64_t old;
+       uint64_t updated;
+
+       /* CAS loop; 'updated' is the value this thread installed, hence
+        * the post-subtraction counter value to return. */
+       do {
+               old = v->cnt;
+               updated = old - dec;
+       } while (rte_atomic64_cmpset((volatile uint64_t *)&v->cnt,
+                                    old, updated) == 0);
+
+       return updated;
+}
+
+/**
+ * Atomically increment a 64-bit counter by one and test.
+ *
+ * Atomically increments the atomic counter (v) by one and returns
+ * true if the result is 0, or false in all other cases.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @return
+ *   True if the result after the addition is 0; false otherwise.
+ */
+static inline int rte_atomic64_inc_and_test(rte_atomic64_t *v)
+{
+       int64_t after = rte_atomic64_add_return(v, 1);
+
+       return after == 0;
+}
+
+/**
+ * Atomically decrement a 64-bit counter by one and test.
+ *
+ * Atomically decrements the atomic counter (v) by one and returns true if
+ * the result is 0, or false in all other cases.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @return
+ *   True if the result after the subtraction is 0; false otherwise.
+ */
+static inline int rte_atomic64_dec_and_test(rte_atomic64_t *v)
+{
+       int64_t after = rte_atomic64_sub_return(v, 1);
+
+       return after == 0;
+}
+
+/**
+ * Atomically test and set a 64-bit atomic counter.
+ *
+ * If the counter value is already set, return 0 (failed). Otherwise, set
+ * the counter value to 1 and return 1 (success).
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @return
+ *   0 if failed; else 1, success.
+ */
+static inline int rte_atomic64_test_and_set(rte_atomic64_t *v)
+{
+       /* Compare-and-set from 0 to 1: succeeds only when the counter
+        * was not already set. */
+       volatile uint64_t *addr = (volatile uint64_t *)&v->cnt;
+
+       return rte_atomic64_cmpset(addr, 0, 1);
+}
+
+/**
+ * Atomically set a 64-bit counter to 0.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ */
+static inline void rte_atomic64_clear(rte_atomic64_t *v)
+{
+       /* Unlike rte_atomic32_clear(), this goes through the CAS-based
+        * setter — presumably because a plain 64-bit store is not a
+        * single atomic access on this 32-bit target. */
+       rte_atomic64_set(v, 0);
+}
+
+#endif /* _RTE_I686_ATOMIC_H_ */
diff --git a/lib/librte_eal/common/include/rte_alarm.h b/lib/librte_eal/common/include/rte_alarm.h
new file mode 100644 (file)
index 0000000..2ed2a11
--- /dev/null
@@ -0,0 +1,100 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#ifndef _RTE_ALARM_H_
+#define _RTE_ALARM_H_
+
+/**
+ * @file
+ *
+ * Alarm functions
+ *
+ * Simple alarm-clock functionality supplied by eal.
+ * Does not require hpet support.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+/**
+ * Signature of the callback function called when an alarm goes off.
+ */
+typedef void (*rte_eal_alarm_callback)(void *arg);
+
+/**
+ * Function to set a callback to be triggered when us microseconds
+ * have expired. Accuracy of timing to the microsecond is not guaranteed. The
+ * alarm function will not be called *before* the requested time, but may
+ * be called a short period of time afterwards.
+ * The alarm handler will be called only once. There is no need to call
+ * "rte_eal_alarm_cancel" from within the callback function.
+ *
+ * @param us
+ *   The time in microseconds before the callback is called
+ * @param cb
+ *   The function to be called when the alarm expires
+ * @param cb_arg
+ *   Pointer parameter to be passed to the callback function
+ *
+ * @return
+ *   On success, zero.
+ *   On failure, a negative error number
+ */
+int rte_eal_alarm_set(uint64_t us, rte_eal_alarm_callback cb, void *cb_arg);
+
+/**
+ * Function to cancel an alarm callback which has been registered before.
+ *
+ * @param cb_fn
+ *  alarm callback
+ * @param cb_arg
+ *  Pointer parameter to be passed to the callback function. To remove all
+ *  copies of a given callback function, irrespective of parameter, (void *)-1
+ *  can be used here.
+ *
+ * @return
+ *  - The number of callbacks removed
+ */
+int rte_eal_alarm_cancel(rte_eal_alarm_callback cb_fn, void *cb_arg);
+
+#ifdef __cplusplus
+}
+#endif
+
+
+#endif /* _RTE_ALARM_H_ */
diff --git a/lib/librte_eal/common/include/rte_atomic.h b/lib/librte_eal/common/include/rte_atomic.h
new file mode 100644 (file)
index 0000000..dd41397
--- /dev/null
@@ -0,0 +1,657 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#ifndef _RTE_ATOMIC_H_
+#define _RTE_ATOMIC_H_
+
+/**
+ * @file
+ * Atomic Operations
+ *
+ * This file defines a generic API for atomic
+ * operations. The implementation is architecture-specific.
+ *
+ * See lib/librte_eal/common/include/i686/arch/rte_atomic.h
+ * See lib/librte_eal/common/include/x86_64/arch/rte_atomic.h
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+#include "arch/rte_atomic.h"
+
+
+#ifdef __DOXYGEN__
+
+/**
+ * General memory barrier.
+ *
+ * Guarantees that the LOAD and STORE operations generated before the
+ * barrier occur before the LOAD and STORE operations generated after.
+ */
+#define rte_mb()  asm volatile("mfence;" : : : "memory")
+
+/**
+ * Write memory barrier.
+ *
+ * Guarantees that the STORE operations generated before the barrier
+ * occur before the STORE operations generated after.
+ */
+#define rte_wmb() asm volatile("sfence;" : : : "memory")
+
+/**
+ * Read memory barrier.
+ *
+ * Guarantees that the LOAD operations generated before the barrier
+ * occur before the LOAD operations generated after.
+ */
+#define rte_rmb() asm volatile("lfence;" : : : "memory")
+
+/*------------------------- 16 bit atomic operations -------------------------*/
+
+/**
+ * Atomic compare and set.
+ *
+ * (atomic) equivalent to:
+ *   if (*dst == exp)
+ *     *dst = src (all 16-bit words)
+ *
+ * @param dst
+ *   The destination location into which the value will be written.
+ * @param exp
+ *   The expected value.
+ * @param src
+ *   The new value.
+ * @return
+ *   Non-zero on success; 0 on failure.
+ */
+static inline int
+rte_atomic16_cmpset(volatile uint16_t *dst, uint16_t exp, uint16_t src);
+
+/**
+ * The atomic counter structure.
+ */
+typedef struct {
+       volatile int16_t cnt; /**< An internal counter value. */
+} rte_atomic16_t;
+
+/**
+ * Static initializer for an atomic counter.
+ */
+#define RTE_ATOMIC16_INIT(val) { (val) }
+
+/**
+ * Initialize an atomic counter.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ */
+static inline void
+rte_atomic16_init(rte_atomic16_t *v)
+{
+       v->cnt = 0;
+}
+
+/**
+ * Atomically read a 16-bit value from a counter.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @return
+ *   The value of the counter.
+ */
+static inline int16_t
+rte_atomic16_read(const rte_atomic16_t *v);
+
+/**
+ * Atomically set a counter to a 16-bit value.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @param new_value
+ *   The new value for the counter.
+ */
+static inline void
+rte_atomic16_set(rte_atomic16_t *v, int16_t new_value);
+
+/**
+ * Atomically add a 16-bit value to an atomic counter.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @param inc
+ *   The value to be added to the counter.
+ */
+static inline void
+rte_atomic16_add(rte_atomic16_t *v, int16_t inc);
+
+/**
+ * Atomically subtract a 16-bit value from an atomic counter.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @param dec
+ *   The value to be subtracted from the counter.
+ */
+static inline void
+rte_atomic16_sub(rte_atomic16_t *v, int16_t dec);
+
+/**
+ * Atomically increment a counter by one.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ */
+static inline void
+rte_atomic16_inc(rte_atomic16_t *v);
+
+/**
+ * Atomically decrement a counter by one.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ */
+static inline void
+rte_atomic16_dec(rte_atomic16_t *v);
+
+/**
+ * Atomically add a 16-bit value to a counter and return the result.
+ *
+ * Atomically adds the 16-bits value (inc) to the atomic counter (v) and
+ * returns the value of v after addition.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @param inc
+ *   The value to be added to the counter.
+ * @return
+ *   The value of v after the addition.
+ */
+static inline int16_t
+rte_atomic16_add_return(rte_atomic16_t *v, int16_t inc);
+
+/**
+ * Atomically subtract a 16-bit value from a counter and return
+ * the result.
+ *
+ * Atomically subtracts the 16-bit value (dec) from the atomic counter
+ * (v) and returns the value of v after the subtraction.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @param dec
+ *   The value to be subtracted from the counter.
+ * @return
+ *   The value of v after the subtraction.
+ */
+static inline int16_t
+rte_atomic16_sub_return(rte_atomic16_t *v, int16_t dec);
+
+/**
+ * Atomically increment a 16-bit counter by one and test.
+ *
+ * Atomically increments the atomic counter (v) by one and returns true if
+ * the result is 0, or false in all other cases.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @return
+ *   True if the result after the increment operation is 0; false otherwise.
+ */
+static inline int
+rte_atomic16_inc_and_test(rte_atomic16_t *v);
+
+/**
+ * Atomically decrement a 16-bit counter by one and test.
+ *
+ * Atomically decrements the atomic counter (v) by one and returns true if
+ * the result is 0, or false in all other cases.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @return
+ *   True if the result after the decrement operation is 0; false otherwise.
+ */
+static inline int
+rte_atomic16_dec_and_test(rte_atomic16_t *v);
+
+/**
+ * Atomically test and set a 16-bit atomic counter.
+ *
+ * If the counter value is already set, return 0 (failed). Otherwise, set
+ * the counter value to 1 and return 1 (success).
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @return
+ *   0 if failed; else 1, success.
+ */
+static inline int
+rte_atomic16_test_and_set(rte_atomic16_t *v);
+
+/**
+ * Atomically set a 16-bit counter to 0.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ */
+static inline void
+rte_atomic16_clear(rte_atomic16_t *v);
+
+/*------------------------- 32 bit atomic operations -------------------------*/
+
+/**
+ * Atomic compare and set.
+ *
+ * (atomic) equivalent to:
+ *   if (*dst == exp)
+ *     *dst = src (all 32-bit words)
+ *
+ * @param dst
+ *   The destination location into which the value will be written.
+ * @param exp
+ *   The expected value.
+ * @param src
+ *   The new value.
+ * @return
+ *   Non-zero on success; 0 on failure.
+ */
+static inline int
+rte_atomic32_cmpset(volatile uint32_t *dst, uint32_t exp, uint32_t src);
+
+/**
+ * The atomic counter structure.
+ */
+typedef struct {
+       volatile int32_t cnt; /**< An internal counter value. */
+} rte_atomic32_t;
+
+/**
+ * Static initializer for an atomic counter.
+ */
+#define RTE_ATOMIC32_INIT(val) { (val) }
+
+/**
+ * Initialize an atomic counter.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ */
+static inline void
+rte_atomic32_init(rte_atomic32_t *v);
+
+/**
+ * Atomically read a 32-bit value from a counter.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @return
+ *   The value of the counter.
+ */
+static inline int32_t
+rte_atomic32_read(const rte_atomic32_t *v);
+
+/**
+ * Atomically set a counter to a 32-bit value.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @param new_value
+ *   The new value for the counter.
+ */
+static inline void
+rte_atomic32_set(rte_atomic32_t *v, int32_t new_value);
+
+/**
+ * Atomically add a 32-bit value to an atomic counter.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @param inc
+ *   The value to be added to the counter.
+ */
+static inline void
+rte_atomic32_add(rte_atomic32_t *v, int32_t inc);
+
+/**
+ * Atomically subtract a 32-bit value from an atomic counter.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @param dec
+ *   The value to be subtracted from the counter.
+ */
+static inline void
+rte_atomic32_sub(rte_atomic32_t *v, int32_t dec);
+
+/**
+ * Atomically increment a counter by one.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ */
+static inline void
+rte_atomic32_inc(rte_atomic32_t *v);
+
+/**
+ * Atomically decrement a counter by one.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ */
+static inline void
+rte_atomic32_dec(rte_atomic32_t *v);
+
+/**
+ * Atomically add a 32-bit value to a counter and return the result.
+ *
+ * Atomically adds the 32-bits value (inc) to the atomic counter (v) and
+ * returns the value of v after addition.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @param inc
+ *   The value to be added to the counter.
+ * @return
+ *   The value of v after the addition.
+ */
+static inline int32_t
+rte_atomic32_add_return(rte_atomic32_t *v, int32_t inc);
+
+/**
+ * Atomically subtract a 32-bit value from a counter and return
+ * the result.
+ *
+ * Atomically subtracts the 32-bit value (dec) from the atomic counter
+ * (v) and returns the value of v after the subtraction.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @param dec
+ *   The value to be subtracted from the counter.
+ * @return
+ *   The value of v after the subtraction.
+ */
+static inline int32_t
+rte_atomic32_sub_return(rte_atomic32_t *v, int32_t dec);
+
+/**
+ * Atomically increment a 32-bit counter by one and test.
+ *
+ * Atomically increments the atomic counter (v) by one and returns true if
+ * the result is 0, or false in all other cases.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @return
+ *   True if the result after the increment operation is 0; false otherwise.
+ */
+static inline int
+rte_atomic32_inc_and_test(rte_atomic32_t *v);
+
+/**
+ * Atomically decrement a 32-bit counter by one and test.
+ *
+ * Atomically decrements the atomic counter (v) by one and returns true if
+ * the result is 0, or false in all other cases.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @return
+ *   True if the result after the decrement operation is 0; false otherwise.
+ */
+static inline int
+rte_atomic32_dec_and_test(rte_atomic32_t *v);
+
+/**
+ * Atomically test and set a 32-bit atomic counter.
+ *
+ * If the counter value is already set, return 0 (failed). Otherwise, set
+ * the counter value to 1 and return 1 (success).
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @return
+ *   0 if failed; else 1, success.
+ */
+static inline int
+rte_atomic32_test_and_set(rte_atomic32_t *v);
+
+/**
+ * Atomically set a 32-bit counter to 0.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ */
+static inline void
+rte_atomic32_clear(rte_atomic32_t *v);
+
+/*------------------------- 64 bit atomic operations -------------------------*/
+
+/**
+ * An atomic compare and set function used by the mutex functions.
+ * (atomic) equivalent to:
+ *   if (*dst == exp)
+ *     *dst = src (all 64-bit words)
+ *
+ * @param dst
+ *   The destination into which the value will be written.
+ * @param exp
+ *   The expected value.
+ * @param src
+ *   The new value.
+ * @return
+ *   Non-zero on success; 0 on failure.
+ */
+static inline int
+rte_atomic64_cmpset(volatile uint64_t *dst, uint64_t exp, uint64_t src);
+
+/**
+ * The atomic counter structure.
+ */
+typedef struct {
+       volatile int64_t cnt;  /**< Internal counter value. */
+} rte_atomic64_t;
+
+/**
+ * Static initializer for an atomic counter.
+ */
+#define RTE_ATOMIC64_INIT(val) { (val) }
+
+/**
+ * Initialize the atomic counter.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ */
+static inline void
+rte_atomic64_init(rte_atomic64_t *v);
+
+/**
+ * Atomically read a 64-bit counter.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @return
+ *   The value of the counter.
+ */
+static inline int64_t
+rte_atomic64_read(rte_atomic64_t *v);
+
+/**
+ * Atomically set a 64-bit counter.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @param new_value
+ *   The new value of the counter.
+ */
+static inline void
+rte_atomic64_set(rte_atomic64_t *v, int64_t new_value);
+
+/**
+ * Atomically add a 64-bit value to a counter.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @param inc
+ *   The value to be added to the counter.
+ */
+static inline void
+rte_atomic64_add(rte_atomic64_t *v, int64_t inc);
+
+/**
+ * Atomically subtract a 64-bit value from a counter.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @param dec
+ *   The value to be subtracted from the counter.
+ */
+static inline void
+rte_atomic64_sub(rte_atomic64_t *v, int64_t dec);
+
+/**
+ * Atomically increment a 64-bit counter by one.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ */
+static inline void
+rte_atomic64_inc(rte_atomic64_t *v);
+
+/**
+ * Atomically decrement a 64-bit counter by one.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ */
+static inline void
+rte_atomic64_dec(rte_atomic64_t *v);
+
+/**
+ * Add a 64-bit value to an atomic counter and return the result.
+ *
+ * Atomically adds the 64-bit value (inc) to the atomic counter (v) and
+ * returns the value of v after the addition.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @param inc
+ *   The value to be added to the counter.
+ * @return
+ *   The value of v after the addition.
+ */
+static inline int64_t
+rte_atomic64_add_return(rte_atomic64_t *v, int64_t inc);
+
+/**
+ * Subtract a 64-bit value from an atomic counter and return the result.
+ *
+ * Atomically subtracts the 64-bit value (dec) from the atomic counter (v)
+ * and returns the value of v after the subtraction.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @param dec
+ *   The value to be subtracted from the counter.
+ * @return
+ *   The value of v after the subtraction.
+ */
+static inline int64_t
+rte_atomic64_sub_return(rte_atomic64_t *v, int64_t dec);
+
+/**
+ * Atomically increment a 64-bit counter by one and test.
+ *
+ * Atomically increments the atomic counter (v) by one and returns
+ * true if the result is 0, or false in all other cases.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @return
+ *   True if the result after the addition is 0; false otherwise.
+ */
+static inline int
+rte_atomic64_inc_and_test(rte_atomic64_t *v);
+
+/**
+ * Atomically decrement a 64-bit counter by one and test.
+ *
+ * Atomically decrements the atomic counter (v) by one and returns true if
+ * the result is 0, or false in all other cases.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @return
+ *   True if the result after subtraction is 0; false otherwise.
+ */
+static inline int
+rte_atomic64_dec_and_test(rte_atomic64_t *v);
+
+/**
+ * Atomically test and set a 64-bit atomic counter.
+ *
+ * If the counter value is already set, return 0 (failed). Otherwise, set
+ * the counter value to 1 and return 1 (success).
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @return
+ *   0 if failed; else 1, success.
+ */
+static inline int
+rte_atomic64_test_and_set(rte_atomic64_t *v);
+
+/**
+ * Atomically set a 64-bit counter to 0.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ */
+static inline void
+rte_atomic64_clear(rte_atomic64_t *v);
+
+#endif /* __DOXYGEN__ */
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_ATOMIC_H_ */
diff --git a/lib/librte_eal/common/include/rte_branch_prediction.h b/lib/librte_eal/common/include/rte_branch_prediction.h
new file mode 100644 (file)
index 0000000..a65a722
--- /dev/null
@@ -0,0 +1,72 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+/**
+ * @file
+ * Branch Prediction Helpers in RTE
+ */
+
+#ifndef _RTE_BRANCH_PREDICTION_H_
+#define _RTE_BRANCH_PREDICTION_H_
+
+/**
+ * Check if a branch is likely to be taken.
+ *
+ * This compiler builtin allows the developer to indicate if a branch is
+ * likely to be taken. Example:
+ *
+ *   if (likely(x > 1))
+ *      do_stuff();
+ *
+ */
+#ifndef likely
+/* !!(x) normalizes the expression to 0/1 so that pointer and 64-bit
+ * operands are accepted without truncation: __builtin_expect() takes a
+ * 'long' argument, which is 32 bits on i686. */
+#define likely(x)  __builtin_expect(!!(x), 1)
+#endif /* likely */
+
+/**
+ * Check if a branch is unlikely to be taken.
+ *
+ * This compiler builtin allows the developer to indicate if a branch is
+ * unlikely to be taken. Example:
+ *
+ *   if (unlikely(x < 1))
+ *      do_stuff();
+ *
+ */
+#ifndef unlikely
+/* !!(x) normalizes the expression to 0/1 so that pointer and 64-bit
+ * operands are accepted without truncation: __builtin_expect() takes a
+ * 'long' argument, which is 32 bits on i686. */
+#define unlikely(x)  __builtin_expect(!!(x), 0)
+#endif /* unlikely */
+
+#endif /* _RTE_BRANCH_PREDICTION_H_ */
diff --git a/lib/librte_eal/common/include/rte_byteorder.h b/lib/librte_eal/common/include/rte_byteorder.h
new file mode 100644 (file)
index 0000000..ccaa528
--- /dev/null
@@ -0,0 +1,244 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#ifndef _RTE_BYTEORDER_H_
+#define _RTE_BYTEORDER_H_
+
+/**
+ * @file
+ *
+ * Byte Swap Operations
+ *
+ * This file defines a generic API for byte swap operations. Part of
+ * the implementation is architecture-specific.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+/*
+ * An internal function to swap bytes in a 16-bit value.
+ *
+ * It is used by rte_bswap16() when the value is constant. Do not use
+ * this function directly; rte_bswap16() is preferred.
+ */
+static inline uint16_t
+rte_constant_bswap16(uint16_t x)
+{
+       return (uint16_t)(((x & 0x00ffU) << 8) |
+               ((x & 0xff00U) >> 8));
+}
+
+/*
+ * An internal function to swap bytes in a 32-bit value.
+ *
+ * It is used by rte_bswap32() when the value is constant. Do not use
+ * this function directly; rte_bswap32() is preferred.
+ */
+static inline uint32_t
+rte_constant_bswap32(uint32_t x)
+{
+       return  ((x & 0x000000ffUL) << 24) |
+               ((x & 0x0000ff00UL) << 8) |
+               ((x & 0x00ff0000UL) >> 8) |
+               ((x & 0xff000000UL) >> 24);
+}
+
+/*
+ * An internal function to swap bytes of a 64-bit value.
+ *
+ * It is used by rte_bswap64() when the value is constant. Do not use
+ * this function directly; rte_bswap64() is preferred.
+ */
+static inline uint64_t
+rte_constant_bswap64(uint64_t x)
+{
+       return  ((x & 0x00000000000000ffULL) << 56) |
+               ((x & 0x000000000000ff00ULL) << 40) |
+               ((x & 0x0000000000ff0000ULL) << 24) |
+               ((x & 0x00000000ff000000ULL) <<  8) |
+               ((x & 0x000000ff00000000ULL) >>  8) |
+               ((x & 0x0000ff0000000000ULL) >> 24) |
+               ((x & 0x00ff000000000000ULL) >> 40) |
+               ((x & 0xff00000000000000ULL) >> 56);
+}
+
+/*
+ * An architecture-optimized byte swap for a 16-bit value.
+ *
+ * Do not use this function directly. The preferred function is rte_bswap16().
+ */
+static inline uint16_t rte_arch_bswap16(uint16_t _x)
+{
+       register uint16_t x = _x;
+       asm volatile ("xchgb %b[x1],%h[x2]"
+                     : [x1] "=Q" (x)
+                     : [x2] "0" (x)
+                     );
+       return x;
+}
+
+/*
+ * An architecture-optimized byte swap for a 32-bit value.
+ *
+ * Do not use this function directly. The preferred function is rte_bswap32().
+ */
+static inline uint32_t rte_arch_bswap32(uint32_t _x)
+{
+       register uint32_t x = _x;
+       asm volatile ("bswap %[x]"
+                     : [x] "+r" (x)
+                     );
+       return x;
+}
+
+/*
+ * An architecture-optimized byte swap for a 64-bit value.
+ *
+ * Do not use this function directly. The preferred function is rte_bswap64().
+ */
+#ifdef RTE_ARCH_X86_64
+/* 64-bit mode */
+static inline uint64_t rte_arch_bswap64(uint64_t _x)
+{
+       register uint64_t x = _x;
+       asm volatile ("bswap %[x]"
+                     : [x] "+r" (x)
+                     );
+       return x;
+}
+#else /* ! RTE_ARCH_X86_64 */
+/* Compat./Leg. mode */
+static inline uint64_t rte_arch_bswap64(uint64_t x)
+{
+       uint64_t ret = 0;
+       ret |= ((uint64_t)rte_arch_bswap32(x & 0xffffffffUL) << 32);
+       ret |= ((uint64_t)rte_arch_bswap32((x >> 32) & 0xffffffffUL));
+       return ret;
+}
+#endif /* RTE_ARCH_X86_64 */
+
+/**
+ * Swap bytes in a 16-bit value.
+ */
+#define rte_bswap16(x) ((uint16_t)(__builtin_constant_p(x) ?           \
+                                  rte_constant_bswap16(x) :            \
+                                  rte_arch_bswap16(x)))                \
+
+/**
+ * Swap bytes in a 32-bit value.
+ */
+#define rte_bswap32(x) ((uint32_t)(__builtin_constant_p(x) ?           \
+                                  rte_constant_bswap32(x) :            \
+                                  rte_arch_bswap32(x)))                \
+
+/**
+ * Swap bytes in a 64-bit value.
+ */
+#define rte_bswap64(x) ((uint64_t)(__builtin_constant_p(x) ?           \
+                                  rte_constant_bswap64(x) :            \
+                                  rte_arch_bswap64(x)))                \
+
+/**
+ * Convert a 16-bit value from CPU order to little endian.
+ */
+#define rte_cpu_to_le_16(x) (x)
+
+/**
+ * Convert a 32-bit value from CPU order to little endian.
+ */
+#define rte_cpu_to_le_32(x) (x)
+
+/**
+ * Convert a 64-bit value from CPU order to little endian.
+ */
+#define rte_cpu_to_le_64(x) (x)
+
+
+/**
+ * Convert a 16-bit value from CPU order to big endian.
+ */
+#define rte_cpu_to_be_16(x) rte_bswap16(x)
+
+/**
+ * Convert a 32-bit value from CPU order to big endian.
+ */
+#define rte_cpu_to_be_32(x) rte_bswap32(x)
+
+/**
+ * Convert a 64-bit value from CPU order to big endian.
+ */
+#define rte_cpu_to_be_64(x) rte_bswap64(x)
+
+
+/**
+ * Convert a 16-bit value from little endian to CPU order.
+ */
+#define rte_le_to_cpu_16(x) (x)
+
+/**
+ * Convert a 32-bit value from little endian to CPU order.
+ */
+#define rte_le_to_cpu_32(x) (x)
+
+/**
+ * Convert a 64-bit value from little endian to CPU order.
+ */
+#define rte_le_to_cpu_64(x) (x)
+
+
+/**
+ * Convert a 16-bit value from big endian to CPU order.
+ */
+#define rte_be_to_cpu_16(x) rte_bswap16(x)
+
+/**
+ * Convert a 32-bit value from big endian to CPU order.
+ */
+#define rte_be_to_cpu_32(x) rte_bswap32(x)
+
+/**
+ * Convert a 64-bit value from big endian to CPU order.
+ */
+#define rte_be_to_cpu_64(x) rte_bswap64(x)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_BYTEORDER_H_ */
diff --git a/lib/librte_eal/common/include/rte_common.h b/lib/librte_eal/common/include/rte_common.h
new file mode 100644 (file)
index 0000000..3c84569
--- /dev/null
@@ -0,0 +1,310 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#ifndef _RTE_COMMON_H_
+#define _RTE_COMMON_H_
+
+/**
+ * @file
+ *
+ * Generic, commonly-used macro and inline function definitions
+ * for Intel DPDK.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <ctype.h>
+#include <errno.h>
+
+/*********** Macros to eliminate unused variable warnings ********/
+
+/**
+ * short definition to mark a function parameter unused
+ */
+#define __rte_unused __attribute__((__unused__))
+
+/**
+ * definition to mark a variable or function parameter as used so
+ * as to avoid a compiler warning
+ */
+#define RTE_SET_USED(x) (void)(x)
+
+/*********** Macros for pointer arithmetic ********/
+
+/**
+ * add a byte-value offset from a pointer
+ */
+#define RTE_PTR_ADD(ptr, x) ((typeof(ptr))((uintptr_t)ptr + (x)))
+
+/**
+ * subtract a byte-value offset from a pointer
+ */
+#define RTE_PTR_SUB(ptr, x) ((typeof(ptr))((uintptr_t)ptr - (x)))
+
+/**
+ * get the difference between two pointer values, i.e. how far apart
+ * in bytes are the locations they point to. It is assumed that
+ * ptr1 is greater than ptr2.
+ */
+#define RTE_PTR_DIFF(ptr1, ptr2) ((uintptr_t)(ptr1) - (uintptr_t)(ptr2))
+
+/*********** Macros/static functions for doing alignment ********/
+
+/**
+ * Function which rounds an unsigned int down to a given power-of-two value.
+ * Takes uintptr_t types as parameters, as this type of operation is most
+ * commonly done for pointer alignment. (See also RTE_ALIGN_FLOOR,
+ * RTE_ALIGN_CEIL, RTE_ALIGN, RTE_PTR_ALIGN_FLOOR, RTE_PTR_ALIGN_CEIL,
+ * RTE_PTR_ALIGN macros)
+ * @param ptr
+ *   The value to be rounded down
+ * @param align
+ *   The power-of-two of which the result must be a multiple.
+ * @return
+ *   Function returns a properly aligned value where align is a power-of-two.
+ *   If align is not a power-of-two, result will be incorrect.
+ */
+static inline uintptr_t
+rte_align_floor_int(uintptr_t ptr, uintptr_t align)
+{
+       return (ptr & ~(align - 1));
+}
+
+/**
+ * Macro to align a pointer to a given power-of-two. The resultant
+ * pointer will be a pointer of the same type as the first parameter, and
+ * point to an address no higher than the first parameter. Second parameter
+ * must be a power-of-two value.
+ */
+#define RTE_ALIGN_FLOOR(ptr, align) \
+       (typeof(ptr))rte_align_floor_int((uintptr_t)ptr, align)
+
+/**
+ * Macro to align a pointer to a given power-of-two. The resultant
+ * pointer will be a pointer of the same type as the first parameter, and
+ * point to an address no lower than the first parameter. Second parameter
+ * must be a power-of-two value.
+ */
+#define RTE_ALIGN_CEIL(ptr, align) \
+       RTE_ALIGN_FLOOR(RTE_PTR_ADD(ptr, align - 1), align)
+
+/**
+ * Macro to align a pointer to a given power-of-two. The resultant
+ * pointer will be a pointer of the same type as the first parameter, and
+ * point to an address no lower than the first parameter. Second parameter
+ * must be a power-of-two value.
+ * This function is the same as RTE_ALIGN_CEIL
+ */
+#define RTE_ALIGN(ptr, align) RTE_ALIGN_CEIL(ptr, align)
+
+/**
+ * Checks if a pointer is aligned to a given power-of-two value
+ *
+ * @param ptr
+ *   The pointer whose alignment is to be checked
+ * @param align
+ *   The power-of-two value to which the ptr should be aligned
+ *
+ * @return
+ *   True(1) where the pointer is correctly aligned, false(0) otherwise
+ */
+static inline int
+rte_is_aligned(void *ptr, unsigned align)
+{
+       return RTE_ALIGN(ptr, align) == ptr;
+}
+
+/*********** Macros for compile type checks ********/
+
+/**
+ * Triggers an error at compilation time if the condition is true.
+ */
+#ifndef __OPTIMIZE__
+#define RTE_BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
+#else
+extern int RTE_BUILD_BUG_ON_detected_error;
+#define RTE_BUILD_BUG_ON(condition) do {             \
+       ((void)sizeof(char[1 - 2*!!(condition)]));   \
+       if (condition)                               \
+               RTE_BUILD_BUG_ON_detected_error = 1; \
+} while(0)
+#endif
+
+/*********** Macros to work with powers of 2 ********/
+
+/**
+ * Returns true if n is a power of 2
+ * @param n
+ *     Number to check
+ * @return 1 if true, 0 otherwise
+ */
+static inline int
+rte_is_power_of_2(uint32_t n)
+{
+       return ((n-1) & n) == 0;
+}
+
+/**
+ * Aligns input parameter to the next power of 2
+ *
+ * @param x
+ *   The integer value to align
+ *
+ * @return
+ *   Input parameter aligned to the next power of 2
+ */
+static inline uint32_t
+rte_align32pow2(uint32_t x)
+{
+       x--;
+       x |= x >> 1;
+       x |= x >> 2;
+       x |= x >> 4;
+       x |= x >> 8;
+       x |= x >> 16;
+
+       return x + 1;
+}
+
+/*********** Macros for calculating min and max **********/
+
+/**
+ * Macro to return the minimum of two numbers
+ */
+#define RTE_MIN(a, b) ({ \
+               typeof (a) _a = (a); \
+               typeof (b) _b = (b); \
+               _a < _b ? _a : _b; \
+       })
+
+/**
+ * Macro to return the maximum of two numbers
+ */
+#define RTE_MAX(a, b) ({ \
+               typeof (a) _a = (a); \
+               typeof (b) _b = (b); \
+               _a > _b ? _a : _b; \
+       })
+
+/*********** Other general functions / macros ********/
+
+/**
+ * PAUSE instruction for tight loops (avoid busy waiting)
+ */
+static inline void
+rte_pause (void)
+{
+       asm volatile ("pause");
+}
+
+#ifndef offsetof
+/** Return the offset of a field in a structure. */
+#define offsetof(TYPE, MEMBER)  __builtin_offsetof (TYPE, MEMBER)
+#endif
+
+#define _RTE_STR(x) #x
+/** Take a macro value and get a string version of it */
+#define RTE_STR(x) _RTE_STR(x)
+
+/**
+ * Converts a numeric string to the equivalent uint64_t value.
+ * As well as straight number conversion, also recognises the suffixes
+ * k, m and g for kilobytes, megabytes and gigabytes respectively.
+ *
+ * If a negative number is passed in, i.e. a string with the first non-blank
+ * character being "-", zero is returned. Zero is also returned in the case of
+ * an error with the strtoull call in the function.
+ *
+ * @param str
+ *     String containing number to convert.
+ * @return
+ *     Number.
+ */
+static inline uint64_t
+rte_str_to_size(const char *str)
+{
+       char *endptr;
+       unsigned long long size;
+
+       while (isspace((int)*str))
+               str++;
+       if (*str == '-')
+               return 0;
+
+       errno = 0;
+       size = strtoull(str, &endptr, 0);
+       if (errno)
+               return 0;
+
+       if (*endptr == ' ')
+               endptr++; /* allow 1 space gap */
+
+       switch (*endptr){
+       case 'G': case 'g': size *= 1024; /* fall-through */
+       case 'M': case 'm': size *= 1024; /* fall-through */
+       case 'K': case 'k': size *= 1024; /* fall-through */
+       default:
+               break;
+       }
+       return size;
+}
+
+/**
+ * Function to terminate the application immediately, printing an error
+ * message and returning the exit_code back to the shell.
+ *
+ * This function never returns
+ *
+ * @param exit_code
+ *     The exit code to be returned by the application
+ * @param format
+ *     The format string to be used for printing the message. This can include
+ *     printf format characters which will be expanded using any further parameters
+ *     to the function.
+ */
+void
+rte_exit(int exit_code, const char *format, ...)
+       __attribute__((noreturn))
+       __attribute__((format(printf, 2, 3)));
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/lib/librte_eal/common/include/rte_cpuflags.h b/lib/librte_eal/common/include/rte_cpuflags.h
new file mode 100644 (file)
index 0000000..72c3f2b
--- /dev/null
@@ -0,0 +1,174 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#ifndef _RTE_CPUFLAGS_H_
+#define _RTE_CPUFLAGS_H_
+
+/**
+ * @file
+ * Simple API to determine available CPU features at runtime.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/**
+ * Enumeration of all CPU features supported
+ */
+enum rte_cpu_flag_t {
+       /* (EAX 01h) ECX features*/
+       RTE_CPUFLAG_SSE3 = 0,               /**< SSE3 */
+       RTE_CPUFLAG_PCLMULQDQ,              /**< PCLMULQDQ */
+       RTE_CPUFLAG_DTES64,                 /**< DTES64 */
+       RTE_CPUFLAG_MONITOR,                /**< MONITOR */
+       RTE_CPUFLAG_DS_CPL,                 /**< DS_CPL */
+       RTE_CPUFLAG_VMX,                    /**< VMX */
+       RTE_CPUFLAG_SMX,                    /**< SMX */
+       RTE_CPUFLAG_EIST,                   /**< EIST */
+       RTE_CPUFLAG_TM2,                    /**< TM2 */
+       RTE_CPUFLAG_SSSE3,                  /**< SSSE3 */
+       RTE_CPUFLAG_CNXT_ID,                /**< CNXT_ID */
+       RTE_CPUFLAG_FMA,                    /**< FMA */
+       RTE_CPUFLAG_CMPXCHG16B,             /**< CMPXCHG16B */
+       RTE_CPUFLAG_XTPR,                   /**< XTPR */
+       RTE_CPUFLAG_PDCM,                   /**< PDCM */
+       RTE_CPUFLAG_PCID,                   /**< PCID */
+       RTE_CPUFLAG_DCA,                    /**< DCA */
+       RTE_CPUFLAG_SSE4_1,                 /**< SSE4_1 */
+       RTE_CPUFLAG_SSE4_2,                 /**< SSE4_2 */
+       RTE_CPUFLAG_X2APIC,                 /**< X2APIC */
+       RTE_CPUFLAG_MOVBE,                  /**< MOVBE */
+       RTE_CPUFLAG_POPCNT,                 /**< POPCNT */
+       RTE_CPUFLAG_TSC_DEADLINE,           /**< TSC_DEADLINE */
+       RTE_CPUFLAG_AES,                    /**< AES */
+       RTE_CPUFLAG_XSAVE,                  /**< XSAVE */
+       RTE_CPUFLAG_OSXSAVE,                /**< OSXSAVE */
+       RTE_CPUFLAG_AVX,                    /**< AVX */
+       RTE_CPUFLAG_F16C,                   /**< F16C */
+       RTE_CPUFLAG_RDRAND,                 /**< RDRAND */
+
+       /* (EAX 01h) EDX features */
+       RTE_CPUFLAG_FPU,                    /**< FPU */
+       RTE_CPUFLAG_VME,                    /**< VME */
+       RTE_CPUFLAG_DE,                     /**< DE */
+       RTE_CPUFLAG_PSE,                    /**< PSE */
+       RTE_CPUFLAG_TSC,                    /**< TSC */
+       RTE_CPUFLAG_MSR,                    /**< MSR */
+       RTE_CPUFLAG_PAE,                    /**< PAE */
+       RTE_CPUFLAG_MCE,                    /**< MCE */
+       RTE_CPUFLAG_CX8,                    /**< CX8 */
+       RTE_CPUFLAG_APIC,                   /**< APIC */
+       RTE_CPUFLAG_SEP,                    /**< SEP */
+       RTE_CPUFLAG_MTRR,                   /**< MTRR */
+       RTE_CPUFLAG_PGE,                    /**< PGE */
+       RTE_CPUFLAG_MCA,                    /**< MCA */
+       RTE_CPUFLAG_CMOV,                   /**< CMOV */
+       RTE_CPUFLAG_PAT,                    /**< PAT */
+       RTE_CPUFLAG_PSE36,                  /**< PSE36 */
+       RTE_CPUFLAG_PSN,                    /**< PSN */
+       RTE_CPUFLAG_CLFSH,                  /**< CLFSH */
+       RTE_CPUFLAG_DS,                     /**< DS */
+       RTE_CPUFLAG_ACPI,                   /**< ACPI */
+       RTE_CPUFLAG_MMX,                    /**< MMX */
+       RTE_CPUFLAG_FXSR,                   /**< FXSR */
+       RTE_CPUFLAG_SSE,                    /**< SSE */
+       RTE_CPUFLAG_SSE2,                   /**< SSE2 */
+       RTE_CPUFLAG_SS,                     /**< SS */
+       RTE_CPUFLAG_HTT,                    /**< HTT */
+       RTE_CPUFLAG_TM,                     /**< TM */
+       RTE_CPUFLAG_PBE,                    /**< PBE */
+
+       /* (EAX 06h) EAX features */
+       RTE_CPUFLAG_DIGTEMP,                /**< DIGTEMP */
+       RTE_CPUFLAG_TRBOBST,                /**< TRBOBST */
+       RTE_CPUFLAG_ARAT,                   /**< ARAT */
+       RTE_CPUFLAG_PLN,                    /**< PLN */
+       RTE_CPUFLAG_ECMD,                   /**< ECMD */
+       RTE_CPUFLAG_PTM,                    /**< PTM */
+
+       /* (EAX 06h) ECX features */
+       RTE_CPUFLAG_MPERF_APERF_MSR,        /**< MPERF_APERF_MSR */
+       RTE_CPUFLAG_ACNT2,                  /**< ACNT2 */
+       RTE_CPUFLAG_ENERGY_EFF,             /**< ENERGY_EFF */
+
+       /* (EAX 07h, ECX 0h) EBX features */
+       RTE_CPUFLAG_FSGSBASE,               /**< FSGSBASE */
+       RTE_CPUFLAG_BMI1,                   /**< BMI1 */
+       RTE_CPUFLAG_AVX2,                   /**< AVX2 */
+       RTE_CPUFLAG_SMEP,                   /**< SMEP */
+       RTE_CPUFLAG_BMI2,                   /**< BMI2 */
+       RTE_CPUFLAG_ERMS,                   /**< ERMS */
+       RTE_CPUFLAG_INVPCID,                /**< INVPCID */
+
+       /* (EAX 80000001h) ECX features */
+       RTE_CPUFLAG_LAHF_SAHF,              /**< LAHF_SAHF */
+       RTE_CPUFLAG_LZCNT,                  /**< LZCNT */
+
+       /* (EAX 80000001h) EDX features */
+       RTE_CPUFLAG_SYSCALL,                /**< SYSCALL */
+       RTE_CPUFLAG_XD,                     /**< XD */
+       RTE_CPUFLAG_1GB_PG,                 /**< 1GB_PG */
+       RTE_CPUFLAG_RDTSCP,                 /**< RDTSCP */
+       RTE_CPUFLAG_EM64T,                  /**< EM64T */
+
+       /* (EAX 80000007h) EDX features */
+       RTE_CPUFLAG_INVTSC,                 /**< INVTSC */
+
+       /* The last item */
+       RTE_CPUFLAG_NUMFLAGS,               /**< This should always be the last! */
+};
+
+
+/**
+ * Function for checking a CPU flag availability
+ *
+ * @param flag
+ *     CPU flag to query CPU for
+ * @return
+ *  1 if flag is available
+ *  0 if flag is not available
+ *  -ENOENT if flag is invalid
+ */
+int
+rte_cpu_get_flag_enabled(enum rte_cpu_flag_t flag);
+
+#ifdef __cplusplus
+}
+#endif
+
+
+#endif /* _RTE_CPUFLAGS_H_ */
diff --git a/lib/librte_eal/common/include/rte_cycles.h b/lib/librte_eal/common/include/rte_cycles.h
new file mode 100644 (file)
index 0000000..a1eca6c
--- /dev/null
@@ -0,0 +1,120 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#ifndef _RTE_CYCLES_H_
+#define _RTE_CYCLES_H_
+
+/**
+ * @file
+ *
+ * Simple Time Reference Functions (Cycles and HPET).
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+/**
+ * Read the TSC register.
+ *
+ * @return
+ *   The TSC for this lcore.
+ */
+static inline uint64_t
+rte_rdtsc(void)
+{
+       union {
+               uint64_t tsc_64;
+               struct {
+                       uint32_t lo_32;
+                       uint32_t hi_32;
+               };
+       } tsc;
+
+       asm volatile("rdtsc" :
+                    "=a" (tsc.lo_32),
+                    "=d" (tsc.hi_32));
+       return tsc.tsc_64;
+}
+
+/**
+ * Return the number of HPET cycles since boot
+ *
+ * This counter is global for all execution units. The number of
+ * cycles in one second can be retrieved using rte_get_hpet_hz().
+ *
+ * @return
+ *   the number of cycles
+ */
+uint64_t
+rte_get_hpet_cycles(void);
+
+/**
+ * Get the number of cycles in one second.
+ *
+ * @return
+ *   The number of cycles in one second.
+ */
+uint64_t
+rte_get_hpet_hz(void);
+
+/**
+ * Wait at least us microseconds.
+ *
+ * @param us
+ *   The number of microseconds to wait.
+ */
+void
+rte_delay_us(unsigned us);
+
+/**
+ * Wait at least ms milliseconds.
+ *
+ * @param ms
+ *   The number of milliseconds to wait.
+ */
+static inline void
+rte_delay_ms(unsigned ms)
+{
+       rte_delay_us(ms * 1000);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_CYCLES_H_ */
diff --git a/lib/librte_eal/common/include/rte_debug.h b/lib/librte_eal/common/include/rte_debug.h
new file mode 100644 (file)
index 0000000..451220e
--- /dev/null
@@ -0,0 +1,96 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#ifndef _RTE_DEBUG_H_
+#define _RTE_DEBUG_H_
+
+/**
+ * @file
+ *
+ * Debug Functions in RTE
+ *
+ * This file defines a generic API for debug operations. Part of
+ * the implementation is architecture-specific.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Dump the stack of the calling core to the console.
+ */
+void rte_dump_stack(void);
+
+/**
+ * Dump the registers of the calling core to the console.
+ *
+ * Note: Not implemented in a userapp environment; use gdb instead.
+ */
+void rte_dump_registers(void);
+
+/**
+ * Provide notification of a critical non-recoverable error and terminate
+ * execution abnormally.
+ *
+ * Display the format string and its expanded arguments (printf-like).
+ *
+ * In a linuxapp environment, this function dumps the stack and calls
+ * abort() resulting in a core dump if enabled.
+ *
+ * The function never returns.
+ *
+ * @param format
+ *   The format string
+ * @param args
+ *   The variable list of arguments.
+ */
+#define rte_panic(format, args...) __rte_panic(__func__, format, ## args)
+
+/*
+ * Provide notification of a critical non-recoverable error and stop.
+ *
+ * This function should not be called directly. Refer to rte_panic() macro
+ * documentation.
+ */
+void __rte_panic(const char *funcname , const char *format, ...)
+       __attribute__((noreturn))
+       __attribute__((format(printf, 2, 3)));
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_DEBUG_H_ */
diff --git a/lib/librte_eal/common/include/rte_eal.h b/lib/librte_eal/common/include/rte_eal.h
new file mode 100644 (file)
index 0000000..58fa1cc
--- /dev/null
@@ -0,0 +1,174 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#ifndef _RTE_EAL_H_
+#define _RTE_EAL_H_
+
+/**
+ * @file
+ *
+ * EAL Configuration API
+ */
+
+#include <rte_tailq.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define RTE_VERSION 1 /**< The version of the RTE configuration structure. */
+#define RTE_MAGIC 19820526 /**< Magic number written by the main partition when ready. */
+
+/**
+ * The lcore role (used in RTE or not).
+ */
+enum rte_lcore_role_t {
+       ROLE_RTE,
+       ROLE_OFF,
+};
+
+/**
+ * The type of process in a linuxapp, multi-process setup
+ */
+enum rte_proc_type_t {
+       RTE_PROC_AUTO = -1,   /* allow auto-detection of primary/secondary */
+       RTE_PROC_PRIMARY = 0, /* set to zero, so primary is the default */
+       RTE_PROC_SECONDARY,
+
+       RTE_PROC_INVALID
+};
+
+/**
+ * The structure for the memory configuration for the RTE.
+ * Used by the rte_config structure. It is separated out, as for multi-process
+ * support, the memory details should be shared across instances
+ */
+struct rte_mem_config {
+       /* memory topology */
+       uint32_t nchannel;    /**< Number of channels (0 if unknown). */
+       uint32_t nrank;       /**< Number of ranks (0 if unknown). */
+
+       /* memory segments and zones */
+       struct rte_memseg memseg[RTE_MAX_MEMSEG];    /**< Physmem descriptors. */
+       struct rte_memzone memzone[RTE_MAX_MEMZONE]; /**< Memzone descriptors. */
+
+       struct rte_tailq_head tailq_head[RTE_MAX_TAILQ]; /**< Tailqs for objects */
+} __attribute__((__packed__));
+
+/**
+ * The global RTE configuration structure.
+ */
+struct rte_config {
+       uint32_t version; /**< Configuration [structure] version. */
+       uint32_t magic;   /**< Magic number - Sanity check. */
+
+
+       uint32_t master_lcore;       /**< Id of the master lcore */
+       uint32_t lcore_count;        /**< Number of available logical cores. */
+       enum rte_lcore_role_t lcore_role[RTE_MAX_LCORE]; /**< State of cores. */
+
+       /** Primary or secondary configuration */
+       enum rte_proc_type_t process_type;
+
+       /**
+        * Pointer to memory configuration, which may be shared across multiple
+        * Intel DPDK instances
+        */
+       struct rte_mem_config *mem_config;
+} __attribute__((__packed__));
+
+/**
+ * Get the global configuration structure.
+ *
+ * @return
+ *   A pointer to the global configuration structure.
+ */
+struct rte_config *rte_eal_get_configuration(void);
+
+/**
+ * Get a lcore's role.
+ *
+ * @param lcore_id
+ *   The identifier of the lcore.
+ * @return
+ *   The role of the lcore.
+ */
+enum rte_lcore_role_t rte_eal_lcore_role(unsigned lcore_id);
+
+
+/**
+ * Get the process type in a multi-process setup
+ *
+ * @return
+ *   The process type
+ */
+enum rte_proc_type_t rte_eal_process_type(void);
+
+/**
+ * Initialize the Environment Abstraction Layer (EAL).
+ *
+ * This function is to be executed on the MASTER lcore only, as soon
+ * as possible in the application's main() function.
+ *
+ * The function finishes the initialization process that was started
+ * during boot (in case of baremetal) or before main() is called (in
+ * case of linuxapp). It puts the SLAVE lcores in the WAIT state.
+ *
+ * When the multi-partition feature is supported, depending on the
+ * configuration (if CONFIG_RTE_EAL_MAIN_PARTITION is disabled), this
+ * function waits to ensure that the magic number is set before
+ * returning. See also the rte_eal_get_configuration() function. Note:
+ * This behavior may change in the future.
+ *
+ * @param argc
+ *   The argc argument that was given to the main() function.
+ * @param argv
+ *   The argv argument that was given to the main() function.
+ * @return
+ *   - On success, the number of parsed arguments, which is greater or
+ *     equal to zero. After the call to rte_eal_init(),
+ *     all arguments argv[x] with x < ret may be modified and should
+ *     not be accessed by the application.
+ *   - On failure, a negative error value.
+ */
+int rte_eal_init(int argc, char **argv);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_EAL_H_ */
diff --git a/lib/librte_eal/common/include/rte_errno.h b/lib/librte_eal/common/include/rte_errno.h
new file mode 100644 (file)
index 0000000..53f7b40
--- /dev/null
@@ -0,0 +1,98 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+/**
+ * @file
+ *
+ * API for error cause tracking
+ */
+
+#ifndef _RTE_ERRNO_H_
+#define _RTE_ERRNO_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_per_lcore.h>
+
+RTE_DECLARE_PER_LCORE(int, _rte_errno); /**< Per core error number. */
+
+/**
+ * Error number value, stored per-thread, which can be queried after
+ * calls to certain functions to determine why those functions failed.
+ *
+ * Uses standard values from errno.h wherever possible, with a small number
+ * of additional possible values for RTE-specific conditions.
+ */
+#define rte_errno RTE_PER_LCORE(_rte_errno)
+
+/**
+ * Function which returns a printable string describing a particular
+ * error code. For non-RTE-specific error codes, this function returns
+ * the value from the libc strerror function.
+ *
+ * @param errnum
+ *   The error number to be looked up - generally the value of rte_errno
+ * @return
+ *   A pointer to a thread-local string containing the text describing
+ *   the error.
+ */
+const char *rte_strerror(int errnum);
+
+#ifndef __ELASTERROR
+/**
+ * Check if we have a defined value for the max system-defined errno values.
+ * if no max defined, start from 1000 to prevent overlap with standard values
+ */
+#define __ELASTERROR 1000
+#endif
+
+/** Error types */
+enum {
+       RTE_MIN_ERRNO = __ELASTERROR, /**< Start numbering above std errno vals */
+
+       E_RTE_SECONDARY, /**< Operation not allowed in secondary processes */
+       E_RTE_NO_CONFIG, /**< Missing rte_config */
+       E_RTE_NO_TAILQ,  /**< Uninitialised TAILQ */
+
+       RTE_MAX_ERRNO    /**< Max RTE error number */
+};
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_ERRNO_H_ */
diff --git a/lib/librte_eal/common/include/rte_interrupts.h b/lib/librte_eal/common/include/rte_interrupts.h
new file mode 100644 (file)
index 0000000..151df98
--- /dev/null
@@ -0,0 +1,123 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#ifndef _RTE_INTERRUPTS_H_
+#define _RTE_INTERRUPTS_H_
+
+/**
+ * @file
+ *
+ * The RTE interrupt interface provides functions to register/unregister
+ * callbacks for a specific interrupt.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** Interrupt handle */
+struct rte_intr_handle;
+
+/** Function to be registered for the specific interrupt */
+typedef void (*rte_intr_callback_fn)(struct rte_intr_handle *intr_handle,
+                                                       void *cb_arg);
+
+#include <exec-env/rte_interrupts.h>
+
+/**
+ * It registers the callback for the specific interrupt. Multiple
+ * callbacks can be registered at the same time.
+ * @param intr_handle
+ *  Pointer to the interrupt handle.
+ * @param cb
+ *  callback address.
+ * @param cb_arg
+ *  address of parameter for callback.
+ *
+ * @return
+ *  - On success, zero.
+ *  - On failure, a negative value.
+ */
+int rte_intr_callback_register(struct rte_intr_handle *intr_handle,
+                               rte_intr_callback_fn cb, void *cb_arg);
+
+/**
+ * It unregisters the callback according to the specified interrupt handle.
+ *
+ * @param intr_handle
+ *  pointer to the interrupt handle.
+ * @param cb
+ *  callback address.
+ * @param cb_arg
+ *  address of parameter for callback; (void *)-1 means to remove all
+ *  registered callbacks which have the same callback address.
+ *
+ * @return
+ *  - On success, return the number of callback entities removed.
+ *  - On failure, a negative value.
+ */
+int rte_intr_callback_unregister(struct rte_intr_handle *intr_handle,
+                               rte_intr_callback_fn cb, void *cb_arg);
+
+/**
+ * It enables the interrupt for the specified handle.
+ *
+ * @param intr_handle
+ *  pointer to the interrupt handle.
+ *
+ * @return
+ *  - On success, zero.
+ *  - On failure, a negative value.
+ */
+int rte_intr_enable(struct rte_intr_handle *intr_handle);
+
+/**
+ * It disables the interrupt for the specified handle.
+ *
+ * @param intr_handle
+ *  pointer to the interrupt handle.
+ *
+ * @return
+ *  - On success, zero.
+ *  - On failure, a negative value.
+ */
+int rte_intr_disable(struct rte_intr_handle *intr_handle);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+
diff --git a/lib/librte_eal/common/include/rte_launch.h b/lib/librte_eal/common/include/rte_launch.h
new file mode 100644 (file)
index 0000000..e8ad0a5
--- /dev/null
@@ -0,0 +1,179 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#ifndef _RTE_LAUNCH_H_
+#define _RTE_LAUNCH_H_
+
+/**
+ * @file
+ *
+ * Launch tasks on other lcores
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * State of an lcore.
+ */
+enum rte_lcore_state_t {
+       WAIT,       /**< waiting a new command */
+       RUNNING,    /**< executing command */
+       FINISHED,   /**< command executed */
+};
+
+/**
+ * Definition of a remote launch function.
+ */
+typedef int (lcore_function_t)(void *);
+
+/**
+ * Launch a function on another lcore.
+ *
+ * To be executed on the MASTER lcore only.
+ *
+ * Sends a message to a slave lcore (identified by the slave_id) that
+ * is in the WAIT state (this is true after the first call to
+ * rte_eal_init()). This can be checked by first calling
+ * rte_eal_wait_lcore(slave_id).
+ *
+ * When the remote lcore receives the message, it switches to
+ * the RUNNING state, then calls the function f with argument arg. Once the
+ * execution is done, the remote lcore switches to a FINISHED state and
+ * the return value of f is stored in a local variable to be read using
+ * rte_eal_wait_lcore().
+ *
+ * The MASTER lcore returns as soon as the message is sent and knows
+ * nothing about the completion of f.
+ *
+ * Note: This function is not designed to offer optimum
+ * performance. It is just a practical way to launch a function on
+ * another lcore at initialization time.
+ *
+ * @param f
+ *   The function to be called.
+ * @param arg
+ *   The argument for the function.
+ * @param slave_id
+ *   The identifier of the lcore on which the function should be executed.
+ * @return
+ *   - 0: Success. Execution of function f started on the remote lcore.
+ *   - (-EBUSY): The remote lcore is not in a WAIT state.
+ */
+int rte_eal_remote_launch(lcore_function_t *f, void *arg, unsigned slave_id);
+
+/**
+ * This enum indicates whether the master core must execute the handler
+ * launched on all logical cores.
+ */
+enum rte_rmt_call_master_t {
+       SKIP_MASTER = 0, /**< lcore handler not executed by master core. */
+       CALL_MASTER,     /**< lcore handler executed by master core. */
+};
+
+/**
+ * Launch a function on all lcores.
+ *
+ * Check that each SLAVE lcore is in a WAIT state, then call
+ * rte_eal_remote_launch() for each lcore.
+ *
+ * @param f
+ *   The function to be called.
+ * @param arg
+ *   The argument for the function.
+ * @param call_master
+ *   If call_master set to SKIP_MASTER, the MASTER lcore does not call
+ *   the function. If call_master is set to CALL_MASTER, the function
+ *   is also called on master before returning. In any case, the master
+ *   lcore returns as soon as it finished its job and knows nothing
+ *   about the completion of f on the other lcores.
+ * @return
+ *   - 0: Success. Execution of function f started on all remote lcores.
+ *   - (-EBUSY): At least one remote lcore is not in a WAIT state. In this
+ *     case, no message is sent to any of the lcores.
+ */
+int rte_eal_mp_remote_launch(lcore_function_t *f, void *arg,
+                            enum rte_rmt_call_master_t call_master);
+
+/**
+ * Get the state of the lcore identified by slave_id.
+ *
+ * To be executed on the MASTER lcore only.
+ *
+ * @param slave_id
+ *   The identifier of the lcore.
+ * @return
+ *   The state of the lcore.
+ */
+enum rte_lcore_state_t rte_eal_get_lcore_state(unsigned slave_id);
+
+/**
+ * Wait until an lcore finishes its job.
+ *
+ * To be executed on the MASTER lcore only.
+ *
+ * If the slave lcore identified by the slave_id is in a FINISHED state,
+ * switch to the WAIT state. If the lcore is in RUNNING state, wait until
+ * the lcore finishes its job and moves to the FINISHED state.
+ *
+ * @param slave_id
+ *   The identifier of the lcore.
+ * @return
+ *   - 0: If the lcore identified by the slave_id is in a WAIT state.
+ *   - The value that was returned by the previous remote launch
+ *     function call if the lcore identified by the slave_id was in a
+ *     FINISHED or RUNNING state. In this case, it changes the state
+ *     of the lcore to WAIT.
+ */
+int rte_eal_wait_lcore(unsigned slave_id);
+
+/**
+ * Wait until all lcores finish their jobs.
+ *
+ * To be executed on the MASTER lcore only. Issue an
+ * rte_eal_wait_lcore() for every lcore. The return values are
+ * ignored.
+ *
+ * After a call to rte_eal_mp_wait_lcore(), the caller can assume
+ * that all slave lcores are in a WAIT state.
+ */
+void rte_eal_mp_wait_lcore(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_LAUNCH_H_ */
diff --git a/lib/librte_eal/common/include/rte_lcore.h b/lib/librte_eal/common/include/rte_lcore.h
new file mode 100644 (file)
index 0000000..f9308c4
--- /dev/null
@@ -0,0 +1,191 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#ifndef _RTE_LCORE_H_
+#define _RTE_LCORE_H_
+
+/**
+ * @file
+ *
+ * API for lcore and Socket Manipulation. Parts of this are execution
+ * environment specific.
+ *
+ */
+#include <rte_per_lcore.h>
+#include <rte_eal.h>
+#include <rte_launch.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define LCORE_ID_ANY -1    /**< Any lcore. */
+
+RTE_DECLARE_PER_LCORE(unsigned, _lcore_id); /**< Per core "core id". */
+
+/**
+ * Return the ID of the execution unit we are running on.
+ * @return
+ *  Logical core ID
+ */
+static inline unsigned
+rte_lcore_id(void)
+{
+       return RTE_PER_LCORE(_lcore_id);
+}
+
+/**
+ * Get the id of the master lcore
+ *
+ * @return
+ *   the id of the master lcore
+ */
+static inline unsigned
+rte_get_master_lcore(void)
+{
+       return rte_eal_get_configuration()->master_lcore;
+}
+
+/**
+ * Return the number of execution units (lcores) on the system.
+ *
+ * @return
+ *   the number of execution units (lcores) on the system.
+ */
+static inline unsigned
+rte_lcore_count(void)
+{
+       const struct rte_config *cfg = rte_eal_get_configuration();
+       return cfg->lcore_count;
+}
+
+#include <exec-env/rte_lcore.h>
+
+#ifdef __DOXYGEN__
+/**
+ * Return the ID of the physical socket of the logical core we are
+ * running on.
+ * @return
+ *   Socket ID
+ */
+static inline unsigned
+rte_socket_id(void);
+
+/**
+ * Get the ID of the physical socket of the specified lcore
+ *
+ * @param lcore_id
+ *   the targeted lcore, which MUST be between 0 and RTE_MAX_LCORE-1.
+ * @return
+ *   the ID of lcoreid's physical socket
+ */
+static inline unsigned
+rte_lcore_to_socket_id(unsigned lcore_id);
+
+#endif
+
+
+/**
+ * Test if an lcore is enabled.
+ *
+ * @param lcore_id
+ *   The identifier of the lcore, which MUST be between 0 and
+ *   RTE_MAX_LCORE-1.
+ * @return
+ *   True if the given lcore is enabled; false otherwise.
+ */
+static inline int
+rte_lcore_is_enabled(unsigned lcore_id)
+{
+       struct rte_config *cfg = rte_eal_get_configuration();
+       if (lcore_id >= RTE_MAX_LCORE)
+               return 0;
+       return (cfg->lcore_role[lcore_id] != ROLE_OFF);
+}
+
+/**
+ * Get the next enabled lcore ID.
+ *
+ * @param i
+ *   The current lcore (reference).
+ * @param skip_master
+ *   If true, do not return the ID of the master lcore.
+ * @param wrap
+ *   If true, go back to 0 when RTE_MAX_LCORE is reached; otherwise,
+ *   return RTE_MAX_LCORE.
+ * @return
+ *   The next lcore_id or RTE_MAX_LCORE if not found.
+ */
+static inline unsigned
+rte_get_next_lcore(unsigned i, int skip_master, int wrap)
+{
+       i++;
+       if (wrap)
+               i %= RTE_MAX_LCORE;
+
+       while (i < RTE_MAX_LCORE) {
+               if (!rte_lcore_is_enabled(i) ||
+                   (skip_master && (i == rte_get_master_lcore()))) {
+                       i++;
+                       if (wrap)
+                               i %= RTE_MAX_LCORE;
+                       continue;
+               }
+               break;
+       }
+       return i;
+}
+/**
+ * Macro to browse all running lcores.
+ */
+#define RTE_LCORE_FOREACH(i)                                           \
+       for (i = rte_get_next_lcore(-1, 0, 0);                          \
+            i<RTE_MAX_LCORE;                                           \
+            i = rte_get_next_lcore(i, 0, 0))
+
+/**
+ * Macro to browse all running lcores except the master lcore.
+ */
+#define RTE_LCORE_FOREACH_SLAVE(i)                                     \
+       for (i = rte_get_next_lcore(-1, 1, 0);                          \
+            i<RTE_MAX_LCORE;                                           \
+            i = rte_get_next_lcore(i, 1, 0))
+
+#ifdef __cplusplus
+}
+#endif
+
+
+#endif /* _RTE_LCORE_H_ */
diff --git a/lib/librte_eal/common/include/rte_log.h b/lib/librte_eal/common/include/rte_log.h
new file mode 100644 (file)
index 0000000..d361130
--- /dev/null
@@ -0,0 +1,290 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#ifndef _RTE_LOG_H_
+#define _RTE_LOG_H_
+
+/**
+ * @file
+ *
+ * RTE Logs API
+ *
+ * This file provides a log API to RTE applications.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+#include <stdio.h>
+#include <stdarg.h>
+
+/** The rte_log structure. */
+struct rte_logs {
+       uint32_t type;  /**< Bitfield with enabled logs. */
+       uint32_t level; /**< Log level. */
+       FILE *file;     /**< Pointer to current FILE* for logs. */
+};
+
+/** Global log information */
+extern struct rte_logs rte_logs;
+
+/* SDK log type */
+#define RTE_LOGTYPE_EAL     0x00000001 /**< Log related to eal. */
+#define RTE_LOGTYPE_MALLOC  0x00000002 /**< Log related to malloc. */
+#define RTE_LOGTYPE_RING    0x00000004 /**< Log related to ring. */
+#define RTE_LOGTYPE_MEMPOOL 0x00000008 /**< Log related to mempool. */
+#define RTE_LOGTYPE_TIMER   0x00000010 /**< Log related to timers. */
+#define RTE_LOGTYPE_PMD     0x00000020 /**< Log related to poll mode driver. */
+#define RTE_LOGTYPE_HASH    0x00000040 /**< Log related to hash table. */
+#define RTE_LOGTYPE_LPM     0x00000080 /**< Log related to LPM. */
+
+/* these log types can be used in an application */
+#define RTE_LOGTYPE_USER1   0x01000000 /**< User-defined log type 1. */
+#define RTE_LOGTYPE_USER2   0x02000000 /**< User-defined log type 2. */
+#define RTE_LOGTYPE_USER3   0x04000000 /**< User-defined log type 3. */
+#define RTE_LOGTYPE_USER4   0x08000000 /**< User-defined log type 4. */
+#define RTE_LOGTYPE_USER5   0x10000000 /**< User-defined log type 5. */
+#define RTE_LOGTYPE_USER6   0x20000000 /**< User-defined log type 6. */
+#define RTE_LOGTYPE_USER7   0x40000000 /**< User-defined log type 7. */
+#define RTE_LOGTYPE_USER8   0x80000000 /**< User-defined log type 8. */
+
+/* Can't use 0, as it gives compiler warnings */
+#define RTE_LOG_EMERG    1U  /**< System is unusable.               */
+#define RTE_LOG_ALERT    2U  /**< Action must be taken immediately. */
+#define RTE_LOG_CRIT     3U  /**< Critical conditions.              */
+#define RTE_LOG_ERR      4U  /**< Error conditions.                 */
+#define RTE_LOG_WARNING  5U  /**< Warning conditions.               */
+#define RTE_LOG_NOTICE   6U  /**< Normal but significant condition. */
+#define RTE_LOG_INFO     7U  /**< Informational.                    */
+#define RTE_LOG_DEBUG    8U  /**< Debug-level messages.             */
+
+/** The default log stream. */
+extern FILE *eal_default_log_stream;
+
+/**
+ * Change the stream that will be used by the logging system.
+ *
+ * This can be done at any time. The f argument represents the stream
+ * to be used to send the logs. If f is NULL, the default output is
+ * used, which is the serial line in case of bare metal, or directly
+ * sent to syslog in case of linux application.
+ *
+ * @param f
+ *   Pointer to the stream.
+ * @return
+ *   - 0 on success.
+ *   - Negative on error.
+ */
+int rte_openlog_stream(FILE *f);
+
+/**
+ * Set the global log level.
+ *
+ * After this call, all logs that are lower or equal than level and
+ * lower or equal than the RTE_LOG_LEVEL configuration option will be
+ * displayed.
+ *
+ * @param level
+ *   Log level. A value between RTE_LOG_EMERG (1) and RTE_LOG_DEBUG (8).
+ */
+void rte_set_log_level(uint32_t level);
+
+/**
+ * Enable or disable the log type.
+ *
+ * @param type
+ *   Log type, for example, RTE_LOGTYPE_EAL.
+ * @param enable
+ *   True for enable; false for disable.
+ */
+void rte_set_log_type(uint32_t type, int enable);
+
+/**
+ * Get the current loglevel for the message being processed.
+ *
+ * Before calling the user-defined stream for logging, the log
+ * subsystem sets a per-lcore variable containing the loglevel and the
+ * logtype of the message being processed. This information can be
+ * accessed by the user-defined log output function through this
+ * function.
+ *
+ * @return
+ *   The loglevel of the message being processed.
+ */
+int rte_log_cur_msg_loglevel(void);
+
+/**
+ * Get the current logtype for the message being processed.
+ *
+ * Before calling the user-defined stream for logging, the log
+ * subsystem sets a per-lcore variable containing the loglevel and the
+ * logtype of the message being processed. This information can be
+ * accessed by the user-defined log output function through this
+ * function.
+ *
+ * @return
+ *   The logtype of the message being processed.
+ */
+int rte_log_cur_msg_logtype(void);
+
+/**
+ * Enable or disable the history (enabled by default)
+ *
+ * @param enable
+ *   Non-zero to enable, or zero to disable, the history.
+ */
+void rte_log_set_history(int enable);
+
+/**
+ * Dump the log history to the console.
+ */
+void rte_log_dump_history(void);
+
+/**
+ * Add a log message to the history.
+ *
+ * This function can be called from a user-defined log stream. It adds
+ * the given message in the history that can be dumped using
+ * rte_log_dump_history().
+ *
+ * @param buf
+ *   A data buffer containing the message to be saved in the history.
+ * @param size
+ *   The length of the data buffer.
+ * @return
+ *   - 0: Success.
+ *   - (-ENOBUFS) if there is no room to store the message.
+ */
+int rte_log_add_in_history(const char *buf, size_t size);
+
+/**
+ * Generates a log message.
+ *
+ * The message will be sent in the stream defined by the previous call
+ * to rte_openlog_stream().
+ *
+ * The level argument determines if the log should be displayed or
+ * not, depending on the global rte_logs variable.
+ *
+ * The preferred alternative is the RTE_LOG() function because debug logs may
+ * be removed at compilation time if optimization is enabled. Moreover,
+ * logs are automatically prefixed by type when using the macro.
+ *
+ * @param level
+ *   Log level. A value between RTE_LOG_EMERG (1) and RTE_LOG_DEBUG (8).
+ * @param logtype
+ *   The log type, for example, RTE_LOGTYPE_EAL.
+ * @param format
+ *   The format string, as in printf(3), followed by the variable arguments
+ *   required by the format.
+ * @return
+ *   - 0: Success.
+ *   - Negative on error.
+ */
+int rte_log(uint32_t level, uint32_t logtype, const char *format, ...)
+       __attribute__((format(printf, 3, 4)));
+
+/**
+ * Generates a log message.
+ *
+ * The message will be sent in the stream defined by the previous call
+ * to rte_openlog_stream().
+ *
+ * The level argument determines if the log should be displayed or
+ * not, depending on the global rte_logs variable. A trailing
+ * newline may be added if needed.
+ *
+ * The preferred alternative is the RTE_LOG() because debug logs may be
+ * removed at compilation time.
+ *
+ * @param level
+ *   Log level. A value between RTE_LOG_EMERG (1) and RTE_LOG_DEBUG (8).
+ * @param logtype
+ *   The log type, for example, RTE_LOGTYPE_EAL.
+ * @param format
+ *   The format string, as in printf(3), followed by the variable arguments
+ *   required by the format.
+ * @param ap
+ *   The va_list of the variable arguments required by the format.
+ * @return
+ *   - 0: Success.
+ *   - Negative on error.
+ */
+int rte_vlog(uint32_t level, uint32_t logtype, const char *format, va_list ap);
+
+/**
+ * Generates a log message.
+ *
+ * The RTE_LOG() is equivalent to rte_log() with two differences:
+ *
+ * - RTE_LOG() can be used to remove debug logs at compilation time,
+ *   depending on RTE_LOG_LEVEL configuration option, and compilation
+ *   optimization level. If optimization is enabled, the tests
+ *   involving constants only are pre-computed. If compilation is done
+ *   with -O0, these tests will be done at run time.
+ * - The log level and log type names are smaller, for example:
+ *   RTE_LOG(INFO, EAL, "this is a %s", "log");
+ *
+ * @param l
+ *   Log level. A value between EMERG (1) and DEBUG (8). The short name is
+ *   expanded by the macro, so it cannot be an integer value.
+ * @param t
+ *   The log type, for example, EAL. The short name is expanded by the
+ *   macro, so it cannot be an integer value.
+ * @param fmt
+ *   The fmt string, as in printf(3), followed by the variable arguments
+ *   required by the format.
+ * @param args
+ *   The variable list of arguments according to the format string.
+ * @return
+ *   - 0: Success.
+ *   - Negative on error.
+ */
+#define RTE_LOG(l, t, fmt, args...) ({                                 \
+       if ((RTE_LOG_##l <= RTE_LOG_LEVEL) &&                           \
+           (RTE_LOG_##l <= rte_logs.level) &&                          \
+           (RTE_LOGTYPE_##t & rte_logs.type)) {                        \
+               rte_log(RTE_LOG_##l, RTE_LOGTYPE_##t,                   \
+                         #t ": " fmt, ## args);                        \
+       }                                                               \
+})
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_LOG_H_ */
diff --git a/lib/librte_eal/common/include/rte_memcpy.h b/lib/librte_eal/common/include/rte_memcpy.h
new file mode 100644 (file)
index 0000000..fd2a296
--- /dev/null
@@ -0,0 +1,355 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#ifndef _RTE_MEMCPY_H_
+#define _RTE_MEMCPY_H_
+
+/**
+ * @file
+ *
+ * Functions for SSE implementation of memcpy().
+ */
+
+#include <stdint.h>
+#include <string.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Copy 16 bytes from one location to another using optimised SSE
+ * instructions (unaligned movdqu). The locations should not overlap.
+ *
+ * @param dst
+ *   Pointer to the destination of the data.
+ * @param src
+ *   Pointer to the source data.
+ */
+static inline void
+rte_mov16(uint8_t *dst, const uint8_t *src)
+{
+       asm volatile ("movdqu (%[src]), %%xmm0\n\t"
+                     "movdqu %%xmm0, (%[dst])\n\t"
+                     :
+                     : [src] "r" (src),
+                       [dst] "r"(dst)
+                     : "xmm0", "memory");
+}
+
+/**
+ * Copy 32 bytes from one location to another using optimised SSE
+ * instructions (unaligned movdqu). The locations should not overlap.
+ *
+ * @param dst
+ *   Pointer to the destination of the data.
+ * @param src
+ *   Pointer to the source data.
+ */
+static inline void
+rte_mov32(uint8_t *dst, const uint8_t *src)
+{
+       asm volatile ("movdqu (%[src]), %%xmm0\n\t"
+                     "movdqu 16(%[src]), %%xmm1\n\t"
+                     "movdqu %%xmm0, (%[dst])\n\t"
+                     "movdqu %%xmm1, 16(%[dst])"
+                     :
+                     : [src] "r" (src),
+                       [dst] "r"(dst)
+                     : "xmm0", "xmm1", "memory");
+}
+
+/**
+ * Copy 48 bytes from one location to another using optimised SSE
+ * instructions (unaligned movdqu). The locations should not overlap.
+ *
+ * @param dst
+ *   Pointer to the destination of the data.
+ * @param src
+ *   Pointer to the source data.
+ */
+static inline void
+rte_mov48(uint8_t *dst, const uint8_t *src)
+{
+       asm volatile ("movdqu (%[src]), %%xmm0\n\t"
+                     "movdqu 16(%[src]), %%xmm1\n\t"
+                     "movdqu 32(%[src]), %%xmm2\n\t"
+                     "movdqu %%xmm0, (%[dst])\n\t"
+                     "movdqu %%xmm1, 16(%[dst])\n\t"
+                     "movdqu %%xmm2, 32(%[dst])"
+                     :
+                     : [src] "r" (src),
+                       [dst] "r"(dst)
+                     : "xmm0", "xmm1", "xmm2", "memory");
+}
+
+/**
+ * Copy 64 bytes from one location to another using optimised SSE
+ * instructions (unaligned movdqu). The locations should not overlap.
+ *
+ * @param dst
+ *   Pointer to the destination of the data.
+ * @param src
+ *   Pointer to the source data.
+ */
+static inline void
+rte_mov64(uint8_t *dst, const uint8_t *src)
+{
+       asm volatile ("movdqu (%[src]), %%xmm0\n\t"
+                     "movdqu 16(%[src]), %%xmm1\n\t"
+                     "movdqu 32(%[src]), %%xmm2\n\t"
+                     "movdqu 48(%[src]), %%xmm3\n\t"
+                     "movdqu %%xmm0, (%[dst])\n\t"
+                     "movdqu %%xmm1, 16(%[dst])\n\t"
+                     "movdqu %%xmm2, 32(%[dst])\n\t"
+                     "movdqu %%xmm3, 48(%[dst])"
+                     :
+                     : [src] "r" (src),
+                       [dst] "r"(dst)
+                     : "xmm0", "xmm1", "xmm2", "xmm3","memory");
+}
+
+/**
+ * Copy 128 bytes from one location to another using optimised SSE
+ * instructions (unaligned movdqu). The locations should not overlap.
+ *
+ * @param dst
+ *   Pointer to the destination of the data.
+ * @param src
+ *   Pointer to the source data.
+ */
+static inline void
+rte_mov128(uint8_t *dst, const uint8_t *src)
+{
+       asm volatile ("movdqu (%[src]), %%xmm0\n\t"
+                     "movdqu 16(%[src]), %%xmm1\n\t"
+                     "movdqu 32(%[src]), %%xmm2\n\t"
+                     "movdqu 48(%[src]), %%xmm3\n\t"
+                     "movdqu 64(%[src]), %%xmm4\n\t"
+                     "movdqu 80(%[src]), %%xmm5\n\t"
+                     "movdqu 96(%[src]), %%xmm6\n\t"
+                     "movdqu 112(%[src]), %%xmm7\n\t"
+                     "movdqu %%xmm0, (%[dst])\n\t"
+                     "movdqu %%xmm1, 16(%[dst])\n\t"
+                     "movdqu %%xmm2, 32(%[dst])\n\t"
+                     "movdqu %%xmm3, 48(%[dst])\n\t"
+                     "movdqu %%xmm4, 64(%[dst])\n\t"
+                     "movdqu %%xmm5, 80(%[dst])\n\t"
+                     "movdqu %%xmm6, 96(%[dst])\n\t"
+                     "movdqu %%xmm7, 112(%[dst])"
+                     :
+                     : [src] "r" (src),
+                       [dst] "r"(dst)
+                     : "xmm0", "xmm1", "xmm2", "xmm3",
+                       "xmm4", "xmm5", "xmm6", "xmm7", "memory");
+}
+
+/**
+ * Copy 256 bytes from one location to another using optimised SSE
+ * instructions. The locations should not overlap.
+ *
+ * @param dst
+ *   Pointer to the destination of the data.
+ * @param src
+ *   Pointer to the source data.
+ */
+static inline void
+rte_mov256(uint8_t *dst, const uint8_t *src)
+{
+       /*
+        * There are 16 XMM registers, but this function does not use
+        * them all so that it can still be compiled as 32bit
+        * code. The performance increase was negligible if all 16
+        * registers were used.
+        */
+       rte_mov128(dst, src);
+       rte_mov128(dst + 128, src + 128);
+}
+
+#ifdef RTE_MEMCPY_BUILTIN_CONSTANT_P
+/**
+ * Choose between compiler built-in implementation of memcpy or DPDK
+ * implementation depending if size is a compile-time constant
+ */
+#define rte_memcpy(dst, src, n) \
+       (__builtin_constant_p (n) ? \
+       memcpy(dst, src, n) : rte_memcpy_func(dst, src, n))
+#else
+/**
+ * Always use DPDK implementation.
+ */
+#define rte_memcpy rte_memcpy_func
+#endif
+
+/**
+ * Copy bytes from one location to another. The locations must not overlap.
+ *
+ * @param dst
+ *   Pointer to the destination of the data.
+ * @param src
+ *   Pointer to the source data.
+ * @param n
+ *   Number of bytes to copy.
+ * @return
+ *   Pointer to the destination data.
+ */
+static inline void *
+rte_memcpy_func(void *dst, const void *src, size_t n)
+{
+       void *ret = dst;
+
+       /* We can't copy < 16 bytes using XMM registers so do it manually. */
+       if (n < 16) {
+               if (n & 0x01) {
+                       *(uint8_t *)dst = *(const uint8_t *)src;
+                       dst = (uint8_t *)dst + 1;
+                       src = (const uint8_t *)src + 1;
+               }
+               if (n & 0x02) {
+                       *(uint16_t *)dst = *(const uint16_t *)src;
+                       dst = (uint16_t *)dst + 1;
+                       src = (const uint16_t *)src + 1;
+               }
+               if (n & 0x04) {
+                       /*
+                        * NOTE: doing this as a 32bit copy causes "strict
+                        * aliasing" compile errors, but worked fine for 64bit
+                        * copy below, for unknown reasons.
+                        */
+                       *(uint16_t *)dst = *(const uint16_t *)src;
+                       *((uint16_t *)dst + 1) = *((const uint16_t *)src + 1);
+                       dst = (uint32_t *)dst + 1;
+                       src = (const uint32_t *)src + 1;
+               }
+               if (n & 0x08) {
+                       *(uint64_t *)dst = *(const uint64_t *)src;
+               }
+               return ret;
+       }
+
+       /* Special fast cases for <= 128 bytes: two overlapping block copies
+        * cover the whole range (safe, since n >= 16 here). */
+       if (n <= 32) {
+               rte_mov16((uint8_t *)dst, (const uint8_t *)src);
+               rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
+               return ret;
+       }
+
+       if (n <= 64) {
+               rte_mov32((uint8_t *)dst, (const uint8_t *)src);
+               rte_mov32((uint8_t *)dst - 32 + n, (const uint8_t *)src - 32 + n);
+               return ret;
+       }
+
+       if (n <= 128) {
+               rte_mov64((uint8_t *)dst, (const uint8_t *)src);
+               rte_mov64((uint8_t *)dst - 64 + n, (const uint8_t *)src - 64 + n);
+               return ret;
+       }
+
+       /*
+        * For large copies > 128 bytes. This combination of 256, 64 and 16 byte
+        * copies was found to be faster than doing 128 and 32 byte copies as
+        * well.
+        */
+       for ( ; n >= 256; n -= 256) {
+               rte_mov256((uint8_t *)dst, (const uint8_t *)src);
+               dst = (uint8_t *)dst + 256;
+               src = (const uint8_t *)src + 256;
+       }
+
+       /*
+        * We split the remaining bytes (which will be less than 256) into
+        * 64byte (2^6) chunks.
+        * Using incrementing integers in the case labels of a switch statement
+        * encourages the compiler to use a jump table. To get incrementing
+        * integers, we shift the 2 relevant bits to the LSB position to first
+        * get decrementing integers, and then subtract.
+        */
+       switch (3 - (n >> 6)) {
+       case 0x00:
+               rte_mov64((uint8_t *)dst, (const uint8_t *)src);
+               n -= 64;
+               dst = (uint8_t *)dst + 64;
+               src = (const uint8_t *)src + 64;      /* fallthrough */
+       case 0x01:
+               rte_mov64((uint8_t *)dst, (const uint8_t *)src);
+               n -= 64;
+               dst = (uint8_t *)dst + 64;
+               src = (const uint8_t *)src + 64;      /* fallthrough */
+       case 0x02:
+               rte_mov64((uint8_t *)dst, (const uint8_t *)src);
+               n -= 64;
+               dst = (uint8_t *)dst + 64;
+               src = (const uint8_t *)src + 64;      /* fallthrough */
+       default:
+               ;
+       }
+
+       /*
+        * We split the remaining bytes (which will be less than 64) into
+        * 16byte (2^4) chunks, using the same switch structure as above.
+        */
+       switch (3 - (n >> 4)) {
+       case 0x00:
+               rte_mov16((uint8_t *)dst, (const uint8_t *)src);
+               n -= 16;
+               dst = (uint8_t *)dst + 16;
+               src = (const uint8_t *)src + 16;      /* fallthrough */
+       case 0x01:
+               rte_mov16((uint8_t *)dst, (const uint8_t *)src);
+               n -= 16;
+               dst = (uint8_t *)dst + 16;
+               src = (const uint8_t *)src + 16;      /* fallthrough */
+       case 0x02:
+               rte_mov16((uint8_t *)dst, (const uint8_t *)src);
+               n -= 16;
+               dst = (uint8_t *)dst + 16;
+               src = (const uint8_t *)src + 16;      /* fallthrough */
+       default:
+               ;
+       }
+
+       /* Copy any remaining bytes, without going beyond end of buffers */
+       if (n != 0) {
+               rte_mov16((uint8_t *)dst - 16 + n, (const uint8_t *)src - 16 + n);
+       }
+       return ret;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_MEMCPY_H_ */
diff --git a/lib/librte_eal/common/include/rte_memory.h b/lib/librte_eal/common/include/rte_memory.h
new file mode 100644 (file)
index 0000000..bf843dc
--- /dev/null
@@ -0,0 +1,143 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#ifndef _RTE_MEMORY_H_
+#define _RTE_MEMORY_H_
+
+/**
+ * @file
+ *
+ * Memory-related RTE API.
+ */
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+enum rte_page_sizes {
+       RTE_PGSIZE_4K = 1 << 12,
+       RTE_PGSIZE_2M = RTE_PGSIZE_4K << 9,
+       RTE_PGSIZE_1G = RTE_PGSIZE_2M <<9
+};
+
+#define SOCKET_ID_ANY -1                    /**< Any NUMA socket. */
+#define CACHE_LINE_SIZE 64                  /**< Cache line size. */
+#define CACHE_LINE_MASK (CACHE_LINE_SIZE-1) /**< Cache line mask. */
+
+#define CACHE_LINE_ROUNDUP(size) \
+       (CACHE_LINE_SIZE * ((size + CACHE_LINE_SIZE - 1) / CACHE_LINE_SIZE))
+/**< Return the first cache-aligned value greater or equal to size. */
+
+/**
+ * Force alignment to cache line.
+ */
+#define __rte_cache_aligned __attribute__((__aligned__(CACHE_LINE_SIZE)))
+
+#ifndef __KERNEL__ /* so we can include this header in kernel modules */
+typedef uint64_t phys_addr_t; /**< Physical address definition. */
+#endif
+
+/**
+ * Physical memory segment descriptor.
+ */
+struct rte_memseg {
+       phys_addr_t phys_addr;      /**< Start physical address. */
+       union {
+               void *addr;         /**< Start virtual address. */
+               uint64_t addr_64;   /**< Makes sure addr is always 64 bits */
+       };
+       uint64_t len;               /**< Length of the segment. */
+       uint64_t hugepage_sz;       /**< The pagesize of underlying memory */
+       int32_t socket_id;          /**< NUMA socket ID. */
+       uint32_t nchannel;          /**< Number of channels. */
+       uint32_t nrank;             /**< Number of ranks. */
+} __attribute__((__packed__));
+
+
+/**
+ * Get the layout of the available physical memory.
+ *
+ * It can be useful for an application to have the full physical
+ * memory layout to decide the size of a memory zone to reserve. This
+ * table is stored in rte_config (see rte_eal_get_configuration()).
+ *
+ * @return
+ *  - On success, return a pointer to a read-only table of struct
+ *    rte_physmem_desc elements, containing the layout of all
+ *    addressable physical memory. The last element of the table
+ *    contains a NULL address.
+ *  - On error, return NULL. This should not happen since it is a fatal
+ *    error that will probably cause the entire system to panic.
+ */
+const struct rte_memseg *rte_eal_get_physmem_layout(void);
+
+/**
+ * Dump the physical memory layout to the console.
+ */
+void rte_dump_physmem_layout(void);
+
+/**
+ * Get the total amount of available physical memory.
+ *
+ * @return
+ *    The total amount of available physical memory in bytes.
+ */
+uint64_t rte_eal_get_physmem_size(void);
+
+/**
+ * Get the number of memory channels.
+ *
+ * @return
+ *   The number of memory channels on the system. The value is 0 if unknown
+ *   or not the same on all devices.
+ */
+unsigned rte_memory_get_nchannel(void);
+
+/**
+ * Get the number of memory ranks.
+ *
+ * @return
+ *   The number of memory ranks on the system. The value is 0 if unknown or
+ *   not the same on all devices.
+ */
+unsigned rte_memory_get_nrank(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_MEMORY_H_ */
diff --git a/lib/librte_eal/common/include/rte_memzone.h b/lib/librte_eal/common/include/rte_memzone.h
new file mode 100644 (file)
index 0000000..02da3db
--- /dev/null
@@ -0,0 +1,200 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#ifndef _RTE_MEMZONE_H_
+#define _RTE_MEMZONE_H_
+
+/**
+ * @file
+ * RTE Memzone
+ *
+ * The goal of the memzone allocator is to reserve contiguous
+ * portions of physical memory. These zones are identified by a name.
+ *
+ * The memzone descriptors are shared by all partitions and are
+ * located in a known place of physical memory. This zone is accessed
+ * using rte_eal_get_configuration(). The lookup (by name) of a
+ * memory zone can be done in any partition and returns the same
+ * physical address.
+ *
+ * A reserved memory zone cannot be unreserved. The reservation shall
+ * be done at initialization time only.
+ */
+
+#include <rte_memory.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define RTE_MEMZONE_2MB            0x00000001   /**< Use 2MB pages. */
+#define RTE_MEMZONE_1GB            0x00000002   /**< Use 1GB pages. */
+#define RTE_MEMZONE_SIZE_HINT_ONLY 0x00000004   /**< Use available page size */
+
+/**
+ * A structure describing a memzone, which is a contiguous portion of
+ * physical memory identified by a name.
+ */
+struct rte_memzone {
+
+#define RTE_MEMZONE_NAMESIZE 32       /**< Maximum length of memory zone name.*/
+       char name[RTE_MEMZONE_NAMESIZE];  /**< Name of the memory zone. */
+
+       phys_addr_t phys_addr;            /**< Start physical address. */
+       union {
+               void *addr;                   /**< Start virtual address. */
+               uint64_t addr_64;             /**< Makes sure addr is always 64-bits */
+       };
+       uint64_t len;                     /**< Length of the memzone. */
+
+       uint64_t hugepage_sz;             /**< The page size of underlying memory */
+
+       int32_t socket_id;                /**< NUMA socket ID. */
+
+       uint32_t flags;                   /**< Characteristics of this memzone. */
+} __attribute__((__packed__));
+
+/**
+ * Reserve a portion of physical memory.
+ *
+ * This function reserves some memory and returns a pointer to a
+ * correctly filled memzone descriptor. If the allocation cannot be
+ * done, return NULL. Note: A reserved zone cannot be freed.
+ *
+ * @param name
+ *   The name of the memzone. If it already exists, the function will
+ *   fail and return NULL.
+ * @param len
+ *   The size of the memory to be reserved. If it
+ *   is 0, the biggest contiguous zone will be reserved.
+ * @param socket_id
+ *   The socket identifier in the case of
+ *   NUMA. The value can be SOCKET_ID_ANY if there is no NUMA
+ *   constraint for the reserved zone.
+ * @param flags
+ *   The flags parameter is used to request memzones to be
+ *   taken from 1GB or 2MB hugepages.
+ *   - RTE_MEMZONE_2MB - Reserve from 2MB pages
+ *   - RTE_MEMZONE_1GB - Reserve from 1GB pages
+ *   - RTE_MEMZONE_SIZE_HINT_ONLY - Allow alternative page size to be used if
+ *                                  the requested page size is unavailable.
+ *                                  If this flag is not set, the function
+ *                                  will return error on an unavailable size
+ *                                  request.
+ * @return
+ *   A pointer to a correctly-filled read-only memzone descriptor, or NULL
+ *   on error.
+ *   On error case, rte_errno will be set appropriately:
+ *    - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
+ *    - E_RTE_SECONDARY - function was called from a secondary process instance
+ *    - ENOSPC - the maximum number of memzones has already been allocated
+ *    - EEXIST - a memzone with the same name already exists
+ *    - ENOMEM - no appropriate memory area found in which to create memzone
+ *    - EINVAL - invalid parameters
+ */
+const struct rte_memzone *rte_memzone_reserve(const char *name,
+                                             uint64_t len, int socket_id,
+                                             unsigned flags);
+
+/**
+ * Reserve a portion of physical memory with alignment on a specified
+ * boundary.
+ *
+ * This function reserves some memory with alignment on a specified
+ * boundary, and returns a pointer to a correctly filled memzone
+ * descriptor. If the allocation cannot be done or if the alignment
+ * is not a power of 2, returns NULL.
+ * Note: A reserved zone cannot be freed.
+ *
+ * @param name
+ *   The name of the memzone. If it already exists, the function will
+ *   fail and return NULL.
+ * @param len
+ *   The size of the memory to be reserved. If it
+ *   is 0, the biggest contiguous zone will be reserved.
+ * @param align
+ *   Alignment for resulting memzone. Must be a power of 2.
+ * @param socket_id
+ *   The socket identifier in the case of
+ *   NUMA. The value can be SOCKET_ID_ANY if there is no NUMA
+ *   constraint for the reserved zone.
+ * @param flags
+ *   The flags parameter is used to request memzones to be
+ *   taken from 1GB or 2MB hugepages.
+ *   - RTE_MEMZONE_2MB - Reserve from 2MB pages
+ *   - RTE_MEMZONE_1GB - Reserve from 1GB pages
+ *   - RTE_MEMZONE_SIZE_HINT_ONLY - Allow alternative page size to be used if
+ *                                  the requested page size is unavailable.
+ *                                  If this flag is not set, the function
+ *                                  will return error on an unavailable size
+ *                                  request.
+ * @return
+ *   A pointer to a correctly-filled read-only memzone descriptor, or NULL
+ *   on error.
+ *   On error case, rte_errno will be set appropriately:
+ *    - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
+ *    - E_RTE_SECONDARY - function was called from a secondary process instance
+ *    - ENOSPC - the maximum number of memzones has already been allocated
+ *    - EEXIST - a memzone with the same name already exists
+ *    - ENOMEM - no appropriate memory area found in which to create memzone
+ *    - EINVAL - invalid parameters
+ */
+const struct rte_memzone *rte_memzone_reserve_aligned(const char *name,
+                                             uint64_t len, int socket_id, unsigned flags,
+                                             unsigned align);
+
+/**
+ * Lookup for a memzone.
+ *
+ * Get a pointer to a descriptor of an already reserved memory
+ * zone identified by the name given as an argument.
+ *
+ * @param name
+ *   The name of the memzone.
+ * @return
+ *   A pointer to a read-only memzone descriptor.
+ */
+const struct rte_memzone *rte_memzone_lookup(const char *name);
+
+/**
+ * Dump all reserved memzones to the console.
+ */
+void rte_memzone_dump(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_MEMZONE_H_ */
diff --git a/lib/librte_eal/common/include/rte_pci.h b/lib/librte_eal/common/include/rte_pci.h
new file mode 100644 (file)
index 0000000..f2128b5
--- /dev/null
@@ -0,0 +1,197 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#ifndef _RTE_PCI_H_
+#define _RTE_PCI_H_
+
+/**
+ * @file
+ *
+ * RTE PCI Interface
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <sys/queue.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <rte_interrupts.h>
+
+TAILQ_HEAD(pci_device_list, rte_pci_device); /**< PCI devices in D-linked Q. */
+TAILQ_HEAD(pci_driver_list, rte_pci_driver); /**< PCI drivers in D-linked Q. */
+
+extern struct pci_driver_list driver_list; /**< Global list of PCI drivers. */
+extern struct pci_device_list device_list; /**< Global list of PCI devices. */
+
+/** Pathname of PCI devices directory. */
+#define SYSFS_PCI_DEVICES "/sys/bus/pci/devices"
+
+/** Formatting string for PCI device identifier: Ex: 0000:00:01.0 */
+#define PCI_PRI_FMT "%.4"PRIx16":%.2"PRIx8":%.2"PRIx8".%"PRIx8
+
+/** Nb. of values in PCI device identifier format string. */
+#define PCI_FMT_NVAL 4
+
+/** Nb. of values in PCI resource format. */
+#define PCI_RESOURCE_FMT_NVAL 3
+
+/**
+ * A structure describing a PCI resource.
+ */
+struct rte_pci_resource {
+       uint64_t phys_addr;   /**< Physical address, 0 if no resource. */
+       uint64_t len;         /**< Length of the resource. */
+       void *addr;           /**< Virtual address, NULL when not mapped. */
+};
+
+/** Maximum number of PCI resources. */
+#define PCI_MAX_RESOURCE 7
+
+/**
+ * A structure describing an ID for a PCI driver. Each driver provides a
+ * table of these IDs for each device that it supports.
+ */
+struct rte_pci_id {
+       uint16_t vendor_id;           /**< Vendor ID or PCI_ANY_ID. */
+       uint16_t device_id;           /**< Device ID or PCI_ANY_ID. */
+       uint16_t subsystem_vendor_id; /**< Subsystem vendor ID or PCI_ANY_ID. */
+       uint16_t subsystem_device_id; /**< Subsystem device ID or PCI_ANY_ID. */
+};
+
+/**
+ * A structure describing the location of a PCI device.
+ */
+struct rte_pci_addr {
+       uint16_t domain;                /**< Device domain. */
+       uint8_t bus;                    /**< Device bus. */
+       uint8_t devid;                  /**< Device ID. */
+       uint8_t function;               /**< Device function. */
+};
+
+/**
+ * A structure describing a PCI device.
+ */
+struct rte_pci_device {
+       TAILQ_ENTRY(rte_pci_device) next;       /**< Next probed PCI device. */
+       struct rte_pci_addr addr;               /**< PCI location. */
+       struct rte_pci_id id;                   /**< PCI ID. */
+       struct rte_pci_resource mem_resource;   /**< PCI Memory Resource. */
+       struct rte_intr_handle intr_handle;     /**< Interrupt handle. */
+};
+
+/** Any PCI device identifier (vendor, device, ...) */
+#define PCI_ANY_ID (0xffff)
+
+#ifdef __cplusplus
+/** C++ macro used to help building up tables of device IDs */
+#define RTE_PCI_DEVICE(vend, dev) \
+       (vend),                   \
+       (dev),                    \
+       PCI_ANY_ID,               \
+       PCI_ANY_ID
+#else
+/** Macro used to help building up tables of device IDs */
+#define RTE_PCI_DEVICE(vend, dev)          \
+       .vendor_id = (vend),               \
+       .device_id = (dev),                \
+       .subsystem_vendor_id = PCI_ANY_ID, \
+       .subsystem_device_id = PCI_ANY_ID
+#endif
+
+struct rte_pci_driver;
+
+/**
+ * Initialisation function for the driver called during PCI probing.
+ */
+typedef int (pci_devinit_t)(struct rte_pci_driver *, struct rte_pci_device *);
+
+/**
+ * A structure describing a PCI driver.
+ */
+struct rte_pci_driver {
+       TAILQ_ENTRY(rte_pci_driver) next;       /**< Next in list. */
+       const char *name;                       /**< Driver name. */
+       pci_devinit_t *devinit;                 /**< Device init. function. */
+       struct rte_pci_id *id_table;            /**< ID table, NULL terminated. */
+       uint32_t drv_flags;                     /**< Flags controlling handling of device. */
+};
+
+/**< Device needs igb_uio kernel module */
+#define RTE_PCI_DRV_NEED_IGB_UIO 0x0001
+
+/**
+ * Probe the PCI bus for registered drivers.
+ *
+ * Scan the content of the PCI bus, and call the probe() function for
+ * all registered drivers that have a matching entry in its id_table
+ * for discovered devices.
+ *
+ * @return
+ *   - 0 on success.
+ *   - Negative on error.
+ */
+int rte_eal_pci_probe(void);
+
+/**
+ * Dump the content of the PCI bus.
+ */
+void rte_eal_pci_dump(void);
+
+/**
+ * Register a PCI driver.
+ *
+ * @param driver
+ *   A pointer to a rte_pci_driver structure describing the driver
+ *   to be registered.
+ */
+void rte_eal_pci_register(struct rte_pci_driver *driver);
+
+/**
+ * Register a list of PCI locations that will be blacklisted (not used by DPDK).
+ *
+ * @param blacklist
+ *   List of PCI device addresses that will not be used by DPDK.
+ * @param size
+ *   Number of items in the list.
+ */
+void rte_eal_pci_set_blacklist(struct rte_pci_addr *blacklist, unsigned size);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_PCI_H_ */
diff --git a/lib/librte_eal/common/include/rte_pci_dev_ids.h b/lib/librte_eal/common/include/rte_pci_dev_ids.h
new file mode 100644 (file)
index 0000000..402d21f
--- /dev/null
@@ -0,0 +1,205 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+/**
+ * @file
+ *
+ * This file contains a list of the PCI device IDs recognised by DPDK, which
+ * can be used to fill out an array of structures describing the devices.
+ *
+ * Currently two families of devices are recognised: those supported by the
+ * IGB driver, and those supported by the IXGBE driver. The inclusion of these
+ * in an array built using this file depends on the definition of
+ * RTE_LIBRTE_IGB_PMD and RTE_LIBRTE_IXGBE_PMD at the time when this file is
+ * included.
+ *
+ * In order to populate an array, the user of this file must define this macro:
+ * RTE_PCI_DEV_ID_DECL(vendorID, deviceID). For example:
+ *
+ * @code
+ * struct device {
+ *     int vend;
+ *     int dev;
+ * };
+ *
+ * struct device devices[] = {
+ * #define RTE_PCI_DEV_ID_DECL(vendorID, deviceID) {vend, dev},
+ * #include <rte_pci_dev_ids.h>
+ * };
+ * @endcode
+ *
+ * Note that this file can be included multiple times within the same file.
+ */
+
+#ifndef RTE_PCI_DEV_ID_DECL
+#error "You must define RTE_PCI_DEV_ID_DECL before including rte_pci_dev_ids.h"
+#endif
+
+#ifndef PCI_VENDOR_ID_INTEL
+/** Vendor ID used by Intel devices */
+#define PCI_VENDOR_ID_INTEL 0x8086
+#endif
+
+/******************** Physical IGB devices from e1000_hw.h ********************/
+#ifdef RTE_LIBRTE_IGB_PMD
+
+#define E1000_DEV_ID_82576                      0x10C9
+#define E1000_DEV_ID_82576_FIBER                0x10E6
+#define E1000_DEV_ID_82576_SERDES               0x10E7
+#define E1000_DEV_ID_82576_QUAD_COPPER          0x10E8
+#define E1000_DEV_ID_82576_QUAD_COPPER_ET2      0x1526
+#define E1000_DEV_ID_82576_NS                   0x150A
+#define E1000_DEV_ID_82576_NS_SERDES            0x1518
+#define E1000_DEV_ID_82576_SERDES_QUAD          0x150D
+#define E1000_DEV_ID_82575EB_COPPER             0x10A7
+#define E1000_DEV_ID_82575EB_FIBER_SERDES       0x10A9
+#define E1000_DEV_ID_82575GB_QUAD_COPPER        0x10D6
+#define E1000_DEV_ID_82580_COPPER               0x150E
+#define E1000_DEV_ID_82580_FIBER                0x150F
+#define E1000_DEV_ID_82580_SERDES               0x1510
+#define E1000_DEV_ID_82580_SGMII                0x1511
+#define E1000_DEV_ID_82580_COPPER_DUAL          0x1516
+#define E1000_DEV_ID_82580_QUAD_FIBER           0x1527
+#define E1000_DEV_ID_I350_COPPER                0x1521
+#define E1000_DEV_ID_I350_FIBER                 0x1522
+#define E1000_DEV_ID_I350_SERDES                0x1523
+#define E1000_DEV_ID_I350_SGMII                 0x1524
+#define E1000_DEV_ID_I350_DA4                   0x1546
+#define E1000_DEV_ID_DH89XXCC_SGMII             0x0438
+#define E1000_DEV_ID_DH89XXCC_SERDES            0x043A
+#define E1000_DEV_ID_DH89XXCC_BACKPLANE         0x043C
+#define E1000_DEV_ID_DH89XXCC_SFP               0x0440
+
+RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82576)
+RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82576_FIBER)
+RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82576_SERDES)
+RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82576_QUAD_COPPER)
+RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2)
+RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82576_NS)
+RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82576_NS_SERDES)
+RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82576_SERDES_QUAD)
+
+/* This device is the on-board NIC on some development boards. */
+#ifdef RTE_PCI_DEV_USE_82575EB_COPPER
+RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82575EB_COPPER)
+#endif
+
+RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES)
+RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER)
+
+RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82580_COPPER)
+RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82580_FIBER)
+RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82580_SERDES)
+RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82580_SGMII)
+RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82580_COPPER_DUAL)
+RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_82580_QUAD_FIBER)
+
+/* This device is the on-board NIC on some development boards. */
+#ifndef RTE_PCI_DEV_NO_USE_I350_COPPER
+RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_I350_COPPER)
+#endif
+
+RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_I350_FIBER)
+RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_I350_SERDES)
+RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_I350_SGMII)
+RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_I350_DA4)
+RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_DH89XXCC_SGMII)
+RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_DH89XXCC_SERDES)
+RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE)
+RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, E1000_DEV_ID_DH89XXCC_SFP)
+
+#endif /* RTE_LIBRTE_IGB_PMD */
+
+
+/****************** Physical IXGBE devices from ixgbe_type.h ******************/
+#ifdef RTE_LIBRTE_IXGBE_PMD
+
+#define IXGBE_DEV_ID_82598                      0x10B6
+#define IXGBE_DEV_ID_82598_BX                   0x1508
+#define IXGBE_DEV_ID_82598AF_DUAL_PORT          0x10C6
+#define IXGBE_DEV_ID_82598AF_SINGLE_PORT        0x10C7
+#define IXGBE_DEV_ID_82598AT                    0x10C8
+#define IXGBE_DEV_ID_82598AT2                   0x150B
+#define IXGBE_DEV_ID_82598EB_SFP_LOM            0x10DB
+#define IXGBE_DEV_ID_82598EB_CX4                0x10DD
+#define IXGBE_DEV_ID_82598_CX4_DUAL_PORT        0x10EC
+#define IXGBE_DEV_ID_82598_DA_DUAL_PORT         0x10F1
+#define IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM      0x10E1
+#define IXGBE_DEV_ID_82598EB_XF_LR              0x10F4
+#define IXGBE_DEV_ID_82599_KX4                  0x10F7
+#define IXGBE_DEV_ID_82599_KX4_MEZZ             0x1514
+#define IXGBE_DEV_ID_82599_KR                   0x1517
+#define IXGBE_DEV_ID_82599_COMBO_BACKPLANE      0x10F8
+#define IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ       0x000C
+#define IXGBE_DEV_ID_82599_CX4                  0x10F9
+#define IXGBE_DEV_ID_82599_SFP                  0x10FB
+#define IXGBE_SUBDEV_ID_82599_SFP               0x11A9
+#define IXGBE_DEV_ID_82599_BACKPLANE_FCOE       0x152A
+#define IXGBE_DEV_ID_82599_SFP_FCOE             0x1529
+#define IXGBE_DEV_ID_82599_SFP_EM               0x1507
+#define IXGBE_DEV_ID_82599EN_SFP                0x1557
+#define IXGBE_DEV_ID_82599_XAUI_LOM             0x10FC
+#define IXGBE_DEV_ID_82599_T3_LOM               0x151C
+#define IXGBE_DEV_ID_X540T                      0x1528
+
+RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598)
+RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598_BX)
+RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT)
+RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT)
+RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598AT)
+RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598AT2)
+RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM)
+RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598EB_CX4)
+RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT)
+RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT)
+RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM)
+RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82598EB_XF_LR)
+RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_KX4)
+RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ)
+RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_KR)
+RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE)
+RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ)
+RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_CX4)
+RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_SFP)
+RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_SUBDEV_ID_82599_SFP)
+RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE)
+RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_SFP_FCOE)
+RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_SFP_EM)
+RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599EN_SFP)
+RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_XAUI_LOM)
+RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_82599_T3_LOM)
+RTE_PCI_DEV_ID_DECL(PCI_VENDOR_ID_INTEL, IXGBE_DEV_ID_X540T)
+
+#endif /* RTE_LIBRTE_IXGBE_PMD */
diff --git a/lib/librte_eal/common/include/rte_per_lcore.h b/lib/librte_eal/common/include/rte_per_lcore.h
new file mode 100644 (file)
index 0000000..08627dd
--- /dev/null
@@ -0,0 +1,81 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#ifndef _RTE_PER_LCORE_H_
+#define _RTE_PER_LCORE_H_
+
+/**
+ * @file
+ *
+ * Per-lcore variables in RTE
+ *
+ * This file defines an API for instantiating per-lcore "global
+ * variables" that are environment-specific. Note that in all
+ * environments, a "shared variable" is the default when you use a
+ * global variable.
+ *
+ * Parts of this are execution environment specific.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <exec-env/rte_per_lcore.h>
+
+#ifdef __DOXYGEN__
+/**
+ * Macro to define a per lcore variable "var" of type "type", don't
+ * use keywords like "static" or "volatile" in type, just prefix the
+ * whole macro.
+ */
+#define RTE_DEFINE_PER_LCORE(type, name)
+
+/**
+ * Macro to declare an extern per lcore variable "var" of type "type"
+ */
+#define RTE_DECLARE_PER_LCORE(type, name)
+
+/**
+ * Read/write the per-lcore variable value
+ */
+#define RTE_PER_LCORE(name)
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_PER_LCORE_H_ */
diff --git a/lib/librte_eal/common/include/rte_prefetch.h b/lib/librte_eal/common/include/rte_prefetch.h
new file mode 100644 (file)
index 0000000..0d21602
--- /dev/null
@@ -0,0 +1,90 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#ifndef _RTE_PREFETCH_H_
+#define _RTE_PREFETCH_H_
+
+/**
+ * @file
+ *
+ * Prefetch operations.
+ *
+ * This file defines an API for prefetch macros / inline-functions,
+ * which are architecture-dependent. Prefetching occurs when a
+ * processor requests an instruction or data from memory to cache
+ * before it is actually needed, potentially speeding up the execution of the
+ * program.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Prefetch a cache line into all cache levels.
+ * @param p
+ *   Address to prefetch
+ */
+static inline void rte_prefetch0(volatile void *p)
+{
+       asm volatile ("prefetcht0 %[p]" : [p] "+m" (*(volatile char *)p)); /* x86 T0 hint: all cache levels */
+}
+
+/**
+ * Prefetch a cache line into all cache levels except the 0th cache level.
+ * @param p
+ *   Address to prefetch
+ */
+static inline void rte_prefetch1(volatile void *p)
+{
+       asm volatile ("prefetcht1 %[p]" : [p] "+m" (*(volatile char *)p)); /* x86 T1 hint: L2 and higher */
+}
+
+/**
+ * Prefetch a cache line into all cache levels except the 0th and 1st cache
+ * levels.
+ * @param p
+ *   Address to prefetch
+ */
+static inline void rte_prefetch2(volatile void *p)
+{
+       asm volatile ("prefetcht2 %[p]" : [p] "+m" (*(volatile char *)p)); /* x86 T2 hint: L3 and higher */
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_PREFETCH_H_ */
diff --git a/lib/librte_eal/common/include/rte_random.h b/lib/librte_eal/common/include/rte_random.h
new file mode 100644 (file)
index 0000000..a86906f
--- /dev/null
@@ -0,0 +1,93 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#ifndef _RTE_RANDOM_H_
+#define _RTE_RANDOM_H_
+
+/**
+ * @file
+ *
+ * Pseudo-random Generators in RTE
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+#include <stdlib.h>
+
+/**
+ * Seed the pseudo-random generator.
+ *
+ * The generator is automatically seeded by the EAL init with a timer
+ * value. It may need to be re-seeded by the user with a real random
+ * value.
+ *
+ * @param seedval
+ *   The value of the seed.
+ */
+static inline void
+rte_srand(uint64_t seedval)
+{
+       srand48((long unsigned int)seedval); /* seeds the lrand48() stream used by rte_rand() */
+}
+
+/**
+ * Get a pseudo-random value.
+ *
+ * This function generates pseudo-random numbers using the linear
+ * congruential algorithm and 48-bit integer arithmetic, called twice
+ * to generate a 64-bit value.
+ *
+ * @return
+ *   A pseudo-random value; note that bits 31 and 63 are always zero.
+ */
+static inline uint64_t
+rte_rand(void)
+{
+       uint64_t val;
+       val = lrand48();   /* lrand48() returns 31 random bits (POSIX) */
+       val <<= 32;
+       val += lrand48();  /* NOTE(review): bits 31 and 63 are therefore never set */
+       return val;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+
+#endif /* _RTE_RANDOM_H_ */
diff --git a/lib/librte_eal/common/include/rte_rwlock.h b/lib/librte_eal/common/include/rte_rwlock.h
new file mode 100644 (file)
index 0000000..a0b5e01
--- /dev/null
@@ -0,0 +1,174 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#ifndef _RTE_RWLOCK_H_
+#define _RTE_RWLOCK_H_
+
+/**
+ * @file
+ *
+ * RTE Read-Write Locks
+ *
+ * This file defines an API for read-write locks. The lock is used to
+ * protect data that allows multiple readers in parallel, but only
+ * one writer. All readers are blocked until the writer is finished
+ * writing.
+ *
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_common.h>
+#include <rte_atomic.h>
+
+/**
+ * The rte_rwlock_t type.
+ *
+ * cnt is -1 when write lock is held, and > 0 when read locks are held.
+ */
+typedef struct {
+       volatile int32_t cnt; /**< -1 when W lock held, > 0 when R locks held. */
+} rte_rwlock_t;
+
+/**
+ * A static rwlock initializer.
+ */
+#define RTE_RWLOCK_INITIALIZER { 0 }
+
+/**
+ * Initialize the rwlock to an unlocked state.
+ *
+ * @param rwl
+ *   A pointer to the rwlock structure.
+ */
+static inline void
+rte_rwlock_init(rte_rwlock_t *rwl)
+{
+       rwl->cnt = 0; /* 0 == unlocked, same state as RTE_RWLOCK_INITIALIZER */
+}
+
+/**
+ * Take a read lock. Loop until the lock is held.
+ *
+ * @param rwl
+ *   A pointer to a rwlock structure.
+ */
+static inline void
+rte_rwlock_read_lock(rte_rwlock_t *rwl)
+{
+       int32_t x;
+       int success = 0;
+
+       while (success == 0) {
+               x = rwl->cnt;
+               /* cnt < 0 means a writer holds the lock: spin and retry */
+               if (x < 0) {
+                       rte_pause();
+                       continue;
+               }
+               success = rte_atomic32_cmpset((volatile uint32_t *)&rwl->cnt,
+                                             x, x + 1); /* register one more reader */
+       }
+}
+
+/**
+ * Release a read lock.
+ *
+ * @param rwl
+ *   A pointer to the rwlock structure.
+ */
+static inline void
+rte_rwlock_read_unlock(rte_rwlock_t *rwl)
+{
+       /* in debug mode, we should check that rwl->cnt is > 0 */
+
+       /* atomically drop one reader (same as rte_atomic32_dec) */
+       asm volatile(MPLOCKED
+                    "decl %[cnt]"
+                    : [cnt] "=m" (rwl->cnt) /* output (0) */
+                    : "m" (rwl->cnt)        /* input (1) */
+                    );                      /* no clobber-list */
+}
+
+/**
+ * Take a write lock. Loop until the lock is held.
+ *
+ * @param rwl
+ *   A pointer to a rwlock structure.
+ */
+static inline void
+rte_rwlock_write_lock(rte_rwlock_t *rwl)
+{
+       int32_t x;
+       int success = 0;
+
+       while (success == 0) {
+               x = rwl->cnt;
+               /* any reader (cnt > 0) or writer (cnt < 0) blocks us: spin */
+               if (x != 0) {
+                       rte_pause();
+                       continue;
+               }
+               success = rte_atomic32_cmpset((volatile uint32_t *)&rwl->cnt,
+                                             0, -1); /* -1 marks the write lock held */
+       }
+}
+
+/**
+ * Release a write lock.
+ *
+ * @param rwl
+ *   A pointer to a rwlock structure.
+ */
+static inline void
+rte_rwlock_write_unlock(rte_rwlock_t *rwl)
+{
+       /* in debug mode, we should check that rwl->cnt is < 0 */
+
+       /* atomically restore cnt from -1 to 0 (same as rte_atomic32_inc) */
+       asm volatile(MPLOCKED
+                    "incl %[cnt]"
+                    : [cnt] "=m" (rwl->cnt) /* output (0) */
+                    : "m" (rwl->cnt)        /* input (1) */
+                    );                      /* no clobber-list */
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_RWLOCK_H_ */
diff --git a/lib/librte_eal/common/include/rte_spinlock.h b/lib/librte_eal/common/include/rte_spinlock.h
new file mode 100644 (file)
index 0000000..7961809
--- /dev/null
@@ -0,0 +1,243 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#ifndef _RTE_SPINLOCK_H_
+#define _RTE_SPINLOCK_H_
+
+/**
+ * @file
+ *
+ * RTE Spinlocks
+ *
+ * This file defines an API for spinlocks, which are implemented
+ * in an architecture-specific way. This kind of lock simply waits in
+ * a loop repeatedly checking until the lock becomes available.
+ *
+ * All locks must be initialised before use, and only initialised once.
+ *
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_lcore.h>
+
+/**
+ * The rte_spinlock_t type.
+ */
+typedef struct {
+       volatile int locked; /**< lock status 0 = unlocked, 1 = locked */
+} rte_spinlock_t;
+
+/**
+ * A static spinlock initializer.
+ */
+#define RTE_SPINLOCK_INITIALIZER { 0 }
+
+/**
+ * Initialize the spinlock to an unlocked state.
+ *
+ * @param sl
+ *   A pointer to the spinlock.
+ */
+static inline void
+rte_spinlock_init(rte_spinlock_t *sl)
+{
+       /* plain store: the lock must be initialized before concurrent use */
+       sl->locked = 0;
+}
+
+/**
+ * Take the spinlock.
+ *
+ * @param sl
+ *   A pointer to the spinlock.
+ */
+static inline void
+rte_spinlock_lock(rte_spinlock_t *sl)
+{
+       int lock_val = 1;
+       /*
+        * Test-and-set with a test-and-test-and-set spin path:
+        *  1: xchg atomically swaps 1 into sl->locked (xchg with a memory
+        *     operand is implicitly locked on x86); the old value lands in lv.
+        *     If the old value was 0 the lock is now ours (jz 3f).
+        *  2: otherwise spin with pause, re-reading sl->locked without bus
+        *     locking, and only retry the xchg once it looks free again.
+        * The "memory" clobber makes this act as an acquire barrier for the
+        * compiler.
+        */
+       asm volatile (
+                       "1:\n"
+                       "xchg %[locked], %[lv]\n"
+                       "test %[lv], %[lv]\n"
+                       "jz 3f\n"
+                       "2:\n"
+                       "pause\n"
+                       "cmp $0, %[locked]\n"
+                       "jnz 2b\n"
+                       "jmp 1b\n"
+                       "3:\n"
+                       : [locked] "=m" (sl->locked), [lv] "=q" (lock_val)
+                       : "[lv]" (lock_val)
+                       : "memory");
+}
+
+/**
+ * Release the spinlock.
+ *
+ * @param sl
+ *   A pointer to the spinlock.
+ */
+static inline void
+rte_spinlock_unlock (rte_spinlock_t *sl)
+{
+       int unlock_val = 0;
+       /* atomic xchg of 0 into sl->locked; the "memory" clobber keeps the
+        * compiler from sinking protected accesses past the release */
+       asm volatile (
+                       "xchg %[locked], %[ulv]\n"
+                       : [locked] "=m" (sl->locked), [ulv] "=q" (unlock_val)
+                       : "[ulv]" (unlock_val)
+                       : "memory");
+}
+
+/**
+ * Try to take the lock.
+ *
+ * @param sl
+ *   A pointer to the spinlock.
+ * @return
+ *   1 if the lock is successfully taken; 0 otherwise.
+ */
+static inline int
+rte_spinlock_trylock (rte_spinlock_t *sl)
+{
+       int lockval = 1;
+
+       /* single atomic xchg: lockval receives the previous lock state */
+       asm volatile (
+                       "xchg %[locked], %[lockval]"
+                       : [locked] "=m" (sl->locked), [lockval] "=q" (lockval)
+                       : "[lockval]" (lockval)
+                       : "memory");
+
+       /* previous value 0 means the lock was free and is now ours */
+       return (lockval == 0);
+}
+
+/**
+ * Test if the lock is taken.
+ *
+ * @param sl
+ *   A pointer to the spinlock.
+ * @return
+ *   1 if the lock is currently taken; 0 otherwise.
+ */
+static inline int rte_spinlock_is_locked (rte_spinlock_t *sl)
+{
+       /* plain read: a snapshot only, the state may change immediately */
+       return sl->locked;
+}
+
+/**
+ * The rte_spinlock_recursive_t type.
+ */
+typedef struct {
+       rte_spinlock_t sl; /**< the actual spinlock */
+       volatile int user; /**< core id using lock, -1 for unused */
+       volatile int count; /**< count of time this lock has been called */
+} rte_spinlock_recursive_t;
+
+/**
+ * A static recursive spinlock initializer.
+ */
+#define RTE_SPINLOCK_RECURSIVE_INITIALIZER {RTE_SPINLOCK_INITIALIZER, -1, 0}
+
+/**
+ * Initialize the recursive spinlock to an unlocked state.
+ *
+ * @param slr
+ *   A pointer to the recursive spinlock.
+ */
+static inline void rte_spinlock_recursive_init(rte_spinlock_recursive_t *slr)
+{
+       rte_spinlock_init(&slr->sl);
+       slr->user = -1; /* -1 == no owning lcore */
+       slr->count = 0; /* recursion depth */
+}
+
+/**
+ * Take the recursive spinlock.
+ *
+ * @param slr
+ *   A pointer to the recursive spinlock.
+ */
+static inline void rte_spinlock_recursive_lock(rte_spinlock_recursive_t *slr)
+{
+       int id = rte_lcore_id();
+
+       /* only the owner sets user to its own id, so observing user == id
+        * means we already hold the lock; assumes one thread per lcore id -
+        * TODO confirm */
+       if (slr->user != id) {
+               rte_spinlock_lock(&slr->sl);
+               slr->user = id;
+       }
+       slr->count++;
+}
+/**
+ * Release the recursive spinlock.
+ *
+ * @param slr
+ *   A pointer to the recursive spinlock.
+ */
+static inline void rte_spinlock_recursive_unlock(rte_spinlock_recursive_t *slr)
+{
+       /* release the underlying lock only when the outermost unlock
+        * balances the first lock call */
+       if (--(slr->count) == 0) {
+               slr->user = -1;
+               rte_spinlock_unlock(&slr->sl);
+       }
+
+}
+
+/**
+ * Try to take the recursive lock.
+ *
+ * @param slr
+ *   A pointer to the recursive spinlock.
+ * @return
+ *   1 if the lock is successfully taken; 0 otherwise.
+ */
+static inline int rte_spinlock_recursive_trylock(rte_spinlock_recursive_t *slr)
+{
+       int id = rte_lcore_id();
+
+       if (slr->user != id) {
+               /* not already owned by this lcore: try the real lock */
+               if (rte_spinlock_trylock(&slr->sl) == 0)
+                       return 0;
+               slr->user = id;
+       }
+       slr->count++; /* recursive acquisition just bumps the depth */
+       return 1;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_SPINLOCK_H_ */
diff --git a/lib/librte_eal/common/include/rte_string_fns.h b/lib/librte_eal/common/include/rte_string_fns.h
new file mode 100644 (file)
index 0000000..da3a3c9
--- /dev/null
@@ -0,0 +1,165 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+/**
+ * @file
+ *
+ * String handling functions: a bounds-checked snprintf wrapper and an in-place string splitter
+ */
+
+#ifndef _RTE_STRING_FNS_H_
+#define _RTE_STRING_FNS_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <stddef.h>
+#include <errno.h>
+
+/**
+ * Safer version of snprintf that writes up to buflen characters to
+ * the output buffer and ensures that the resultant string is null-terminated,
+ * that is, it writes at most buflen-1 actual string characters to buffer. The
+ * return value is the number of characters which should be written to the
+ * buffer, so string truncation can be detected by the caller by checking if
+ * the return value is greater than or equal to the buflen.
+ *
+ * @param buffer
+ *   The buffer into which the output is to be written
+ *
+ * @param buflen
+ *   The size of the output buffer
+ *
+ * @param format
+ *   The format string to be printed to the buffer
+ *
+ * @return
+ *   The number of characters written to the buffer, or if the string has been
+ *   truncated, the number of characters which would have been written had the
+ *   buffer been sufficiently big.
+ *
+ */
+static inline int
+rte_snprintf(char *buffer, int buflen, const char *format, ...)
+{
+       int len;
+       va_list ap;
+
+       /* a NULL buffer is only valid with buflen == 0 ("count only" mode) */
+       if (buffer == NULL && buflen != 0)
+               goto einval_error;
+       if (format == NULL) {
+               /* leave a valid empty string behind before failing */
+               if (buflen > 0)
+                       buffer[0] = '\0';
+               goto einval_error;
+       }
+
+       va_start(ap, format);
+       len = vsnprintf(buffer, buflen, format, ap);
+       va_end(ap);
+       /* on truncation (len >= buflen), force NUL-termination of the last
+        * byte; C99 vsnprintf already guarantees this, so it is defensive */
+       if (len >= buflen && buflen > 0)
+               buffer[buflen - 1] = '\0';
+
+       return len;
+
+einval_error:
+       /* invalid arguments: signal via errno, as return value is length */
+       errno = EINVAL;
+       return -1;
+}
+
+
+/**
+ * Takes string "string" parameter and splits it at character "delim"
+ * up to maxtokens-1 times - to give "maxtokens" resulting tokens. Like
+ * strtok or strsep functions, this modifies its input string, by replacing
+ * instances of "delim" with '\0'. All resultant tokens are returned in the
+ * "tokens" array which must have enough entries to hold "maxtokens".
+ *
+ * @param string
+ *   The input string to be split into tokens
+ *
+ * @param stringlen
+ *   The max length of the input buffer
+ *
+ * @param tokens
+ *   The array to hold the pointers to the tokens in the string
+ *
+ * @param maxtokens
+ *   The number of elements in the tokens array. At most, maxtokens-1 splits
+ *   of the string will be done.
+ *
+ * @param delim
+ *   The character on which the split of the data will be done
+ *
+ * @return
+ *   The number of tokens in the tokens array.
+ */
+static inline int
+rte_strsplit(char *string, int stringlen,
+               char **tokens, int maxtokens, char delim)
+{
+       int i, tok = 0;
+       int tokstart = 1; /* first token is right at start of string */
+
+       if (string == NULL || tokens == NULL)
+               goto einval_error;
+
+       for (i = 0; i < stringlen; i++) {
+               /* stop at end of string or once the tokens array is full */
+               if (string[i] == '\0' || tok >= maxtokens)
+                       break;
+               if (tokstart) {
+                       /* record the start of the current token */
+                       tokstart = 0;
+                       tokens[tok++] = &string[i];
+               }
+               if (string[i] == delim) {
+                       /* split in place; the next char starts a new token */
+                       string[i] = '\0';
+                       tokstart = 1;
+               }
+       }
+       return tok;
+
+einval_error:
+       errno = EINVAL;
+       return -1;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+
+#endif /* _RTE_STRING_FNS_H_ */
diff --git a/lib/librte_eal/common/include/rte_tailq.h b/lib/librte_eal/common/include/rte_tailq.h
new file mode 100644 (file)
index 0000000..db13013
--- /dev/null
@@ -0,0 +1,146 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#ifndef _RTE_TAILQ_H_
+#define _RTE_TAILQ_H_
+
+/**
+ * @file
+ * Definition of named tail-queue heads, stored in shared memory for lookup by multi-process applications.
+ */
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <sys/queue.h>
+
+#ifndef __KERNEL__
+/** dummy structure type used by the rte_tailq APIs */
+struct rte_dummy {
+       TAILQ_ENTRY(rte_dummy) next; /**< Pointer entries for a tailq list */
+};
+/** dummy */
+TAILQ_HEAD(rte_dummy_head, rte_dummy);
+
+#define RTE_TAILQ_NAMESIZE 32
+
+/**
+ * The structure defining a tailq header entry for storing
+ * in the rte_config structure in shared memory. Each tailq
+ * is identified by name.
+ * Any library storing a set of objects e.g. rings, mempools, hash-tables,
+ * is recommended to use an entry here, so as to make it easy for
+ * a multi-process app to find already-created elements in shared memory.
+ */
+struct rte_tailq_head {
+       struct rte_dummy_head tailq_head; /**< NOTE: must be first element */
+       char qname[RTE_TAILQ_NAMESIZE]; /**< Queue name */
+};
+#else
+struct rte_tailq_head {};
+#endif
+
+/**
+ * Utility macro to make reserving a tailqueue for a particular struct easier.
+ *
+ * @param name
+ *   The name to be given to the tailq - used by lookup to find it later
+ *
+ * @param struct_name
+ *   The name of the list type we are using. (Generally this is the same as the
+ *   first parameter passed to TAILQ_HEAD macro)
+ *
+ * @return
+ *   The return value from rte_eal_tailq_reserve, typecast to the appropriate
+ *   structure pointer type.
+ *   NULL on error, since the tailq_head is the first
+ *   element in the rte_tailq_head structure.
+ */
+#define RTE_TAILQ_RESERVE(name, struct_name) \
+       (struct struct_name *)(&rte_eal_tailq_reserve(name)->tailq_head)
+
+/**
+ * Utility macro to make looking up a tailqueue for a particular struct easier.
+ *
+ * @param name
+ *   The name of the tailq
+ *
+ * @param struct_name
+ *   The name of the list type we are using. (Generally this is the same as the
+ *   first parameter passed to TAILQ_HEAD macro)
+ *
+ * @return
+ *   The return value from rte_eal_tailq_lookup, typecast to the appropriate
+ *   structure pointer type.
+ *   NULL on error, since the tailq_head is the first
+ *   element in the rte_tailq_head structure.
+ */
+#define RTE_TAILQ_LOOKUP(name, struct_name) \
+       (struct struct_name *)(&rte_eal_tailq_lookup(name)->tailq_head)
+
+/**
+ * Reserve a slot in the tailq list for a particular tailq header
+ * Note: this function, along with rte_tailq_lookup, is not multi-thread safe,
+ * and both these functions should only be called from a single thread at a time
+ *
+ * @param name
+ *   The name to be given to the tail queue.
+ * @return
+ *   A pointer to the newly reserved tailq entry
+ */
+struct rte_tailq_head *rte_eal_tailq_reserve(const char *name);
+
+/**
+ * Lookup for a tail queue.
+ *
+ * Get a pointer to a tail queue header of an already reserved tail
+ * queue identified by the name given as an argument.
+ * Note: this function, along with rte_tailq_reserve, is not multi-thread safe,
+ * and both these functions should only be called from a single thread at a time
+ *
+ * @param name
+ *   The name of the queue.
+ * @return
+ *   A pointer to the tail queue head structure.
+ */
+struct rte_tailq_head *rte_eal_tailq_lookup(const char *name);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_TAILQ_H_ */
diff --git a/lib/librte_eal/common/include/rte_version.h b/lib/librte_eal/common/include/rte_version.h
new file mode 100644 (file)
index 0000000..b29c1d3
--- /dev/null
@@ -0,0 +1,85 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+/**
+ * @file
+ * Definitions of Intel(R) DPDK version numbers
+ */
+
+#ifndef _RTE_VERSION_H_
+#define _RTE_VERSION_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+#include <rte_common.h>
+
+/**
+ * Major version number i.e. the x in x.y.z
+ */
+#define RTE_VER_MAJOR 1
+
+/**
+ * Minor version number i.e. the y in x.y.z
+ */
+#define RTE_VER_MINOR 2
+
+/**
+ * Patch level number i.e. the z in x.y.z
+ */
+#define RTE_VER_PATCH_LEVEL 3
+
+#define RTE_VER_PREFIX "RTE"
+
+/**
+ * Function returning string of version number: "RTE x.y.z"
+ * @return
+ *     string
+ */
+static inline const char *
+rte_version(void) {
+       /* adjacent string literals are concatenated at compile time, so this
+        * returns a single static string such as "RTE 1.2.3" */
+       return RTE_VER_PREFIX" "
+                       RTE_STR(RTE_VER_MAJOR)"."
+                       RTE_STR(RTE_VER_MINOR)"."
+                       RTE_STR(RTE_VER_PATCH_LEVEL);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_VERSION_H_ */
diff --git a/lib/librte_eal/common/include/rte_warnings.h b/lib/librte_eal/common/include/rte_warnings.h
new file mode 100644 (file)
index 0000000..eb00320
--- /dev/null
@@ -0,0 +1,88 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+/**
+ * @file
+ * Definitions of warnings for use of various insecure functions
+ */
+
+#ifndef _RTE_WARNINGS_H_
+#define _RTE_WARNINGS_H_
+
+#ifdef RTE_INSECURE_FUNCTION_WARNING
+
+/* we need to include all used standard header files so that they appear
+ * _before_ we poison the function names.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <dirent.h>
+#include <stdarg.h>
+#include <errno.h>
+
+/* rte_snprintf uses snprintf, so include its definition before we poison the
+ * functions, otherwise we'll get an error in it. */
+#include <rte_string_fns.h>
+
+/* the following function are deemed not fully secure for use e.g. they
+ * do not always null-terminate arguments */
+#pragma GCC poison sprintf strtok snprintf vsnprintf
+#pragma GCC poison strlen strcpy strcat
+#pragma GCC poison sscanf
+
+/* other unsafe functions may be implemented as macros so just undef them */
+#ifdef strsep
+#undef strsep
+#else
+#pragma GCC poison strsep
+#endif
+
+#ifdef strncpy
+#undef strncpy
+#else
+#pragma GCC poison strncpy
+#endif
+
+#ifdef strncat
+#undef strncat
+#else
+#pragma GCC poison strncat
+#endif
+
+#endif
+
+#endif /* _RTE_WARNINGS_H_ */
diff --git a/lib/librte_eal/common/include/x86_64/arch/rte_atomic.h b/lib/librte_eal/common/include/x86_64/arch/rte_atomic.h
new file mode 100644 (file)
index 0000000..a335c7f
--- /dev/null
@@ -0,0 +1,943 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+/*
+ * Inspired from FreeBSD src/sys/amd64/include/atomic.h
+ * Copyright (c) 1998 Doug Rabson
+ * All rights reserved.
+ */
+
+#ifndef _RTE_ATOMIC_H_
+#error "don't include this file directly, please include generic <rte_atomic.h>"
+#endif
+
+#ifndef _RTE_X86_64_ATOMIC_H_
+#define _RTE_X86_64_ATOMIC_H_
+
+/**
+ * @file
+ * Atomic Operations on x86_64
+ */
+
+#if RTE_MAX_LCORE == 1
+#define MPLOCKED                        /**< No need to insert MP lock prefix. */
+#else
+#define MPLOCKED        "lock ; "       /**< Insert MP lock prefix. */
+#endif
+
+/**
+ * General memory barrier.
+ *
+ * Guarantees that the LOAD and STORE operations generated before the
+ * barrier occur before the LOAD and STORE operations generated after.
+ */
+#define        rte_mb()  asm volatile("mfence;" : : : "memory")
+
+/**
+ * Write memory barrier.
+ *
+ * Guarantees that the STORE operations generated before the barrier
+ * occur before the STORE operations generated after.
+ */
+#define        rte_wmb() asm volatile("sfence;" : : : "memory")
+
+/**
+ * Read memory barrier.
+ *
+ * Guarantees that the LOAD operations generated before the barrier
+ * occur before the LOAD operations generated after.
+ */
+#define        rte_rmb() asm volatile("lfence;" : : : "memory")
+
+/*------------------------- 16 bit atomic operations -------------------------*/
+
+/**
+ * Atomic compare and set.
+ *
+ * (atomic) equivalent to:
+ *   if (*dst == exp)
+ *     *dst = src (all 16-bit words)
+ *
+ * @param dst
+ *   The destination location into which the value will be written.
+ * @param exp
+ *   The expected value.
+ * @param src
+ *   The new value.
+ * @return
+ *   Non-zero on success; 0 on failure.
+ */
+static inline int
+rte_atomic16_cmpset(volatile uint16_t *dst, uint16_t exp, uint16_t src)
+{
+       uint8_t res;
+
+       /* cmpxchgw compares AX (= exp) with *dst; if equal it stores src and
+        * sets ZF, which sete then captures into res */
+       asm volatile(
+                       MPLOCKED
+                       "cmpxchgw %[src], %[dst];"
+                       "sete %[res];"
+                       : [res] "=a" (res),     /* output */
+                         [dst] "=m" (*dst)
+                       : [src] "r" (src),      /* input */
+                         "a" (exp),
+                         "m" (*dst)
+                       : "memory");            /* clobber list */
+       return res;
+}
+
+/**
+ * The atomic counter structure.
+ */
+typedef struct {
+       volatile int16_t cnt; /**< An internal counter value. */
+} rte_atomic16_t;
+
+/**
+ * Static initializer for an atomic counter.
+ */
+#define RTE_ATOMIC16_INIT(val) { (val) }
+
+/**
+ * Initialize an atomic counter.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ */
+static inline void
+rte_atomic16_init(rte_atomic16_t *v)
+{
+       /* plain store: init must happen before the counter is shared */
+       v->cnt = 0;
+}
+
+/**
+ * Atomically read a 16-bit value from a counter.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @return
+ *   The value of the counter.
+ */
+static inline int16_t
+rte_atomic16_read(const rte_atomic16_t *v)
+{
+       /* plain volatile load - presumably relies on aligned 16-bit loads
+        * being single accesses on x86; no barrier is emitted */
+       return v->cnt;
+}
+
+/**
+ * Atomically set a counter to a 16-bit value.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @param new_value
+ *   The new value for the counter.
+ */
+static inline void
+rte_atomic16_set(rte_atomic16_t *v, int16_t new_value)
+{
+       /* plain volatile store; no barrier or lock prefix is emitted */
+       v->cnt = new_value;
+}
+
+/**
+ * Atomically add a 16-bit value to an atomic counter.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @param inc
+ *   The value to be added to the counter.
+ */
+static inline void
+rte_atomic16_add(rte_atomic16_t *v, int16_t inc)
+{
+       /* lock-prefixed read-modify-write; the result is not returned */
+       asm volatile(
+                       MPLOCKED
+                       "addw %[inc], %[cnt]"
+                       : [cnt] "=m" (v->cnt)   /* output */
+                       : [inc] "ir" (inc),     /* input */
+                         "m" (v->cnt)
+                       );
+}
+
+/**
+ * Atomically subtract a 16-bit value from an atomic counter.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @param dec
+ *   The value to be subtracted from the counter.
+ */
+static inline void
+rte_atomic16_sub(rte_atomic16_t *v, int16_t dec)
+{
+       /* lock-prefixed read-modify-write; the result is not returned */
+       asm volatile(
+                       MPLOCKED
+                       "subw %[dec], %[cnt]"
+                       : [cnt] "=m" (v->cnt)   /* output */
+                       : [dec] "ir" (dec),     /* input */
+                         "m" (v->cnt)
+                       );
+}
+
+/**
+ * Atomically increment a counter by one.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ */
+static inline void
+rte_atomic16_inc(rte_atomic16_t *v)
+{
+       /* lock-prefixed single-instruction increment */
+       asm volatile(
+                       MPLOCKED
+                       "incw %[cnt]"
+                       : [cnt] "=m" (v->cnt)   /* output */
+                       : "m" (v->cnt)          /* input */
+                       );
+}
+
+/**
+ * Atomically decrement a counter by one.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ */
+static inline void
+rte_atomic16_dec(rte_atomic16_t *v)
+{
+       /* lock-prefixed single-instruction decrement */
+       asm volatile(
+                       MPLOCKED
+                       "decw %[cnt]"
+                       : [cnt] "=m" (v->cnt)   /* output */
+                       : "m" (v->cnt)          /* input */
+                       );
+}
+
+/**
+ * Atomically add a 16-bit value to a counter and return the result.
+ *
+ * Atomically adds the 16-bits value (inc) to the atomic counter (v) and
+ * returns the value of v after addition.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @param inc
+ *   The value to be added to the counter.
+ * @return
+ *   The value of v after the addition.
+ */
+static inline int16_t
+rte_atomic16_add_return(rte_atomic16_t *v, int16_t inc)
+{
+       int16_t prev = inc;
+
+       /* xaddw atomically exchanges prev with the counter and stores the
+        * sum: afterwards prev holds the counter's OLD value, so the new
+        * value is prev + inc */
+       asm volatile(
+                       MPLOCKED
+                       "xaddw %[prev], %[cnt]"
+                       : [prev] "+r" (prev),   /* output */
+                         [cnt] "=m" (v->cnt)
+                       : "m" (v->cnt)          /* input */
+                       );
+       return (int16_t)(prev + inc);
+}
+
+/**
+ * Atomically subtract a 16-bit value from a counter and return
+ * the result.
+ *
+ * Atomically subtracts the 16-bit value (inc) from the atomic counter
+ * (v) and returns the value of v after the subtraction.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @param dec
+ *   The value to be subtracted from the counter.
+ * @return
+ *   The value of v after the subtraction.
+ */
+static inline int16_t
+rte_atomic16_sub_return(rte_atomic16_t *v, int16_t dec)
+{
+       /* implemented as the atomic addition of the negated operand */
+       return rte_atomic16_add_return(v, (int16_t)-dec);
+}
+
+/**
+ * Atomically increment a 16-bit counter by one and test.
+ *
+ * Atomically increments the atomic counter (v) by one and returns true if
+ * the result is 0, or false in all other cases.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @return
+ *   True if the result after the increment operation is 0; false otherwise.
+ */
+static inline int rte_atomic16_inc_and_test(rte_atomic16_t *v)
+{
+       uint8_t ret;
+
+       /* sete captures ZF set by the locked incw, i.e. whether the new
+        * counter value is zero.  NOTE(review): unlike cmpset there is no
+        * "memory" clobber here; ordering relies on the "+m" operand only
+        * -- confirm this is intended. */
+       asm volatile(
+                       MPLOCKED
+                       "incw %[cnt] ; "
+                       "sete %[ret]"
+                       : [cnt] "+m" (v->cnt),  /* output */
+                         [ret] "=qm" (ret)
+                       );
+       return (ret != 0);
+}
+
+/**
+ * Atomically decrement a 16-bit counter by one and test.
+ *
+ * Atomically decrements the atomic counter (v) by one and returns true if
+ * the result is 0, or false in all other cases.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @return
+ *   True if the result after the decrement operation is 0; false otherwise.
+ */
+static inline int rte_atomic16_dec_and_test(rte_atomic16_t *v)
+{
+       uint8_t ret;
+
+       /* sete captures ZF from the locked decw: ret != 0 iff the counter
+        * reached exactly zero. */
+       asm volatile(MPLOCKED
+                       "decw %[cnt] ; "
+                       "sete %[ret]"
+                       : [cnt] "+m" (v->cnt),  /* output */
+                         [ret] "=qm" (ret)
+                       );
+       return (ret != 0);
+}
+
+/**
+ * Atomically test and set a 16-bit atomic counter.
+ *
+ * If the counter value is already set, return 0 (failed). Otherwise, set
+ * the counter value to 1 and return 1 (success).
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @return
+ *   0 if failed; else 1, success.
+ */
+static inline int rte_atomic16_test_and_set(rte_atomic16_t *v)
+{
+       /* One-shot flag: succeeds only on the 0 -> 1 transition. */
+       return rte_atomic16_cmpset((volatile uint16_t *)&v->cnt, 0, 1);
+}
+
+/**
+ * Atomically set a 16-bit counter to 0.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ */
+static inline void rte_atomic16_clear(rte_atomic16_t *v)
+{
+       /* Plain volatile store; an aligned 16-bit store is atomic on x86. */
+       v->cnt = 0;
+}
+
+/*------------------------- 32 bit atomic operations -------------------------*/
+
+/**
+ * Atomic compare and set.
+ *
+ * (atomic) equivalent to:
+ *   if (*dst == exp)
+ *     *dst = src (all 32-bit words)
+ *
+ * @param dst
+ *   The destination location into which the value will be written.
+ * @param exp
+ *   The expected value.
+ * @param src
+ *   The new value.
+ * @return
+ *   Non-zero on success; 0 on failure.
+ */
+static inline int
+rte_atomic32_cmpset(volatile uint32_t *dst, uint32_t exp, uint32_t src)
+{
+       uint8_t res;
+
+       /* cmpxchg implicitly compares against eax: the "a" input pins 'exp'
+        * there, and the "=a" output reuses the register for the sete
+        * result.  The "memory" clobber makes this a compiler barrier. */
+       asm volatile(
+                       MPLOCKED
+                       "cmpxchgl %[src], %[dst];"
+                       "sete %[res];"
+                       : [res] "=a" (res),     /* output */
+                         [dst] "=m" (*dst)
+                       : [src] "r" (src),      /* input */
+                         "a" (exp),
+                         "m" (*dst)
+                       : "memory");            /* no-clobber list */
+       return res;
+}
+
+/**
+ * The atomic counter structure.
+ */
+typedef struct {
+       volatile int32_t cnt; /**< An internal counter value. */
+} rte_atomic32_t;
+
+/**
+ * Static initializer for an atomic counter.
+ *
+ * Example: static rte_atomic32_t refcnt = RTE_ATOMIC32_INIT(0);
+ */
+#define RTE_ATOMIC32_INIT(val) { (val) }
+
+/**
+ * Initialize an atomic counter.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ */
+static inline void
+rte_atomic32_init(rte_atomic32_t *v)
+{
+       /* Plain store: initialize before the counter is shared. */
+       v->cnt = 0;
+}
+
+/**
+ * Atomically read a 32-bit value from a counter.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @return
+ *   The value of the counter.
+ */
+static inline int32_t
+rte_atomic32_read(const rte_atomic32_t *v)
+{
+       /* Volatile load; an aligned 32-bit load is atomic on x86. */
+       return v->cnt;
+}
+
+/**
+ * Atomically set a counter to a 32-bit value.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @param new_value
+ *   The new value for the counter.
+ */
+static inline void
+rte_atomic32_set(rte_atomic32_t *v, int32_t new_value)
+{
+       /* Volatile store; an aligned 32-bit store is atomic on x86. */
+       v->cnt = new_value;
+}
+
+/**
+ * Atomically add a 32-bit value to an atomic counter.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @param inc
+ *   The value to be added to the counter.
+ */
+static inline void
+rte_atomic32_add(rte_atomic32_t *v, int32_t inc)
+{
+       /* "ir" lets gcc pass 'inc' as an immediate or register; the paired
+        * "=m"/"m" operands mark v->cnt as read-modify-write. */
+       asm volatile(
+                       MPLOCKED
+                       "addl %[inc], %[cnt]"
+                       : [cnt] "=m" (v->cnt)   /* output */
+                       : [inc] "ir" (inc),     /* input */
+                         "m" (v->cnt)
+                       );
+}
+
+/**
+ * Atomically subtract a 32-bit value from an atomic counter.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @param dec
+ *   The value to be subtracted from the counter.
+ */
+static inline void
+rte_atomic32_sub(rte_atomic32_t *v, int32_t dec)
+{
+       /* Same constraint scheme as rte_atomic32_add, using subl. */
+       asm volatile(
+                       MPLOCKED
+                       "subl %[dec], %[cnt]"
+                       : [cnt] "=m" (v->cnt)   /* output */
+                       : [dec] "ir" (dec),     /* input */
+                         "m" (v->cnt)
+                       );
+}
+
+/**
+ * Atomically increment a counter by one.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ */
+static inline void
+rte_atomic32_inc(rte_atomic32_t *v)
+{
+       /* Locked incl; "=m" output + "m" input on the same location marks
+        * v->cnt as read-modify-write (equivalent to "+m"). */
+       asm volatile(
+                       MPLOCKED
+                       "incl %[cnt]"
+                       : [cnt] "=m" (v->cnt)   /* output */
+                       : "m" (v->cnt)          /* input */
+                       );
+}
+
+/**
+ * Atomically decrement a counter by one.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ */
+static inline void
+rte_atomic32_dec(rte_atomic32_t *v)
+{
+       /* Locked decl; constraint scheme mirrors rte_atomic32_inc. */
+       asm volatile(
+                       MPLOCKED
+                       "decl %[cnt]"
+                       : [cnt] "=m" (v->cnt)   /* output */
+                       : "m" (v->cnt)          /* input */
+                       );
+}
+
+/**
+ * Atomically add a 32-bit value to a counter and return the result.
+ *
+ * Atomically adds the 32-bit value (inc) to the atomic counter (v) and
+ * returns the value of v after addition.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @param inc
+ *   The value to be added to the counter.
+ * @return
+ *   The value of v after the addition.
+ */
+static inline int32_t
+rte_atomic32_add_return(rte_atomic32_t *v, int32_t inc)
+{
+       /* xadd leaves the OLD counter value in 'prev'; seeding it with
+        * 'inc' supplies the addend, and 'prev + inc' reconstructs the
+        * post-add value. */
+       int32_t prev = inc;
+
+       asm volatile(
+                       MPLOCKED
+                       "xaddl %[prev], %[cnt]"
+                       : [prev] "+r" (prev),   /* output */
+                         [cnt] "=m" (v->cnt)
+                       : "m" (v->cnt)          /* input */
+                       );
+       return (int32_t)(prev + inc);
+}
+
+/**
+ * Atomically subtract a 32-bit value from a counter and return
+ * the result.
+ *
+ * Atomically subtracts the 32-bit value (dec) from the atomic counter
+ * (v) and returns the value of v after the subtraction.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @param dec
+ *   The value to be subtracted from the counter.
+ * @return
+ *   The value of v after the subtraction.
+ */
+static inline int32_t
+rte_atomic32_sub_return(rte_atomic32_t *v, int32_t dec)
+{
+       /* NOTE(review): dec == INT32_MIN cannot be negated in range. */
+       return rte_atomic32_add_return(v, -dec);
+}
+
+/**
+ * Atomically increment a 32-bit counter by one and test.
+ *
+ * Atomically increments the atomic counter (v) by one and returns true if
+ * the result is 0, or false in all other cases.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @return
+ *   True if the result after the increment operation is 0; false otherwise.
+ */
+static inline int rte_atomic32_inc_and_test(rte_atomic32_t *v)
+{
+       uint8_t ret;
+
+       /* sete captures ZF set by the locked incl, i.e. whether the new
+        * counter value is zero.  NOTE(review): no "memory" clobber here,
+        * unlike cmpset -- confirm intended. */
+       asm volatile(
+                       MPLOCKED
+                       "incl %[cnt] ; "
+                       "sete %[ret]"
+                       : [cnt] "+m" (v->cnt),  /* output */
+                         [ret] "=qm" (ret)
+                       );
+       return (ret != 0);
+}
+
+/**
+ * Atomically decrement a 32-bit counter by one and test.
+ *
+ * Atomically decrements the atomic counter (v) by one and returns true if
+ * the result is 0, or false in all other cases.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @return
+ *   True if the result after the decrement operation is 0; false otherwise.
+ */
+static inline int rte_atomic32_dec_and_test(rte_atomic32_t *v)
+{
+       uint8_t ret;
+
+       /* sete captures ZF from the locked decl: ret != 0 iff the counter
+        * reached exactly zero. */
+       asm volatile(MPLOCKED
+                       "decl %[cnt] ; "
+                       "sete %[ret]"
+                       : [cnt] "+m" (v->cnt),  /* output */
+                         [ret] "=qm" (ret)
+                       );
+       return (ret != 0);
+}
+
+/**
+ * Atomically test and set a 32-bit atomic counter.
+ *
+ * If the counter value is already set, return 0 (failed). Otherwise, set
+ * the counter value to 1 and return 1 (success).
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @return
+ *   0 if failed; else 1, success.
+ */
+static inline int rte_atomic32_test_and_set(rte_atomic32_t *v)
+{
+       /* One-shot flag: succeeds only on the 0 -> 1 transition. */
+       return rte_atomic32_cmpset((volatile uint32_t *)&v->cnt, 0, 1);
+}
+
+/**
+ * Atomically set a 32-bit counter to 0.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ */
+static inline void rte_atomic32_clear(rte_atomic32_t *v)
+{
+       /* Plain volatile store; an aligned 32-bit store is atomic on x86. */
+       v->cnt = 0;
+}
+
+/*------------------------- 64 bit atomic operations -------------------------*/
+
+/**
+ * An atomic compare and set function used by the mutex functions.
+ * (atomic) equivalent to:
+ *   if (*dst == exp)
+ *     *dst = src (all 64-bit words)
+ *
+ * @param dst
+ *   The destination into which the value will be written.
+ * @param exp
+ *   The expected value.
+ * @param src
+ *   The new value.
+ * @return
+ *   Non-zero on success; 0 on failure.
+ */
+static inline int
+rte_atomic64_cmpset(volatile uint64_t *dst, uint64_t exp, uint64_t src)
+{
+       uint8_t res;
+
+       /* cmpxchgq implicitly compares against rax: the "a" input pins
+        * 'exp' there, and the "=a" output reuses the register for the
+        * sete result.  "memory" clobber makes this a compiler barrier. */
+       asm volatile(
+                       MPLOCKED
+                       "cmpxchgq %[src], %[dst];"
+                       "sete %[res];"
+                       : [res] "=a" (res),     /* output */
+                         [dst] "=m" (*dst)
+                       : [src] "r" (src),      /* input */
+                         "a" (exp),
+                         "m" (*dst)
+                       : "memory");            /* no-clobber list */
+
+       return res;
+}
+
+/**
+ * The atomic counter structure.
+ */
+typedef struct {
+       volatile int64_t cnt;  /**< Internal counter value. */
+} rte_atomic64_t;
+
+/**
+ * Static initializer for an atomic counter.
+ *
+ * Example: static rte_atomic64_t total = RTE_ATOMIC64_INIT(0);
+ */
+#define RTE_ATOMIC64_INIT(val) { (val) }
+
+/**
+ * Initialize the atomic counter.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ */
+static inline void
+rte_atomic64_init(rte_atomic64_t *v)
+{
+       /* Plain store: initialize before the counter is shared. */
+       v->cnt = 0;
+}
+
+/**
+ * Atomically read a 64-bit counter.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @return
+ *   The value of the counter.
+ */
+static inline int64_t
+rte_atomic64_read(const rte_atomic64_t *v)
+{
+       /* Volatile load; an aligned 64-bit load is atomic on x86-64.
+        * 'const' added for consistency with rte_atomic32_read(); callers
+        * passing a non-const pointer are unaffected. */
+       return v->cnt;
+}
+
+/**
+ * Atomically set a 64-bit counter.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @param new_value
+ *   The new value of the counter.
+ */
+static inline void
+rte_atomic64_set(rte_atomic64_t *v, int64_t new_value)
+{
+       /* Volatile store; an aligned 64-bit store is atomic on x86-64. */
+       v->cnt = new_value;
+}
+
+/**
+ * Atomically add a 64-bit value to a counter.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @param inc
+ *   The value to be added to the counter.
+ */
+static inline void
+rte_atomic64_add(rte_atomic64_t *v, int64_t inc)
+{
+       /* "ir" lets gcc pass 'inc' as an immediate or register; the paired
+        * "=m"/"m" operands mark v->cnt as read-modify-write. */
+       asm volatile(
+                       MPLOCKED
+                       "addq %[inc], %[cnt]"
+                       : [cnt] "=m" (v->cnt)   /* output */
+                       : [inc] "ir" (inc),     /* input */
+                         "m" (v->cnt)
+                       );
+}
+
+/**
+ * Atomically subtract a 64-bit value from a counter.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @param dec
+ *   The value to be subtracted from the counter.
+ */
+static inline void
+rte_atomic64_sub(rte_atomic64_t *v, int64_t dec)
+{
+       /* Same constraint scheme as rte_atomic64_add, using subq. */
+       asm volatile(
+                       MPLOCKED
+                       "subq %[dec], %[cnt]"
+                       : [cnt] "=m" (v->cnt)   /* output */
+                       : [dec] "ir" (dec),     /* input */
+                         "m" (v->cnt)
+                       );
+}
+
+/**
+ * Atomically increment a 64-bit counter by one.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ */
+static inline void
+rte_atomic64_inc(rte_atomic64_t *v)
+{
+       /* Locked incq; "=m" output + "m" input on the same location marks
+        * v->cnt as read-modify-write (equivalent to "+m"). */
+       asm volatile(
+                       MPLOCKED
+                       "incq %[cnt]"
+                       : [cnt] "=m" (v->cnt)   /* output */
+                       : "m" (v->cnt)          /* input */
+                       );
+}
+
+/**
+ * Atomically decrement a 64-bit counter by one.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ */
+static inline void
+rte_atomic64_dec(rte_atomic64_t *v)
+{
+       /* Locked decq; constraint scheme mirrors rte_atomic64_inc. */
+       asm volatile(
+                       MPLOCKED
+                       "decq %[cnt]"
+                       : [cnt] "=m" (v->cnt)   /* output */
+                       : "m" (v->cnt)          /* input */
+                       );
+}
+
+/**
+ * Add a 64-bit value to an atomic counter and return the result.
+ *
+ * Atomically adds the 64-bit value (inc) to the atomic counter (v) and
+ * returns the value of v after the addition.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @param inc
+ *   The value to be added to the counter.
+ * @return
+ *   The value of v after the addition.
+ */
+static inline int64_t
+rte_atomic64_add_return(rte_atomic64_t *v, int64_t inc)
+{
+       /* xadd leaves the OLD counter value in 'prev'; seeding it with
+        * 'inc' supplies the addend, and 'prev + inc' reconstructs the
+        * post-add value. */
+       int64_t prev = inc;
+
+       asm volatile(
+                       MPLOCKED
+                       "xaddq %[prev], %[cnt]"
+                       : [prev] "+r" (prev),   /* output */
+                         [cnt] "=m" (v->cnt)
+                       : "m" (v->cnt)          /* input */
+                       );
+       return prev + inc;
+}
+
+/**
+ * Subtract a 64-bit value from an atomic counter and return the result.
+ *
+ * Atomically subtracts the 64-bit value (dec) from the atomic counter (v)
+ * and returns the value of v after the subtraction.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @param dec
+ *   The value to be subtracted from the counter.
+ * @return
+ *   The value of v after the subtraction.
+ */
+static inline int64_t
+rte_atomic64_sub_return(rte_atomic64_t *v, int64_t dec)
+{
+       /* NOTE(review): dec == INT64_MIN cannot be negated in range. */
+       return rte_atomic64_add_return(v, -dec);
+}
+
+/**
+ * Atomically increment a 64-bit counter by one and test.
+ *
+ * Atomically increments the atomic counter (v) by one and returns
+ * true if the result is 0, or false in all other cases.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @return
+ *   True if the result after the addition is 0; false otherwise.
+ */
+static inline int rte_atomic64_inc_and_test(rte_atomic64_t *v)
+{
+       uint8_t ret;
+
+       /* sete captures ZF set by the locked incq, i.e. whether the new
+        * counter value is zero.  NOTE(review): no "memory" clobber here,
+        * unlike cmpset -- confirm intended. */
+       asm volatile(
+                       MPLOCKED
+                       "incq %[cnt] ; "
+                       "sete %[ret]"
+                       : [cnt] "+m" (v->cnt), /* output */
+                         [ret] "=qm" (ret)
+                       );
+
+       return ret != 0;
+}
+
+/**
+ * Atomically decrement a 64-bit counter by one and test.
+ *
+ * Atomically decrements the atomic counter (v) by one and returns true if
+ * the result is 0, or false in all other cases.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @return
+ *   True if the result after subtraction is 0; false otherwise.
+ */
+static inline int rte_atomic64_dec_and_test(rte_atomic64_t *v)
+{
+       uint8_t ret;
+
+       /* sete captures ZF from the locked decq: ret != 0 iff the counter
+        * reached exactly zero. */
+       asm volatile(
+                       MPLOCKED
+                       "decq %[cnt] ; "
+                       "sete %[ret]"
+                       : [cnt] "+m" (v->cnt),  /* output */
+                         [ret] "=qm" (ret)
+                       );
+       return ret != 0;
+}
+
+/**
+ * Atomically test and set a 64-bit atomic counter.
+ *
+ * If the counter value is already set, return 0 (failed). Otherwise, set
+ * the counter value to 1 and return 1 (success).
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ * @return
+ *   0 if failed; else 1, success.
+ */
+static inline int rte_atomic64_test_and_set(rte_atomic64_t *v)
+{
+       /* One-shot flag: succeeds only on the 0 -> 1 transition. */
+       return rte_atomic64_cmpset((volatile uint64_t *)&v->cnt, 0, 1);
+}
+
+/**
+ * Atomically set a 64-bit counter to 0.
+ *
+ * @param v
+ *   A pointer to the atomic counter.
+ */
+static inline void rte_atomic64_clear(rte_atomic64_t *v)
+{
+       /* Plain volatile store; an aligned 64-bit store is atomic on x86-64. */
+       v->cnt = 0;
+}
+
+#endif /* _RTE_X86_64_ATOMIC_H_ */
diff --git a/lib/librte_eal/linuxapp/Makefile b/lib/librte_eal/linuxapp/Makefile
new file mode 100644 (file)
index 0000000..17b4222
--- /dev/null
@@ -0,0 +1,39 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# Subdirectories built when the Linux EAL is enabled: the igb_uio kernel
+# module directory and the EAL library itself.
+DIRS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += igb_uio
+DIRS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal
+
+include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/lib/librte_eal/linuxapp/eal/Makefile b/lib/librte_eal/linuxapp/eal/Makefile
new file mode 100644 (file)
index 0000000..c600cbf
--- /dev/null
@@ -0,0 +1,91 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+LIB = librte_eal.a
+
+# Shared EAL sources live in ../common; VPATH lets make find them.
+VPATH += $(RTE_SDK)/lib/librte_eal/common
+
+CFLAGS += -I$(SRCDIR)/include
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common/include
+CFLAGS += -I$(RTE_SDK)/lib/librte_ring
+CFLAGS += -I$(RTE_SDK)/lib/librte_mempool
+CFLAGS += -I$(RTE_SDK)/lib/librte_malloc
+CFLAGS += -I$(RTE_SDK)/lib/librte_ether
+CFLAGS += $(WERROR_FLAGS) -O3
+
+# specific to linuxapp exec-env
+SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) := eal.c
+SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_hugepage_info.c
+SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_memory.c
+SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_thread.c
+SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_log.c
+SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_pci.c
+SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_debug.c
+SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_lcore.c
+SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_hpet.c
+SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_interrupts.c
+SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_alarm.c
+
+# from common dir
+SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_common_memzone.c
+SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_common_log.c
+SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_common_launch.c
+SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_common_pci.c
+SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_common_memory.c
+SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_common_tailqs.c
+SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_common_errno.c
+SRCS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += eal_common_cpuflags.c
+
+# Per-object CFLAGS: these sources need _GNU_SOURCE for GNU libc extensions.
+CFLAGS_eal.o := -D_GNU_SOURCE
+CFLAGS_eal_thread.o := -D_GNU_SOURCE
+CFLAGS_eal_log.o := -D_GNU_SOURCE
+CFLAGS_eal_common_log.o := -D_GNU_SOURCE
+
+# workaround for a gcc bug with noreturn attribute
+# http://gcc.gnu.org/bugzilla/show_bug.cgi?id=12603
+ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
+CFLAGS_eal_thread.o += -Wno-return-type
+CFLAGS_eal_hpet.o += -Wno-return-type
+endif
+
+# Exec-env specific headers exported to applications via symlink.
+INC := rte_per_lcore.h rte_lcore.h rte_interrupts.h
+
+SYMLINK-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP)-include/exec-env := \
+       $(addprefix include/exec-env/,$(INC))
+
+DEPDIRS-$(CONFIG_RTE_LIBRTE_EAL_LINUXAPP) += lib/librte_eal/common
+
+include $(RTE_SDK)/mk/rte.lib.mk
+
diff --git a/lib/librte_eal/linuxapp/eal/eal.c b/lib/librte_eal/linuxapp/eal/eal.c
new file mode 100644 (file)
index 0000000..8d82cc3
--- /dev/null
@@ -0,0 +1,620 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <string.h>
+#include <stdarg.h>
+#include <unistd.h>
+#include <pthread.h>
+#include <getopt.h>
+#include <fcntl.h>
+#include <stddef.h>
+#include <errno.h>
+#include <limits.h>
+#include <errno.h>
+#include <sys/mman.h>
+#include <sys/queue.h>
+
+#include <rte_common.h>
+#include <rte_debug.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_launch.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_log.h>
+#include <rte_random.h>
+#include <rte_cycles.h>
+#include <rte_string_fns.h>
+#include <rte_cpuflags.h>
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_common.h>
+#include <rte_version.h>
+
+#include "eal_private.h"
+#include "eal_thread.h"
+#include "eal_internal_cfg.h"
+#include "eal_fs_paths.h"
+#include "eal_hugepages.h"
+
+/* Long-option names accepted on the EAL command line. */
+#define OPT_HUGE_DIR    "huge-dir"
+#define OPT_PROC_TYPE   "proc-type"
+#define OPT_NO_SHCONF   "no-shconf"
+#define OPT_NO_HPET     "no-hpet"
+#define OPT_NO_PCI      "no-pci"
+#define OPT_NO_HUGE     "no-huge"
+#define OPT_FILE_PREFIX "file-prefix"
+
+/* Maximum number of PCI devices that can be blacklisted with -b. */
+#define RTE_EAL_BLACKLIST_SIZE 0x100
+
+/* Default memory size (64 MB) when running with --no-huge. */
+#define MEMSIZE_IF_NO_HUGE_PAGE (64ULL * 1024ULL * 1024ULL)
+
+/* Parse one hexadecimal field out of (in) into (fd), checking it is
+ * followed by delimiter (dlm) and does not exceed (lim); advances (in)
+ * past the delimiter.  NOTE(review): the embedded 'return' exits the
+ * *enclosing function* with -EINVAL, so this macro is only usable inside
+ * a function returning int. */
+#define GET_BLACKLIST_FIELD(in, fd, lim, dlm)                   \
+{                                                               \
+       unsigned long val;                                      \
+       char *end;                                              \
+       errno = 0;                                              \
+       val = strtoul((in), &end, 16);                          \
+       if (errno != 0 || end[0] != (dlm) || val > (lim))       \
+               return (-EINVAL);                               \
+       (fd) = (typeof (fd))val;                                \
+       (in) = end + 1;                                         \
+}
+
+/* early configuration structure, when memory config is not mmapped */
+static struct rte_mem_config early_mem_config;
+
+/* define fd variable here, because file needs to be kept open for the
+ * duration of the program, as we hold a write lock on it in the primary proc */
+static int mem_cfg_fd = -1;
+
+/* Write lock covering only the memseg region of the shared config file,
+ * leaving room for read locks on other regions (e.g. memzones) later. */
+static struct flock wr_lock = {
+               .l_type = F_WRLCK,
+               .l_whence = SEEK_SET,
+               .l_start = offsetof(struct rte_mem_config, memseg),
+               .l_len = sizeof(early_mem_config.memseg),
+};
+
+/* Address of global and public configuration */
+static struct rte_config rte_config = {
+               .mem_config = &early_mem_config,
+};
+
+/* PCI addresses excluded with -b; presumably filled in by the option
+ * parser further down this file -- confirm against eal_parse_args. */
+static struct rte_pci_addr eal_dev_blacklist[RTE_EAL_BLACKLIST_SIZE];
+
+/* internal configuration (per-core) */
+struct lcore_config lcore_config[RTE_MAX_LCORE];
+
+/* internal configuration */
+struct internal_config internal_config;
+
+/* Return a pointer to the global EAL configuration structure.  Until
+ * rte_config_init() runs, mem_config still points at the static
+ * early_mem_config rather than the shared mmap'd region. */
+struct rte_config *
+rte_eal_get_configuration(void)
+{
+       return &rte_config;
+}
+
+/* create memory configuration in shared/mmap memory. Take out
+ * a write lock on the memsegs, so we can auto-detect primary/secondary.
+ * This means we never close the file while running (auto-close on exit).
+ * We also don't lock the whole file, so that in future we can use read-locks
+ * on other parts, e.g. memzones, to detect if there are running secondary
+ * processes. */
+static void
+rte_eal_config_create(void)
+{
+       void *rte_mem_cfg_addr;
+       int retval;
+
+       const char *pathname = eal_runtime_config_path();
+
+       /* --no-shconf: keep using the static early config, share nothing. */
+       if (internal_config.no_shconf)
+               return;
+
+       /* fd may already be open from eal_proc_type_detect(); it is kept
+        * open for the life of the process to hold the write lock. */
+       if (mem_cfg_fd < 0){
+               mem_cfg_fd = open(pathname, O_RDWR | O_CREAT, 0660);
+               if (mem_cfg_fd < 0)
+                       rte_panic("Cannot open '%s' for rte_mem_config\n", pathname);
+       }
+
+       retval = ftruncate(mem_cfg_fd, sizeof(*rte_config.mem_config));
+       if (retval < 0){
+               close(mem_cfg_fd);
+               rte_panic("Cannot resize '%s' for rte_mem_config\n", pathname);
+       }
+
+       /* Failing to take the write lock means another primary process
+        * already owns this config file. */
+       retval = fcntl(mem_cfg_fd, F_SETLK, &wr_lock);
+       if (retval < 0){
+               close(mem_cfg_fd);
+               rte_exit(EXIT_FAILURE, "Cannot create lock on '%s'. Is another primary "
+                               "process running?\n", pathname);
+       }
+
+       rte_mem_cfg_addr = mmap(NULL, sizeof(*rte_config.mem_config),
+                          PROT_READ | PROT_WRITE, MAP_SHARED, mem_cfg_fd, 0);
+
+       if (rte_mem_cfg_addr == MAP_FAILED){
+               rte_panic("Cannot mmap memory for rte_config\n");
+       }
+       /* Switch from the static early config to the shared mapping,
+        * carrying over anything recorded so far. */
+       rte_config.mem_config = (struct rte_mem_config *) rte_mem_cfg_addr;
+       memcpy(rte_config.mem_config, &early_mem_config,
+                       sizeof(early_mem_config));
+}
+
+/* attach to an existing shared memory config (secondary process):
+ * map the primary's config file read-only in place of early_mem_config */
+static void
+rte_eal_config_attach(void)
+{
+       void *rte_mem_cfg_addr;
+       const char *pathname = eal_runtime_config_path();
+
+       if (internal_config.no_shconf)
+               return;
+
+       if (mem_cfg_fd < 0){
+               mem_cfg_fd = open(pathname, O_RDONLY);
+               if (mem_cfg_fd < 0)
+                       rte_panic("Cannot open '%s' for rte_mem_config\n", pathname);
+       }
+
+       /* unlike the primary, the fd can be closed once mapped: a secondary
+        * holds no lock on the file. */
+       rte_mem_cfg_addr = mmap(NULL, sizeof(*rte_config.mem_config), PROT_READ,
+                       MAP_SHARED, mem_cfg_fd, 0);
+       close(mem_cfg_fd);
+       if (rte_mem_cfg_addr == MAP_FAILED)
+               rte_panic("Cannot mmap memory for rte_config\n");
+
+       rte_config.mem_config = (struct rte_mem_config *) rte_mem_cfg_addr;
+}
+
+/* Detect if we are a primary or a secondary process by attempting to
+ * take the config-file write lock: only the primary can hold it. */
+static enum rte_proc_type_t
+eal_proc_type_detect(void)
+{
+       enum rte_proc_type_t ptype = RTE_PROC_PRIMARY;
+       const char *pathname = eal_runtime_config_path();
+
+       /* if we can open the file but not get a write-lock we are a secondary
+        * process. NOTE: if we get a file handle back, we keep that open
+        * and don't close it to prevent a race condition between multiple opens */
+       if (((mem_cfg_fd = open(pathname, O_RDWR)) >= 0) &&
+                       (fcntl(mem_cfg_fd, F_SETLK, &wr_lock) < 0))
+               ptype = RTE_PROC_SECONDARY;
+
+       RTE_LOG(INFO, EAL, "Auto-detected process type: %s\n",
+                       ptype == RTE_PROC_PRIMARY ? "PRIMARY" : "SECONDARY");
+
+       return ptype;
+}
+
+/* Sets up rte_config structure with the pointer to shared memory config.
+ * Primaries create the shared config; secondaries attach to it. */
+static void
+rte_config_init(void)
+{
+       /* set the magic in configuration structure */
+       rte_config.magic = RTE_MAGIC;
+       rte_config.process_type = (internal_config.process_type == RTE_PROC_AUTO) ?
+                       eal_proc_type_detect() : /* for auto, detect the type */
+                       internal_config.process_type; /* otherwise use what's already set */
+
+       switch (rte_config.process_type){
+       case RTE_PROC_PRIMARY:
+               rte_eal_config_create();
+               break;
+       case RTE_PROC_SECONDARY:
+               rte_eal_config_attach();
+               break;
+       case RTE_PROC_AUTO:
+       case RTE_PROC_INVALID:
+               /* AUTO was resolved above, so both cases are unreachable
+                * unless the option parser produced a bad value. */
+               rte_panic("Invalid process type\n");
+       }
+}
+
+/* Display command-line usage on stdout.
+ * Fixes vs. original: synopsis said "[-m NB]" while the option help says
+ * "-m MB" (now consistent), "alowed" typo corrected, and the mis-indented
+ * "-v" help line realigned with its neighbours. */
+static void
+eal_usage(const char *prgname)
+{
+	printf("\nUsage: %s -c COREMASK -n NUM [-m MB] [-r NUM] [-b <domain:bus:devid.func>]"
+	       "[--proc-type primary|secondary|auto] \n\n"
+	       "EAL options:\n"
+	       "  -c COREMASK: A hexadecimal bitmask of cores to run on\n"
+	       "  -n NUM     : Number of memory channels\n"
+	       "  -v         : Display version information on startup\n"
+	       "  -b <domain:bus:devid.func>: to prevent EAL from using specified PCI device\n"
+	       "               (multiple -b options are allowed)\n"
+	       "  -m MB      : memory to allocate (default = size of hugemem)\n"
+	       "  -r NUM     : force number of memory ranks (don't detect)\n"
+	       "  --"OPT_HUGE_DIR" : directory where hugetlbfs is mounted\n"
+	       "  --"OPT_PROC_TYPE": type of this process\n"
+	       "  --"OPT_FILE_PREFIX": prefix for hugepage filenames\n"
+	       "\nEAL options for DEBUG use only:\n"
+	       "  --"OPT_NO_HUGE"  : use malloc instead of hugetlbfs\n"
+	       "  --"OPT_NO_PCI"   : disable pci\n"
+	       "  --"OPT_NO_HPET"  : disable hpet\n"
+	       "  --"OPT_NO_SHCONF": no shared config (mmap'd files)\n\n",
+	       prgname);
+}
+
+/*
+ * Parse the coremask given as argument (hexadecimal string) and fill
+ * the global configuration (core role and core count) with the parsed
+ * value.
+ *
+ * Returns 0 on success, -1 if the string is empty, not pure hex, or zero.
+ * The lowest set bit becomes the master lcore. Bits above RTE_MAX_LCORE
+ * are silently ignored.
+ */
+static int
+eal_parse_coremask(const char *coremask)
+{
+	struct rte_config *cfg = rte_eal_get_configuration();
+	unsigned i;
+	char *end = NULL;
+	unsigned long long cm;
+	unsigned count = 0;
+
+	/* parse hexadecimal string; *end != '\0' rejects trailing garbage */
+	cm = strtoull(coremask, &end, 16);
+	if ((coremask[0] == '\0') || (end == NULL) || (*end != '\0') || (cm == 0))
+		return -1;
+
+	RTE_LOG(DEBUG, EAL, "coremask set to %llx\n", cm);
+	/* set core role and core count */
+	for (i = 0; i < RTE_MAX_LCORE; i++) {
+		if ((1ULL << i) & cm) {
+			if (count == 0)
+				cfg->master_lcore = i; /* first enabled core is master */
+			cfg->lcore_role[i] = ROLE_RTE;
+			count++;
+		}
+		else {
+			cfg->lcore_role[i] = ROLE_OFF;
+		}
+	}
+	return 0;
+}
+
+/* Sum the total bytes of hugepage memory available across all discovered
+ * hugepage sizes; entries without a valid hugetlbfs mount (hugedir == NULL)
+ * are skipped. Used as the default for -m when the user gives no size. */
+static inline uint64_t
+eal_get_hugepage_mem_size(void)
+{
+	uint64_t size = 0;
+	unsigned i;
+
+	for (i = 0; i < internal_config.num_hugepage_sizes; i++){
+		struct hugepage_info *hpi = &internal_config.hugepage_info[i];
+		if (hpi->hugedir != NULL)
+			size += hpi->hugepage_sz * hpi->num_pages;
+	}
+
+	return (size);
+}
+
+/* Map the --proc-type option string to an rte_proc_type_t.
+ * Matching is case-insensitive; because sizeof("...") includes the
+ * terminating NUL, only exact full-word matches are accepted.
+ * Returns RTE_PROC_INVALID for anything unrecognised. */
+static enum rte_proc_type_t
+eal_parse_proc_type(const char *arg)
+{
+	if (strncasecmp(arg, "primary", sizeof("primary")) == 0)
+		return RTE_PROC_PRIMARY;
+	if (strncasecmp(arg, "secondary", sizeof("secondary")) == 0)
+		return RTE_PROC_SECONDARY;
+	if (strncasecmp(arg, "auto", sizeof("auto")) == 0)
+		return RTE_PROC_AUTO;
+
+	return RTE_PROC_INVALID;
+}
+
+/* Parse a PCI address of the form "domain:bus:devid.func" into *dev2bl.
+ * GET_BLACKLIST_FIELD (defined elsewhere in this file) consumes one field
+ * up to the given separator and range-checks it against the given maximum;
+ * presumably it returns from this function on error -- verify at its
+ * definition. Returns 0 on success. */
+static int
+eal_parse_blacklist(const char *input,  struct rte_pci_addr *dev2bl)
+{
+	GET_BLACKLIST_FIELD(input, dev2bl->domain, UINT16_MAX, ':');
+	GET_BLACKLIST_FIELD(input, dev2bl->bus, UINT8_MAX, ':');
+	GET_BLACKLIST_FIELD(input, dev2bl->devid, UINT8_MAX, '.');
+	GET_BLACKLIST_FIELD(input, dev2bl->function, UINT8_MAX, 0);
+	return (0);
+}
+
+/* Handle one -b option: parse the PCI address at *optarg into slot idx of
+ * the global eal_dev_blacklist array.
+ * Returns the next free index (idx + 1) on success, or -EINVAL if the
+ * array is full or the address is malformed. */
+static ssize_t
+eal_parse_blacklist_opt(const char *optarg, size_t idx)
+{
+	if (idx >= sizeof (eal_dev_blacklist) / sizeof (eal_dev_blacklist[0])) {
+		RTE_LOG(ERR, EAL,
+		    "%s - too many devices to blacklist...\n",
+		    optarg);
+		return (-EINVAL);
+	} else if (eal_parse_blacklist(optarg, eal_dev_blacklist + idx) != 0) {
+		RTE_LOG(ERR, EAL,
+		    "%s - invalid device to blacklist...\n",
+		    optarg);
+		return (-EINVAL);
+	}
+
+	idx += 1;
+	return (idx);
+}
+
+
+/* Parse the argument given in the command line of the application */
+static int
+eal_parse_args(int argc, char **argv)
+{
+       int opt, ret;
+       char **argvopt;
+       int option_index;
+       int coremask_ok = 0;
+       ssize_t blacklist_index = 0;;
+       char *prgname = argv[0];
+       static struct option lgopts[] = {
+               {OPT_NO_HUGE, 0, 0, 0},
+               {OPT_NO_PCI, 0, 0, 0},
+               {OPT_NO_HPET, 0, 0, 0},
+               {OPT_HUGE_DIR, 1, 0, 0},
+               {OPT_NO_SHCONF, 0, 0, 0},
+               {OPT_PROC_TYPE, 1, 0, 0},
+               {OPT_FILE_PREFIX, 1, 0, 0},
+               {0, 0, 0, 0}
+       };
+
+       argvopt = argv;
+
+       internal_config.memory = 0;
+       internal_config.force_nrank = 0;
+       internal_config.force_nchannel = 0;
+       internal_config.hugefile_prefix = HUGEFILE_PREFIX_DEFAULT;
+       internal_config.hugepage_dir = NULL;
+#ifdef RTE_LIBEAL_USE_HPET
+       internal_config.no_hpet = 0;
+#else
+       internal_config.no_hpet = 1;
+#endif
+
+       while ((opt = getopt_long(argc, argvopt, "b:c:m:n:r:v",
+                                 lgopts, &option_index)) != EOF) {
+
+               switch (opt) {
+               /* blacklist */
+               case 'b':
+                       if ((blacklist_index = eal_parse_blacklist_opt(optarg,
+                           blacklist_index)) < 0) {
+                               eal_usage(prgname);
+                               return (-1);
+                       }
+                       break;
+               /* coremask */
+               case 'c':
+                       if (eal_parse_coremask(optarg) < 0) {
+                               RTE_LOG(ERR, EAL, "invalid coremask\n");
+                               eal_usage(prgname);
+                               return -1;
+                       }
+                       coremask_ok = 1;
+                       break;
+               /* size of memory */
+               case 'm':
+                       internal_config.memory = atoi(optarg);
+                       internal_config.memory *= 1024ULL;
+                       internal_config.memory *= 1024ULL;
+                       break;
+               /* force number of channels */
+               case 'n':
+                       internal_config.force_nchannel = atoi(optarg);
+                       if (internal_config.force_nchannel == 0 ||
+                           internal_config.force_nchannel > 4) {
+                               RTE_LOG(ERR, EAL, "invalid channel number\n");
+                               eal_usage(prgname);
+                               return -1;
+                       }
+                       break;
+               /* force number of ranks */
+               case 'r':
+                       internal_config.force_nrank = atoi(optarg);
+                       if (internal_config.force_nrank == 0 ||
+                           internal_config.force_nrank > 16) {
+                               RTE_LOG(ERR, EAL, "invalid rank number\n");
+                               eal_usage(prgname);
+                               return -1;
+                       }
+                       break;
+               case 'v':
+                       /* since message is explicitly requested by user, we
+                        * write message at highest log level so it can always be seen
+                        * even if info or warning messages are disabled */
+                       RTE_LOG(CRIT, EAL, "RTE Version: '%s'\n", rte_version());
+                       break;
+
+               /* long options */
+               case 0:
+                       if (!strcmp(lgopts[option_index].name, OPT_NO_HUGE)) {
+                               internal_config.no_hugetlbfs = 1;
+                       }
+                       else if (!strcmp(lgopts[option_index].name, OPT_NO_PCI)) {
+                               internal_config.no_pci = 1;
+                       }
+                       else if (!strcmp(lgopts[option_index].name, OPT_NO_HPET)) {
+                               internal_config.no_hpet = 1;
+                       }
+                       else if (!strcmp(lgopts[option_index].name, OPT_NO_SHCONF)) {
+                               internal_config.no_shconf = 1;
+                       }
+                       else if (!strcmp(lgopts[option_index].name, OPT_HUGE_DIR)) {
+                               internal_config.hugepage_dir = optarg;
+                       }
+                       else if (!strcmp(lgopts[option_index].name, OPT_PROC_TYPE)) {
+                               internal_config.process_type = eal_parse_proc_type(optarg);
+                       }
+                       else if (!strcmp(lgopts[option_index].name, OPT_FILE_PREFIX)) {
+                               internal_config.hugefile_prefix = optarg;
+                       }
+                       break;
+
+               default:
+                       eal_usage(prgname);
+                       return -1;
+               }
+       }
+
+       /* sanity checks */
+       if (!coremask_ok) {
+               RTE_LOG(ERR, EAL, "coremask not specified\n");
+               eal_usage(prgname);
+               return -1;
+       }
+       if (internal_config.process_type == RTE_PROC_AUTO){
+               internal_config.process_type = eal_proc_type_detect();
+       }
+       if (internal_config.process_type == RTE_PROC_INVALID){
+               RTE_LOG(ERR, EAL, "Invalid process type specified\n");
+               eal_usage(prgname);
+               return -1;
+       }
+       if (internal_config.process_type == RTE_PROC_PRIMARY &&
+                       internal_config.force_nchannel == 0) {
+               RTE_LOG(ERR, EAL, "Number of memory channels (-n) not specified\n");
+               eal_usage(prgname);
+               return -1;
+       }
+       if (index(internal_config.hugefile_prefix,'%') != NULL){
+               RTE_LOG(ERR, EAL, "Invalid char, '%%', in '"OPT_FILE_PREFIX"' option\n");
+               eal_usage(prgname);
+               return -1;
+       }
+
+       if (blacklist_index > 0)
+               rte_eal_pci_set_blacklist(eal_dev_blacklist, blacklist_index);
+
+       if (optind >= 0)
+               argv[optind-1] = prgname;
+
+       ret = optind-1;
+       optind = 0; /* reset getopt lib */
+       return ret;
+}
+
+/* Initialise the EAL and launch one thread per slave lcore; called once at
+ * application init. Panics (aborts) on any subsystem failure. Returns the
+ * number of command-line arguments consumed, as reported by
+ * eal_parse_args(), so the application can locate its own arguments.
+ * Fix vs. original: the alarm-init failure message was a copy-paste of the
+ * interrupt-thread message, making the two failures indistinguishable. */
+int
+rte_eal_init(int argc, char **argv)
+{
+	int i, fctret, ret;
+	pthread_t thread_id;
+
+	thread_id = pthread_self();
+
+	if (rte_eal_log_early_init() < 0)
+		rte_panic("Cannot init early logs\n");
+
+	fctret = eal_parse_args(argc, argv);
+	if (fctret < 0)
+		exit(1);
+
+	if (eal_hugepage_info_init() < 0)
+		rte_panic("Cannot get hugepage information\n");
+
+	/* no -m given: default to all hugepage memory (or a fixed size when
+	 * running without hugetlbfs) */
+	if (internal_config.memory == 0) {
+		if (internal_config.no_hugetlbfs)
+			internal_config.memory = MEMSIZE_IF_NO_HUGE_PAGE;
+		else
+			internal_config.memory = eal_get_hugepage_mem_size();
+	}
+
+	rte_srand(rte_rdtsc());
+	rte_config_init();
+
+	if (rte_eal_cpu_init() < 0)
+		rte_panic("Cannot detect lcores\n");
+
+	if (rte_eal_memory_init() < 0)
+		rte_panic("Cannot init memory\n");
+
+	if (rte_eal_memzone_init() < 0)
+		rte_panic("Cannot init memzone\n");
+
+	if (rte_eal_tailqs_init() < 0)
+		rte_panic("Cannot init tail queues for objects\n");
+
+	if (rte_eal_log_init() < 0)
+		rte_panic("Cannot init logs\n");
+
+	if (rte_eal_alarm_init() < 0)
+		rte_panic("Cannot init alarm\n");
+
+	if (rte_eal_intr_init() < 0)
+		rte_panic("Cannot init interrupt-handling thread\n");
+
+	if (rte_eal_hpet_init() < 0)
+		rte_panic("Cannot init HPET\n");
+
+	if (rte_eal_pci_init() < 0)
+		rte_panic("Cannot init PCI\n");
+
+	RTE_LOG(DEBUG, EAL, "Master core %u is ready (tid=%x)\n",
+		rte_config.master_lcore, (int)thread_id);
+
+	RTE_LCORE_FOREACH_SLAVE(i) {
+
+		/*
+		 * create communication pipes between master thread
+		 * and children
+		 */
+		if (pipe(lcore_config[i].pipe_master2slave) < 0)
+			rte_panic("Cannot create pipe\n");
+		if (pipe(lcore_config[i].pipe_slave2master) < 0)
+			rte_panic("Cannot create pipe\n");
+
+		lcore_config[i].state = WAIT;
+
+		/* create a thread for each lcore */
+		ret = pthread_create(&lcore_config[i].thread_id, NULL,
+				     eal_thread_loop, NULL);
+		if (ret != 0)
+			rte_panic("Cannot create thread\n");
+	}
+
+	/* the master lcore runs in the calling thread */
+	eal_thread_init_master(rte_config.master_lcore);
+
+	return fctret;
+}
+
+/* get core role (ROLE_RTE or ROLE_OFF) for the given lcore id;
+ * no bounds check -- caller must pass lcore_id < RTE_MAX_LCORE */
+enum rte_lcore_role_t
+rte_eal_lcore_role(unsigned lcore_id)
+{
+	return (rte_config.lcore_role[lcore_id]);
+}
+
+/* Return the resolved process type (PRIMARY or SECONDARY after
+ * rte_config_init() has run). */
+enum rte_proc_type_t
+rte_eal_process_type(void)
+{
+	return (rte_config.process_type);
+}
+
diff --git a/lib/librte_eal/linuxapp/eal/eal_alarm.c b/lib/librte_eal/linuxapp/eal/eal_alarm.c
new file mode 100644 (file)
index 0000000..f2eabf6
--- /dev/null
@@ -0,0 +1,232 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+#include <stdio.h>
+#include <stdint.h>
+#include <signal.h>
+#include <errno.h>
+#include <string.h>
+#include <sys/queue.h>
+#include <sys/time.h>
+#include <sys/timerfd.h>
+
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_interrupts.h>
+#include <rte_alarm.h>
+#include <rte_common.h>
+#include <rte_per_lcore.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_launch.h>
+#include <rte_lcore.h>
+#include <rte_errno.h>
+#include <rte_malloc.h>
+#include <rte_spinlock.h>
+#include <eal_private.h>
+
+#define NS_PER_US 1000
+#define US_PER_MS 1000
+#define MS_PER_S 1000
+#define US_PER_S (US_PER_MS * MS_PER_S)
+
+/* One pending alarm; the global list is kept sorted by expiry time. */
+struct alarm_entry {
+	LIST_ENTRY(alarm_entry) next;   /* linkage in time-sorted alarm_list */
+	struct timeval time;            /* absolute expiry time */
+	rte_eal_alarm_callback cb_fn;   /* user callback */
+	void *cb_arg;                   /* opaque argument passed to cb_fn */
+	volatile int executing;         /* set while cb_fn runs, so cancel skips it */
+};
+
+/* pending alarms, sorted by expiry; guarded by alarm_list_lk */
+static LIST_HEAD(alarm_list, alarm_entry) alarm_list = LIST_HEAD_INITIALIZER();
+static rte_spinlock_t alarm_list_lk = RTE_SPINLOCK_INITIALIZER;
+
+/* timerfd wrapped as an interrupt source; fd set by rte_eal_alarm_init() */
+static struct rte_intr_handle intr_handle = {.fd = -1 };
+static int handler_registered = 0; /* callback registered lazily on first set */
+static void eal_alarm_callback(struct rte_intr_handle *hdl, void *arg);
+
+/* Create the non-blocking timerfd backing the alarm subsystem.
+ * Returns 0 on success; on failure stores errno in rte_errno and
+ * returns -1. */
+int
+rte_eal_alarm_init(void)
+{
+	intr_handle.type = RTE_INTR_HANDLE_ALARM;
+	/* create a timerfd file descriptor */
+	intr_handle.fd = timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK);
+	if (intr_handle.fd == -1)
+		goto error;
+
+	return 0;
+
+error:
+	rte_errno = errno;
+	return -1;
+}
+
+/* Fired by the interrupt thread when the timerfd expires: run every alarm
+ * whose expiry is <= now, then re-arm the timerfd for the next pending one.
+ * The list lock is deliberately dropped around cb_fn so a callback may call
+ * rte_eal_alarm_set()/cancel() itself; 'executing' keeps cancel from
+ * freeing the entry underneath us during that window. */
+static void
+eal_alarm_callback(struct rte_intr_handle *hdl __rte_unused,
+		void *arg __rte_unused)
+{
+	struct timeval now;
+	struct alarm_entry *ap;
+
+	rte_spinlock_lock(&alarm_list_lk);
+	/* gettimeofday() runs inside the condition, refreshing 'now' on every
+	 * iteration; the loop stops at the first not-yet-due entry.
+	 * NOTE(review): if gettimeofday() ever failed, 'now' would be used
+	 * uninitialised in the re-arm block below -- in practice it cannot
+	 * fail with these arguments on Linux. */
+	while ((ap = LIST_FIRST(&alarm_list)) !=NULL &&
+			gettimeofday(&now, NULL) == 0 &&
+			(ap->time.tv_sec < now.tv_sec || (ap->time.tv_sec == now.tv_sec &&
+						ap->time.tv_usec <= now.tv_usec))){
+		ap->executing = 1;
+		rte_spinlock_unlock(&alarm_list_lk);
+
+		ap->cb_fn(ap->cb_arg);
+
+		rte_spinlock_lock(&alarm_list_lk);
+		LIST_REMOVE(ap, next);
+		rte_free(ap);
+	}
+
+	/* re-arm the timerfd for the earliest remaining alarm (relative time) */
+	if (!LIST_EMPTY(&alarm_list)) {
+		struct itimerspec atime = { .it_interval = { 0, 0 } };
+
+		ap = LIST_FIRST(&alarm_list);
+		atime.it_value.tv_sec = ap->time.tv_sec;
+		atime.it_value.tv_nsec = ap->time.tv_usec * NS_PER_US;
+		/* perform borrow for subtraction if necessary */
+		if (now.tv_usec > ap->time.tv_usec)
+			atime.it_value.tv_sec--, atime.it_value.tv_nsec += US_PER_S * NS_PER_US;
+
+		atime.it_value.tv_sec -= now.tv_sec;
+		atime.it_value.tv_nsec -= now.tv_usec * NS_PER_US;
+		timerfd_settime(intr_handle.fd, 0, &atime, NULL);
+	}
+	rte_spinlock_unlock(&alarm_list_lk);
+}
+
+/* Schedule cb_fn(cb_arg) to fire 'us' microseconds from now.
+ * The entry is inserted into the time-sorted list; the timerfd is only
+ * re-armed when the new alarm becomes the earliest one. The interrupt
+ * callback is registered lazily on the first call.
+ * Returns 0 on success, -EINVAL for bad arguments, -ENOMEM on allocation
+ * failure, or a non-zero OR of registration/settime failures. */
+int
+rte_eal_alarm_set(uint64_t us, rte_eal_alarm_callback cb_fn, void *cb_arg)
+{
+	struct timeval now;
+	int ret = 0;
+	struct alarm_entry *ap, *new_alarm;
+
+	/* Check parameters, including that us won't cause a uint64_t overflow */
+	if (us < 1 || us > (UINT64_MAX - US_PER_S) || cb_fn == NULL)
+		return -EINVAL;
+
+	new_alarm = rte_malloc(NULL, sizeof(*new_alarm), 0);
+	if (new_alarm == NULL)
+		return -ENOMEM;
+
+	/* use current time to calculate absolute time of alarm */
+	gettimeofday(&now, NULL);
+
+	new_alarm->cb_fn = cb_fn;
+	new_alarm->cb_arg = cb_arg;
+	new_alarm->time.tv_usec = (now.tv_usec + us) % US_PER_S;
+	new_alarm->time.tv_sec = now.tv_sec + ((now.tv_usec + us) / US_PER_S);
+	new_alarm->executing = 0;
+
+	rte_spinlock_lock(&alarm_list_lk);
+	if (!handler_registered) {
+		ret |= rte_intr_callback_register(&intr_handle,
+				eal_alarm_callback, NULL);
+		handler_registered = (ret == 0) ? 1 : 0;
+	}
+
+	/* sorted insert: before the first later entry, else at the tail */
+	if (LIST_EMPTY(&alarm_list))
+		LIST_INSERT_HEAD(&alarm_list, new_alarm, next);
+	else {
+		LIST_FOREACH(ap, &alarm_list, next) {
+			if (ap->time.tv_sec > new_alarm->time.tv_sec ||
+					(ap->time.tv_sec == new_alarm->time.tv_sec &&
+							ap->time.tv_usec > new_alarm->time.tv_usec)){
+				LIST_INSERT_BEFORE(ap, new_alarm, next);
+				break;
+			}
+			if (LIST_NEXT(ap, next) == NULL) {
+				LIST_INSERT_AFTER(ap, new_alarm, next);
+				break;
+			}
+		}
+	}
+
+	/* new earliest deadline: re-arm the timerfd with the relative delay */
+	if (LIST_FIRST(&alarm_list) == new_alarm) {
+		struct itimerspec alarm_time = {
+			.it_interval = {0, 0},
+			.it_value = {
+				.tv_sec = us / US_PER_S,
+				.tv_nsec = (us % US_PER_S) * NS_PER_US,
+			},
+		};
+		ret |= timerfd_settime(intr_handle.fd, 0, &alarm_time, NULL);
+	}
+	rte_spinlock_unlock(&alarm_list_lk);
+
+	return ret;
+}
+
+/* Cancel all pending alarms matching cb_fn, and cb_arg unless cb_arg is
+ * (void *)-1 which acts as a wildcard. Entries whose callback is currently
+ * executing (executing != 0) are left alone. Returns the number of entries
+ * removed, or -1 if cb_fn is NULL. Note: the timerfd is NOT re-armed here,
+ * so one spurious expiry may occur after a cancel. */
+int
+rte_eal_alarm_cancel(rte_eal_alarm_callback cb_fn, void *cb_arg)
+{
+	struct alarm_entry *ap, *ap_prev;
+	int count = 0;
+
+	if (!cb_fn)
+		return -1;
+
+	rte_spinlock_lock(&alarm_list_lk);
+	/* remove any matches at the start of the list */
+	while ((ap = LIST_FIRST(&alarm_list)) != NULL &&
+			cb_fn == ap->cb_fn && ap->executing == 0 &&
+			(cb_arg == (void *)-1 || cb_arg == ap->cb_arg)) {
+		LIST_REMOVE(ap, next);
+		rte_free(ap);
+		count++;
+	}
+	/* ap is now the (unremoved) head, or NULL if the list emptied */
+	ap_prev = ap;
+
+	/* now go through list, removing entries not at start; after freeing a
+	 * match we step 'ap' back to ap_prev so LIST_FOREACH's advance reads
+	 * the predecessor's (still valid) next pointer */
+	LIST_FOREACH(ap, &alarm_list, next) {
+		/* this won't be true first time through */
+		if (cb_fn == ap->cb_fn &&  ap->executing == 0 &&
+				(cb_arg == (void *)-1 || cb_arg == ap->cb_arg)) {
+			LIST_REMOVE(ap,next);
+			rte_free(ap);
+			count++;
+			ap = ap_prev;
+		}
+		ap_prev = ap;
+	}
+	rte_spinlock_unlock(&alarm_list_lk);
+	return count;
+}
+
diff --git a/lib/librte_eal/linuxapp/eal/eal_debug.c b/lib/librte_eal/linuxapp/eal/eal_debug.c
new file mode 100644 (file)
index 0000000..c05341d
--- /dev/null
@@ -0,0 +1,114 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <execinfo.h>
+#include <stdarg.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdint.h>
+
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_common.h>
+
+#define BACKTRACE_SIZE 256
+
+/* dump the stack of the calling core */
+void rte_dump_stack(void)
+{
+       void *func[BACKTRACE_SIZE];
+       char **symb = NULL;
+       int size;
+
+       size = backtrace(func, BACKTRACE_SIZE);
+       symb = backtrace_symbols(func, size);
+       while (size > 0) {
+               rte_log(RTE_LOG_ERR, RTE_LOGTYPE_EAL,
+                       "%d: [%s]\n", size, symb[size - 1]);
+               size --;
+       }
+}
+
+/* not implemented in this environment: register dumping is only provided
+ * on baremetal targets; this Linux stub keeps the API uniform */
+void rte_dump_registers(void)
+{
+	return;
+}
+
+/* call abort(), it will generate a coredump if enabled.
+ * Backend of the rte_panic() macro: logs the failing function name and the
+ * formatted message at CRIT level, dumps stack and registers, never
+ * returns. */
+void __rte_panic(const char *funcname, const char *format, ...)
+{
+	va_list ap;
+
+	/* disable history so the message is emitted immediately */
+	rte_log_set_history(0);
+
+	rte_log(RTE_LOG_CRIT, RTE_LOGTYPE_EAL, "PANIC in %s():\n", funcname);
+	va_start(ap, format);
+	rte_vlog(RTE_LOG_CRIT, RTE_LOGTYPE_EAL, format, ap);
+	va_end(ap);
+	rte_dump_stack();
+	rte_dump_registers();
+	abort();
+}
+
+/*
+ * Like rte_panic this terminates the application. However, no traceback is
+ * provided and no core-dump is generated.
+ */
+void
+rte_exit(int exit_code, const char *format, ...)
+{
+       va_list ap;
+
+       /* disable history */
+       rte_log_set_history(0);
+
+       if (exit_code != 0)
+               RTE_LOG(CRIT, EAL, "Error - exiting with code: %d\nCause: ", exit_code);
+
+       va_start(ap, format);
+       rte_vlog(RTE_LOG_CRIT, RTE_LOGTYPE_EAL, format, ap);
+       va_end(ap);
+
+#ifndef RTE_EAL_ALWAYS_PANIC_ON_ERROR
+       exit(exit_code);
+#else
+       rte_dump_stack();
+       rte_dump_registers();
+       abort();
+#endif
+}
diff --git a/lib/librte_eal/linuxapp/eal/eal_hpet.c b/lib/librte_eal/linuxapp/eal/eal_hpet.c
new file mode 100644 (file)
index 0000000..aa686b1
--- /dev/null
@@ -0,0 +1,232 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <string.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdarg.h>
+#include <stdint.h>
+#include <fcntl.h>
+#include <inttypes.h>
+#include <sys/mman.h>
+#include <sys/queue.h>
+#include <unistd.h>
+#include <pthread.h>
+#include <errno.h>
+
+#include <rte_common.h>
+#include <rte_log.h>
+#include <rte_cycles.h>
+#include <rte_tailq.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_eal.h>
+
+#include "eal_private.h"
+#include "eal_internal_cfg.h"
+
+#define DEV_HPET "/dev/hpet"
+
+/* Maximum number of counters. */
+#define HPET_TIMER_NUM 3
+
+/* General capabilities register */
+#define CLK_PERIOD_SHIFT     32 /* Clock period shift. */
+#define CLK_PERIOD_MASK      0xffffffff00000000ULL /* Clock period mask. */
+#define COUNT_SIZE_CAP_SHIFT 13 /* Count size capa. shift. */
+#define COUNT_SIZE_CAP_MASK 0x0000000000002000ULL /* Count size capa. mask. */
+
+/**
+ * HPET timer registers. From the Intel IA-PC HPET (High Precision Event
+ * Timers) Specification.
+ *
+ * NOTE(review): field order and sizes must mirror the MMIO register map
+ * exactly (the struct is overlaid on the mmap of /dev/hpet); do not
+ * reorder or insert members.
+ */
+struct eal_hpet_regs {
+	/* Memory-mapped, software visible registers */
+	uint64_t capabilities;      /**< RO General Capabilities Register. */
+	uint64_t reserved0;         /**< Reserved for future use. */
+	uint64_t config;            /**< RW General Configuration Register. */
+	uint64_t reserved1;         /**< Reserved for future use. */
+	uint64_t isr;               /**< RW Clear General Interrupt Status. */
+	uint64_t reserved2[25];     /**< Reserved for future use. */
+	union {
+		uint64_t counter;   /**< RW Main Counter Value Register. */
+		struct {
+			uint32_t counter_l; /**< RW Main Counter Low. */
+			uint32_t counter_h; /**< RW Main Counter High. */
+		};
+	};
+	uint64_t reserved3;         /**< Reserved for future use. */
+	struct {
+		uint64_t config;    /**< RW Timer Config and Capability Reg. */
+		uint64_t comp;      /**< RW Timer Comparator Value Register. */
+		uint64_t fsb;       /**< RW FSB Interrupt Route Register. */
+		uint64_t reserved4; /**< Reserved for future use. */
+	} timers[HPET_TIMER_NUM]; /**< Set of HPET timers. */
+};
+
+/* Mmap'd hpet registers; NULL until rte_eal_hpet_init() succeeds */
+static volatile struct eal_hpet_regs *eal_hpet = NULL;
+
+/* Period at which the counter increments in femtoseconds (10^-15 seconds).
+ * In TSC-fallback mode this holds the equivalent TSC period instead. */
+static uint32_t eal_hpet_resolution_fs = 0;
+
+/* Frequency of the counter in Hz (TSC Hz in fallback mode) */
+static uint64_t eal_hpet_resolution_hz = 0;
+
+/* Incremented 4 times during one 32bits hpet full count; together with the
+ * hardware's low 32 bits this reconstructs a 64-bit counter */
+static uint32_t eal_hpet_msb;
+
+/* thread running hpet_msb_inc() */
+static pthread_t msb_inc_thread_id;
+
+/*
+ * This function runs on a dedicated thread and keeps a global software
+ * extension of the HPET counter's most-significant bits up to date
+ * (unfortunately we need this because the hpet counter is 32 bits by
+ * default under Linux). It samples the top two bits of the hardware
+ * counter every 10 s, which is frequent enough to never miss a quarter
+ * rollover of the 32-bit counter.
+ */
+static __attribute__((noreturn)) void *
+hpet_msb_inc(__attribute__((unused)) void *arg)
+{
+	uint32_t t;
+
+	while (1) {
+		t = (eal_hpet->counter_l >> 30);
+		if (t != (eal_hpet_msb & 3))
+			eal_hpet_msb ++;
+		sleep(10);
+	}
+}
+
+/* Calibrate the TSC frequency by counting cycles across a 1-second sleep,
+ * and derive the matching per-tick period in femtoseconds. Used when the
+ * HPET is unavailable/disabled so the rte_get_hpet_* API can fall back to
+ * the TSC. NOTE(review): accuracy is bounded by sleep(1) jitter. */
+static inline void
+set_rdtsc_freq(void)
+{
+	uint64_t start;
+
+	start = rte_rdtsc();
+	sleep(1);
+	eal_hpet_resolution_hz = rte_rdtsc() - start;
+	eal_hpet_resolution_fs = (uint32_t)
+			((1.0 / eal_hpet_resolution_hz) / 1e-15);
+}
+
+/*
+ * Open and mmap /dev/hpet (high precision event timer) that will
+ * provide our time reference. On any failure (or with --no-hpet) this
+ * degrades gracefully to the TSC and still returns 0.
+ * Fix vs. original: pthread_create() returns a positive error number on
+ * failure (not -1), so the old "ret < 0" test could never detect a failed
+ * thread creation and the MSB-tracking thread would silently be missing.
+ */
+int
+rte_eal_hpet_init(void)
+{
+	int fd, ret;
+
+	if (internal_config.no_hpet) {
+		goto use_rdtsc;
+	}
+
+	fd = open(DEV_HPET, O_RDONLY);
+	if (fd < 0) {
+		RTE_LOG(WARNING, EAL, "WARNING: Cannot open "DEV_HPET": %s! "
+				"The TSC will be used instead.\n",
+			strerror(errno));
+		goto use_rdtsc;
+	}
+	eal_hpet = mmap(NULL, 1024, PROT_READ, MAP_SHARED, fd, 0);
+	if (eal_hpet == MAP_FAILED) {
+		RTE_LOG(WARNING, EAL, "WARNING: Cannot mmap "DEV_HPET"! "
+				"The TSC will be used instead.\n");
+		close(fd);
+		goto use_rdtsc;
+	}
+	/* the mapping keeps the registers accessible after close() */
+	close(fd);
+
+	/* tick period in femtoseconds, from the capabilities register */
+	eal_hpet_resolution_fs = (uint32_t)((eal_hpet->capabilities &
+					CLK_PERIOD_MASK) >>
+					CLK_PERIOD_SHIFT);
+
+	/* 10^15 fs/s divided by the period gives the frequency in Hz */
+	eal_hpet_resolution_hz = (1000ULL*1000ULL*1000ULL*1000ULL*1000ULL) /
+		(uint64_t)eal_hpet_resolution_fs;
+
+	eal_hpet_msb = (eal_hpet->counter_l >> 30);
+
+	/* create a thread that will increment a global variable for
+	 * msb (hpet is 32 bits by default under linux) */
+	ret = pthread_create(&msb_inc_thread_id, NULL, hpet_msb_inc, NULL);
+	if (ret != 0) {
+		RTE_LOG(WARNING, EAL, "WARNING: Cannot create HPET timer thread! "
+				"The TSC will be used instead.\n");
+		goto use_rdtsc;
+	}
+
+	return 0;
+
+use_rdtsc:
+	internal_config.no_hpet = 1;
+	set_rdtsc_freq();
+	return 0;
+}
+
+/* Return the counter frequency in Hz (the calibrated TSC frequency when
+ * running in the no-hpet fallback mode). */
+uint64_t
+rte_get_hpet_hz(void)
+{
+	return eal_hpet_resolution_hz;
+}
+
+/* Return a 64-bit counter value: the hardware's low 32 bits combined with
+ * the software-tracked MSB extension (see hpet_msb_inc). Falls back to the
+ * raw TSC when the HPET is disabled. The "(msb + 2 - (t >> 30)) / 4"
+ * arithmetic reconciles the sampled msb with the current quarter-phase of
+ * the 32-bit counter so a rollover between the two reads is tolerated. */
+uint64_t
+rte_get_hpet_cycles(void)
+{
+	uint32_t t, msb;
+	uint64_t ret;
+
+	if(internal_config.no_hpet)
+		/* fallback to rdtsc */
+		return rte_rdtsc();
+
+	t = eal_hpet->counter_l;
+	msb = eal_hpet_msb;
+	ret = (msb + 2 - (t >> 30)) / 4;
+	ret <<= 32;
+	ret += t;
+	return ret;
+}
+
+/* Busy-wait for (at least) 'us' microseconds by spinning on the counter.
+ * us microseconds * 10^9 gives femtoseconds, which divided by the per-tick
+ * period yields the number of ticks to wait. Works in both HPET and TSC
+ * fallback modes since eal_hpet_resolution_fs is set for either. */
+void
+rte_delay_us(unsigned us)
+{
+	uint64_t start;
+	uint64_t ticks;
+	ticks = (uint64_t)us * 1000ULL * 1000ULL * 1000ULL;
+	ticks /= eal_hpet_resolution_fs;
+	start = rte_get_hpet_cycles();
+	while ((rte_get_hpet_cycles() - start) < ticks)
+		rte_pause();
+}
diff --git a/lib/librte_eal/linuxapp/eal/eal_hugepage_info.c b/lib/librte_eal/linuxapp/eal/eal_hugepage_info.c
new file mode 100644 (file)
index 0000000..d1ed49a
--- /dev/null
@@ -0,0 +1,229 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <string.h>
+#include <sys/types.h>
+#include <dirent.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <inttypes.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <sys/queue.h>
+
+#include "rte_memory.h"
+#include "rte_memzone.h"
+#include "rte_tailq.h"
+#include "rte_eal.h"
+#include "rte_launch.h"
+#include "rte_per_lcore.h"
+#include "rte_lcore.h"
+#include "rte_debug.h"
+#include "rte_log.h"
+#include "rte_common.h"
+#include "rte_string_fns.h"
+#include "eal_internal_cfg.h"
+#include "eal_hugepages.h"
+
+/* sysfs root: one "hugepages-<size>" subdirectory per supported size */
+static const char sys_dir_path[] = "/sys/kernel/mm/hugepages";
+
+/* Read the number of reserved pages of one size from
+ * /sys/kernel/mm/hugepages/<subdir>/nr_hugepages.
+ * Panics if the file cannot be opened or parsed. */
+static int32_t
+get_num_hugepages(const char *subdir)
+{
+       const char nr_hp_file[] = "nr_hugepages";
+       char path[BUFSIZ];
+       unsigned num_pages = 0;
+
+       rte_snprintf(path, sizeof(path), "%s/%s/%s",
+                       sys_dir_path, subdir, nr_hp_file);
+       /* a NULL fd short-circuits the ||, so fscanf never sees NULL */
+       FILE *fd = fopen(path, "r");
+       if (fd == NULL || fscanf(fd, "%u", &num_pages) != 1)
+               rte_panic("Error reading file '%s'\n", path);
+       fclose(fd);
+
+       return num_pages;
+}
+
+/* Return the kernel's default hugepage size in bytes, parsed from the
+ * "Hugepagesize:" line of /proc/meminfo.  Panics if the file cannot
+ * be opened or the line is absent. */
+static uint64_t
+get_default_hp_size(void)
+{
+       const char proc_meminfo[] = "/proc/meminfo";
+       const char str_hugepagesz[] = "Hugepagesize:";
+       unsigned hugepagesz_len = sizeof(str_hugepagesz) - 1;
+       char buffer[256];
+       unsigned long long size = 0;
+
+       FILE *fd = fopen(proc_meminfo, "r");
+       if (fd == NULL)
+               rte_panic("Cannot open %s\n", proc_meminfo);
+       while(fgets(buffer, sizeof(buffer), fd)){
+               if (strncmp(buffer, str_hugepagesz, hugepagesz_len) == 0){
+                       /* e.g. "Hugepagesize:       2048 kB" */
+                       size = rte_str_to_size(&buffer[hugepagesz_len]);
+                       break;
+               }
+       }
+       fclose(fd);
+       if (size == 0)
+               rte_panic("Cannot get default hugepage size from %s\n", proc_meminfo);
+       return size;
+}
+
+/* Scan /proc/mounts for a hugetlbfs mount point whose page size
+ * matches hugepage_sz; mounts without an explicit pagesize= option
+ * are matched against the kernel's default hugepage size.  When
+ * --huge-dir was given, only that mount point is considered.
+ * Returns a strdup()'d mount path (owned by the caller), or NULL if
+ * no suitable mount was found. */
+static const char *
+get_hugepage_dir(uint64_t hugepage_sz)
+{
+       enum proc_mount_fieldnames {
+               DEVICE = 0,
+               MOUNTPT,
+               FSTYPE,
+               OPTIONS,
+               _FIELDNAME_MAX
+       };
+       static uint64_t default_size = 0; /* cached across calls */
+       const char proc_mounts[] = "/proc/mounts";
+       const char hugetlbfs_str[] = "hugetlbfs";
+       const size_t htlbfs_str_len = sizeof(hugetlbfs_str) - 1;
+       const char pagesize_opt[] = "pagesize=";
+       const size_t pagesize_opt_len = sizeof(pagesize_opt) - 1;
+       const char split_tok = ' ';
+       char *splitstr[_FIELDNAME_MAX];
+       char buf[BUFSIZ];
+       char *retval = NULL;
+
+       FILE *fd = fopen(proc_mounts, "r");
+       if (fd == NULL)
+               rte_panic("Cannot open %s\n", proc_mounts);
+
+       if (default_size == 0)
+               default_size = get_default_hp_size();
+
+       while (fgets(buf, sizeof(buf), fd)){
+               if (rte_strsplit(buf, sizeof(buf), splitstr, _FIELDNAME_MAX,
+                               split_tok) != _FIELDNAME_MAX) {
+                       RTE_LOG(ERR, EAL, "Error parsing %s\n", proc_mounts);
+                       break; /* return NULL */
+               }
+
+               /* we have a specified --huge-dir option, only examine that dir */
+               if (internal_config.hugepage_dir != NULL &&
+                               strcmp(splitstr[MOUNTPT], internal_config.hugepage_dir) != 0)
+                       continue;
+
+               /* NOTE(review): prefix match - any fs type that merely
+                * starts with "hugetlbfs" is accepted */
+               if (strncmp(splitstr[FSTYPE], hugetlbfs_str, htlbfs_str_len) == 0){
+                       const char *pagesz_str = strstr(splitstr[OPTIONS], pagesize_opt);
+
+                       /* if no explicit page size, the default page size is compared */
+                       if (pagesz_str == NULL){
+                               if (hugepage_sz == default_size){
+                                       retval = strdup(splitstr[MOUNTPT]);
+                                       break;
+                               }
+                       }
+                       /* there is an explicit page size, so check it */
+                       else {
+                               uint64_t pagesz = rte_str_to_size(&pagesz_str[pagesize_opt_len]);
+                               if (pagesz == hugepage_sz) {
+                                       retval = strdup(splitstr[MOUNTPT]);
+                                       break;
+                               }
+                       }
+               } /* end if strncmp hugetlbfs */
+       } /* end while fgets */
+
+       fclose(fd);
+       return retval;
+}
+
+/* Swap two hugepage_info entries via a stack buffer; used by the
+ * descending size sort in eal_hugepage_info_init(). */
+static inline void
+swap_hpi(struct hugepage_info *a, struct hugepage_info *b)
+{
+       char buf[sizeof(*a)];
+       memcpy(buf, a, sizeof(*a));
+       memcpy(a, b, sizeof(*a));
+       memcpy(b, buf, sizeof(*a));
+}
+
+/* Discover the hugepage sizes supported by the running kernel by
+ * listing /sys/kernel/mm/hugepages, record the reserved page count
+ * and hugetlbfs mount point for each size in
+ * internal_config.hugepage_info, and sort the entries largest first.
+ * Returns 0 if at least one usable size (mounted + pages reserved)
+ * was found, -1 otherwise. */
+int
+eal_hugepage_info_init(void)
+{
+       const char dirent_start_text[] = "hugepages-";
+       const size_t dirent_start_len = sizeof(dirent_start_text) - 1;
+       unsigned i, num_sizes = 0;
+
+       DIR *dir = opendir(sys_dir_path);
+       if (dir == NULL)
+               rte_panic("Cannot open directory %s to read system hugepage info\n",
+                               sys_dir_path);
+
+       struct dirent *dirent = readdir(dir);
+       while(dirent != NULL){
+               /* each "hugepages-<size>" subdir describes one page size */
+               if (strncmp(dirent->d_name, dirent_start_text, dirent_start_len) == 0){
+                       /* NOTE(review): num_sizes is not checked against
+                        * the capacity of internal_config.hugepage_info -
+                        * confirm the array bound elsewhere. */
+                       struct hugepage_info *hpi = \
+                                       &internal_config.hugepage_info[num_sizes];
+                       hpi->hugepage_sz = rte_str_to_size(&dirent->d_name[dirent_start_len]);
+                       hpi->num_pages = get_num_hugepages(dirent->d_name);
+                       hpi->hugedir = get_hugepage_dir(hpi->hugepage_sz);
+                       if (hpi->hugedir == NULL){
+                               RTE_LOG(INFO, EAL, "%u hugepages of size %llu reserved, "\
+                                               "but no mounted hugetlbfs found for that size\n",
+                                               hpi->num_pages,
+                                               (unsigned long long)hpi->hugepage_sz);
+                               hpi->num_pages = 0;
+                       } else
+                               num_sizes++;
+               }
+               dirent = readdir(dir);
+       }
+       closedir(dir);
+       internal_config.num_hugepage_sizes = num_sizes;
+
+       /* sort the page directory entries by size, largest to smallest
+        * (adjacent-swap bubble sort) */
+       for (i = 0; i < num_sizes; i++){
+               unsigned j;
+               for (j = i+1; j < num_sizes; j++)
+                       if (internal_config.hugepage_info[j-1].hugepage_sz < \
+                                       internal_config.hugepage_info[j].hugepage_sz)
+                               swap_hpi(&internal_config.hugepage_info[j-1],
+                                               &internal_config.hugepage_info[j]);
+       }
+
+       /* now we have all info, check we have at least one valid size */
+       for (i = 0; i < num_sizes; i++)
+               if (internal_config.hugepage_info[i].hugedir != NULL &&
+                               internal_config.hugepage_info[i].num_pages > 0)
+                       return 0;
+       /* no valid hugepage mounts available, return error */
+       return -1;
+}
diff --git a/lib/librte_eal/linuxapp/eal/eal_interrupts.c b/lib/librte_eal/linuxapp/eal/eal_interrupts.c
new file mode 100644 (file)
index 0000000..8ff2289
--- /dev/null
@@ -0,0 +1,540 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <pthread.h>
+#include <sys/queue.h>
+#include <malloc.h>
+#include <stdarg.h>
+#include <unistd.h>
+#include <string.h>
+#include <errno.h>
+#include <inttypes.h>
+#include <sys/epoll.h>
+#include <sys/signalfd.h>
+
+#include <rte_common.h>
+#include <rte_interrupts.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_launch.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_ring.h>
+#include <rte_debug.h>
+#include <rte_log.h>
+#include <rte_mempool.h>
+#include <rte_pci.h>
+#include <rte_malloc.h>
+#include <rte_errno.h>
+#include <rte_spinlock.h>
+
+#include "eal_private.h"
+
+#define EAL_INTR_EPOLL_WAIT_FOREVER (-1)
+
+/**
+ * union for pipe fds: the anonymous structs overlay pipefd[2] with
+ * the readfd/writefd aliases (pipefd[0] is the read end and
+ * pipefd[1] the write end, per pipe(2)).
+ */
+union intr_pipefds{
+       struct {
+               int pipefd[2];
+       };
+       struct {
+               int readfd;
+               int writefd;
+       };
+};
+
+/**
+ * union buffer for reading on different devices; the size to read is
+ * chosen per handle type in eal_intr_process_interrupts().
+ */
+union rte_intr_read_buffer {
+       int uio_intr_count;              /* for uio device */
+       uint64_t timerfd_num;            /* for timerfd */
+       char charbuf[16];                /* for others */
+};
+
+TAILQ_HEAD(rte_intr_cb_list, rte_intr_callback);
+TAILQ_HEAD(rte_intr_source_list, rte_intr_source);
+
+/* one user callback registered on an interrupt source */
+struct rte_intr_callback {
+       TAILQ_ENTRY(rte_intr_callback) next;
+       rte_intr_callback_fn cb_fn;  /**< callback address */
+       void *cb_arg;                /**< parameter for callback */
+};
+
+/* one interrupt source (fd) and its list of user callbacks */
+struct rte_intr_source {
+       TAILQ_ENTRY(rte_intr_source) next;
+       struct rte_intr_handle intr_handle; /**< interrupt handle */
+       struct rte_intr_cb_list callbacks;  /**< user callbacks */
+};
+
+/* global spinlock for interrupt data operation */
+static rte_spinlock_t intr_lock = RTE_SPINLOCK_INITIALIZER;
+
+/* self-pipe used to wake the handler thread to rebuild its wait list */
+static union intr_pipefds intr_pipe;
+
+/* interrupt sources list */
+static struct rte_intr_source_list intr_sources;
+
+/* interrupt handling thread */
+static pthread_t intr_thread;
+
+/* Register a user callback (cb, cb_arg) for the interrupt source
+ * identified by intr_handle->fd.  A source entry is created on the
+ * first registration for an fd; the handler thread is then woken via
+ * the self-pipe so it rebuilds its epoll wait list.
+ * Returns 0 on success or a negative errno-style value. */
+int
+rte_intr_callback_register(struct rte_intr_handle *intr_handle,
+                       rte_intr_callback_fn cb, void *cb_arg)
+{
+       int ret = -1;
+       struct rte_intr_source *src;
+       int wake_thread = 0;
+
+       /* first do parameter checking */
+       if (intr_handle == NULL || intr_handle->fd < 0 || cb == NULL) {
+               RTE_LOG(ERR, EAL,
+                       "Registering with invalid input parameter\n");
+               return -EINVAL;
+       }
+
+       /* allocate a new interrupt callback entity */
+       struct rte_intr_callback *callback =
+               rte_zmalloc("interrupt callback list",
+                               sizeof(*callback), 0);
+       if (callback == NULL) {
+               RTE_LOG(ERR, EAL, "Can not allocate memory\n");
+               return -ENOMEM;
+       }
+       callback->cb_fn = cb;
+       callback->cb_arg = cb_arg;
+
+       rte_spinlock_lock(&intr_lock);
+
+       /* check if there is at least one callback registered for the fd;
+        * note: the whole if-statement below is the TAILQ_FOREACH body */
+       TAILQ_FOREACH(src, &intr_sources, next)
+       if (src->intr_handle.fd == intr_handle->fd) {
+               if (src->callbacks.tqh_first == NULL)
+                       /* we had no interrupts for this */
+                       wake_thread = 1;
+
+               TAILQ_INSERT_TAIL(&(src->callbacks), callback, next);
+               break;
+       }
+
+       /* No callback registered for this fd */
+       if (src == NULL){
+               /* no existing callbacks for this - add new source */
+               src = rte_zmalloc("interrupt source list", sizeof(*src), 0);
+               if (src == NULL){
+                       RTE_LOG(ERR, EAL, "Can not allocate memory\n");
+                       /* NOTE(review): 'callback' is leaked on this
+                        * error path - it should be freed here. */
+                       ret = -ENOMEM;
+                       goto error;
+               }
+               src->intr_handle = *intr_handle;
+               TAILQ_INIT(&src->callbacks);
+
+               TAILQ_INSERT_TAIL(&intr_sources, src, next);
+               TAILQ_INSERT_TAIL(&(src->callbacks), callback, next);
+               wake_thread = 1;
+       }
+
+       rte_spinlock_unlock(&intr_lock);
+       /**
+        * check if need to notify the pipe fd waited by epoll_wait to
+        * rebuild the wait list.
+        */
+       if (wake_thread)
+               if (write(intr_pipe.writefd, "1", 1) < 0)
+                       return -EPIPE;
+
+       return 0;
+
+error:
+       rte_spinlock_unlock(&intr_lock);
+
+       return ret;
+}
+
+/* Remove callbacks matching (cb_fn, cb_arg) from the source for
+ * intr_handle->fd; cb_arg == (void *)-1 acts as a wildcard matching
+ * any argument.  The source itself is freed once its callback list
+ * becomes empty, and the handler thread is woken to rebuild its wait
+ * list.  Returns the number of callbacks removed, or a negative
+ * errno-style value on error. */
+int
+rte_intr_callback_unregister(struct rte_intr_handle *intr_handle,
+                       rte_intr_callback_fn cb_fn, void *cb_arg)
+{
+       int ret = -1;
+       struct rte_intr_source *src;
+       struct rte_intr_callback *cb;
+
+       /* do parameter checking first */
+       if (intr_handle == NULL || intr_handle->fd < 0) {
+               RTE_LOG(ERR, EAL,
+               "Unregistering with invalid input parameter\n");
+               return -EINVAL;
+       }
+
+       rte_spinlock_lock(&intr_lock);
+
+       /* check whether an interrupt source exists for this fd */
+       TAILQ_FOREACH(src, &intr_sources, next)
+               if (src->intr_handle.fd == intr_handle->fd)
+                       break;
+
+       /* No interrupt source registered for the fd */
+       if (src == NULL) {
+               ret = -ENOENT;
+               goto error;
+       }
+
+       ret = 0;
+       /* NOTE(review): this loop keeps iterating after freeing list
+        * nodes - TAILQ_FOREACH reads cb->next after rte_free(cb), and
+        * once 'src' is freed the next iteration reads src->callbacks.
+        * Needs a safe-iteration form (e.g. TAILQ_FOREACH_SAFE) with
+        * the source removal moved after the loop. */
+       TAILQ_FOREACH(cb, &src->callbacks, next) {
+               if (cb->cb_fn != cb_fn)
+                       continue;
+               if (cb_arg == (void *)-1 || cb->cb_arg == cb_arg) {
+                       TAILQ_REMOVE(&src->callbacks, cb, next);
+                       rte_free(cb);
+                       ret ++;
+               }
+
+               if (src->callbacks.tqh_first == NULL) {
+                       TAILQ_REMOVE(&intr_sources, src, next);
+                       rte_free(src);
+               }
+       }
+
+       /* notify the pipe fd waited by epoll_wait to rebuild the wait list */
+       if (write(intr_pipe.writefd, "1", 1) < 0) {
+               ret = -EPIPE;
+               goto error;
+       }
+
+       rte_spinlock_unlock(&intr_lock);
+
+       return ret;
+
+error:
+       rte_spinlock_unlock(&intr_lock);
+
+       return ret;
+}
+
+/* Enable the interrupt for the given handle.  For UIO devices this
+ * writes 1 to the device fd (UIO's irqcontrol convention).
+ * Returns 0 on success; -1 on a bad handle, write failure, or an
+ * unsupported handle type. */
+int
+rte_intr_enable(struct rte_intr_handle *intr_handle)
+{
+       const int value = 1;
+
+       if (!intr_handle || intr_handle->fd < 0)
+               return -1;
+
+       switch (intr_handle->type){
+       /* write to the uio fd to enable the interrupt */
+       case RTE_INTR_HANDLE_UIO:
+               if (write(intr_handle->fd, &value, sizeof(value)) < 0) {
+                       RTE_LOG(ERR, EAL,
+                               "Error enabling interrupts for fd %d\n",
+                                                       intr_handle->fd);
+                       return -1;
+               }
+               break;
+       /* not used at this moment */
+       case RTE_INTR_HANDLE_ALARM:
+               return -1;
+       /* unknown handle type */
+       default:
+               RTE_LOG(ERR, EAL,
+                       "Unknown handle type of fd %d\n",
+                                       intr_handle->fd);
+               return -1;
+       }
+
+       return 0;
+}
+
+/* Disable the interrupt for the given handle.  For UIO devices this
+ * writes 0 to the device fd.  Returns 0 on success; -1 on a bad
+ * handle, write failure, or an unsupported handle type. */
+int
+rte_intr_disable(struct rte_intr_handle *intr_handle)
+{
+       const int value = 0;
+
+       if (!intr_handle || intr_handle->fd < 0)
+               return -1;
+
+       switch (intr_handle->type){
+       /* write to the uio fd to disable the interrupt */
+       case RTE_INTR_HANDLE_UIO:
+               if (write(intr_handle->fd, &value, sizeof(value)) < 0){
+                       /* NOTE(review): message text says "enabling" -
+                        * copy/paste from rte_intr_enable */
+                       RTE_LOG(ERR, EAL,
+                               "Error enabling interrupts for fd %d\n",
+                                                       intr_handle->fd);
+                       return -1;
+               }
+               break;
+       /* not used at this moment */
+       case RTE_INTR_HANDLE_ALARM:
+               return -1;
+       /* unknown handle type */
+       default:
+               RTE_LOG(ERR, EAL,
+                       "Unknown handle type of fd %d\n",
+                                       intr_handle->fd);
+               return -1;
+       }
+
+       return 0;
+}
+
+/* Dispatch the ready events reported by epoll_wait.  Returns 0 when
+ * all events were handled, or -1 when the self-pipe became readable,
+ * telling the caller to rebuild the epoll wait list. */
+static int
+eal_intr_process_interrupts(struct epoll_event *events, int nfds)
+{
+       int n, i, active_cb, bytes_read;
+       struct rte_intr_source *src;
+       struct rte_intr_callback *cb;
+       union rte_intr_read_buffer buf;
+       struct rte_intr_callback active_cbs[32];
+
+       for (n = 0; n < nfds; n++) {
+               /**
+                * if the pipe fd is ready to read, return out to
+                * rebuild the wait list.
+                */
+               if (events[n].data.fd == intr_pipe.readfd){
+                       int r = read(intr_pipe.readfd, buf.charbuf,
+                                       sizeof(buf.charbuf));
+                       RTE_SET_USED(r);
+                       return -1;
+               }
+               rte_spinlock_lock(&intr_lock);
+               TAILQ_FOREACH(src, &intr_sources, next)
+                       if (src->intr_handle.fd ==
+                                       events[n].data.fd)
+                               break;
+               /* source may have been unregistered since epoll_wait */
+               if (src == NULL){
+                       rte_spinlock_unlock(&intr_lock);
+                       continue;
+               }
+
+               /* for this source, make a copy of all the callbacks,
+                * then unlock the lock, so the callbacks can
+                * themselves manipulate the list for future
+                * instances.
+                * NOTE(review): no bound check against the 32-entry
+                * active_cbs array - more callbacks than that on one
+                * source would overflow it.
+                */
+               active_cb = 0;
+               memset(active_cbs, 0, sizeof(active_cbs));
+               TAILQ_FOREACH(cb, &src->callbacks, next)
+                       active_cbs[active_cb++] = *cb;
+               rte_spinlock_unlock(&intr_lock);
+
+               /* set the length to be read for different handle type */
+               switch (src->intr_handle.type) {
+               case RTE_INTR_HANDLE_UIO:
+                       bytes_read = 4;
+                       break;
+               case RTE_INTR_HANDLE_ALARM:
+                       bytes_read = sizeof(uint64_t);
+                       break;
+               default:
+                       bytes_read = 1;
+                       break;
+               }
+               /**
+                * read out to clear the ready-to-be-read flag
+                * for epoll_wait.
+                */
+               bytes_read = read(events[n].data.fd, &buf, bytes_read);
+               if (bytes_read < 0) {
+                       RTE_LOG(ERR, EAL, "Error reading from file descriptor"
+                               " %d, error: %d\n", events[n].data.fd, errno);
+                       continue;
+               }
+               else if (bytes_read == 0) {
+                       RTE_LOG(ERR, EAL,
+                               "Read nothing from file descriptor %d.\n",
+                                                       events[n].data.fd);
+                       continue;
+               }
+               /**
+                * Finally, call all callbacks from the copy
+                * we made earlier.
+                * NOTE(review): src is dereferenced here after the
+                * lock was dropped - a concurrent unregister could
+                * free it; confirm the intended locking discipline.
+                */
+               for (i = 0; i < active_cb; i++) {
+                       if (active_cbs[i].cb_fn == NULL)
+                               continue;
+                       active_cbs[i].cb_fn(&src->intr_handle,
+                                       active_cbs[i].cb_arg);
+               }
+       }
+
+       return 0;
+}
+
+/**
+ * It handles all the interrupts.
+ *
+ * @param pfd
+ *  epoll file descriptor.
+ * @param totalfds
+ *  The number of file descriptors added in epoll (sizes the
+ *  variable-length events array).
+ *
+ * @return
+ *  void; returns to the caller when the wait list must be rebuilt,
+ *  or when epoll_wait fails for a reason other than EINTR.
+ */
+static void
+eal_intr_handle_interrupts(int pfd, unsigned totalfds)
+{
+       struct epoll_event events[totalfds];
+       int nfds = 0;
+
+       for(;;) {
+               nfds = epoll_wait(pfd, events, totalfds,
+                       EAL_INTR_EPOLL_WAIT_FOREVER);
+               /* epoll_wait fail */
+               if (nfds < 0) {
+                       if (errno == EINTR)
+                               continue;
+                       RTE_LOG(ERR, EAL,
+                               "epoll_wait returns with fail\n");
+                       return;
+               }
+               /* epoll_wait timeout: cannot happen with an infinite
+                * timeout, kept for safety */
+               else if (nfds == 0)
+                       continue;
+               /* epoll_wait has at least one fd ready to read */
+               if (eal_intr_process_interrupts(events, nfds) < 0)
+                       return;
+       }
+}
+
+/**
+ * It builds/rebuilds up the epoll file descriptor with all the
+ * file descriptors being waited on. Then handles the interrupts.
+ * The epoll fd is recreated from scratch each time
+ * eal_intr_handle_interrupts() returns, i.e. whenever the source
+ * list changed and the self-pipe was written.
+ *
+ * @param arg
+ *  pointer. (unused)
+ *
+ * @return
+ *  never return;
+ */
+static __attribute__((noreturn)) void *
+eal_intr_thread_main(__rte_unused void *arg)
+{
+       struct epoll_event ev;
+
+       /* host thread, never break out */
+       for (;;) {
+               /* build up the epoll fd with all descriptors we are to
+                * wait on then pass it to the handle_interrupts function
+                */
+               static struct epoll_event pipe_event = {
+                       .events = EPOLLIN | EPOLLPRI,
+               };
+               struct rte_intr_source *src;
+               unsigned numfds = 0;
+
+               /* create epoll fd */
+               int pfd = epoll_create(1);
+               if (pfd < 0)
+                       rte_panic("Cannot create epoll instance\n");
+
+               pipe_event.data.fd = intr_pipe.readfd;
+               /**
+                * add pipe fd into wait list, this pipe is used to
+                * rebuild the wait list.
+                */
+               if (epoll_ctl(pfd, EPOLL_CTL_ADD, intr_pipe.readfd,
+                                               &pipe_event) < 0) {
+                       rte_panic("Error adding fd to %d epoll_ctl, %s\n",
+                                       intr_pipe.readfd, strerror(errno));
+               }
+               numfds++;
+
+               rte_spinlock_lock(&intr_lock);
+
+               TAILQ_FOREACH(src, &intr_sources, next) {
+                       if (src->callbacks.tqh_first == NULL)
+                               continue; /* skip those with no callbacks */
+                       ev.events = EPOLLIN | EPOLLPRI;
+                       ev.data.fd = src->intr_handle.fd;
+
+                       /**
+                        * add all the uio device file descriptor
+                        * into wait list.
+                        */
+                       if (epoll_ctl(pfd, EPOLL_CTL_ADD,
+                                       src->intr_handle.fd, &ev) < 0){
+                               rte_panic("Error adding fd %d epoll_ctl, %s\n",
+                                       src->intr_handle.fd, strerror(errno));
+                       }
+                       else
+                               numfds++;
+               }
+               rte_spinlock_unlock(&intr_lock);
+               /* serve the interrupt */
+               eal_intr_handle_interrupts(pfd, numfds);
+
+               /**
+                * when we return, we need to rebuild the
+                * list of fds to monitor.
+                */
+               close(pfd);
+       }
+}
+
+/* Initialize the interrupt subsystem: set up the source list, the
+ * self-pipe used to wake the handler thread, and spawn the thread.
+ * Returns 0 on success, -1 if the pipe cannot be created, otherwise
+ * the negated pthread_create() error number (pthread_create returns
+ * an errno value, not -1). */
+int
+rte_eal_intr_init(void)
+{
+       int ret = 0;
+
+       /* init the global interrupt source head */
+       TAILQ_INIT(&intr_sources);
+
+       /**
+        * create a pipe which will be waited by epoll and notified to
+        * rebuild the wait list of epoll.
+        */
+       if (pipe(intr_pipe.pipefd) < 0)
+               return -1;
+
+       /* create the host thread to wait/handle the interrupt */
+       ret = pthread_create(&intr_thread, NULL,
+                       eal_intr_thread_main, NULL);
+       if (ret != 0)
+               /* NOTE(review): the pipe fds are not closed on this
+                * failure path */
+               RTE_LOG(ERR, EAL,
+                       "Failed to create thread for interrupt handling\n");
+
+       return -ret;
+}
+
diff --git a/lib/librte_eal/linuxapp/eal/eal_lcore.c b/lib/librte_eal/linuxapp/eal/eal_lcore.c
new file mode 100644 (file)
index 0000000..dde9bc1
--- /dev/null
@@ -0,0 +1,192 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdarg.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <string.h>
+#include <errno.h>
+#include <sys/queue.h>
+
+#include <rte_log.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_launch.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_debug.h>
+
+#include "eal_private.h"
+
+#define PROC_CPUINFO "/proc/cpuinfo"
+/* NOTE(review): defined empty and unused in this file */
+#define PROC_PROCESSOR_FMT ""
+
+/* parse one line and try to match "processor : %d"; on success store
+ * the id in *lcore_id and return 0, else return -1. */
+static int
+parse_processor_id(const char *buf, unsigned *lcore_id)
+{
+       static const char _processor[] = "processor";
+       const char *s;
+
+       if (strncmp(buf, _processor, sizeof(_processor) - 1) != 0)
+               return -1;
+
+       s = strchr(buf, ':');
+       if (s == NULL)
+               return -1;
+
+       errno = 0;
+       *lcore_id = strtoul(s+1, NULL, 10);
+       if (errno != 0) {
+               /* -1 wraps to UINT_MAX in the unsigned out-parameter */
+               *lcore_id = -1;
+               return -1;
+       }
+
+       return 0;
+}
+
+/* parse one line and try to match "physical id : %d"; on success
+ * store the value in *socket_id and return 0, else return -1. */
+static int
+parse_socket_id(const char *buf, unsigned *socket_id)
+{
+       static const char _physical_id[] = "physical id";
+       const char *s;
+
+       if (strncmp(buf, _physical_id, sizeof(_physical_id) - 1) != 0)
+               return -1;
+
+       s = strchr(buf, ':');
+       if (s == NULL)
+               return -1;
+
+       errno = 0;
+       *socket_id = strtoul(s+1, NULL, 10);
+       if (errno != 0)
+               return -1;
+
+       return 0;
+}
+
+/*
+ * Parse /proc/cpuinfo to detect the logical processors available and
+ * the socket each belongs to.  Fills lcore_config[] (detected flag,
+ * socket id) and the global configuration's lcore_role/lcore_count.
+ * Each per-cpu stanza in /proc/cpuinfo ends with a blank line; the
+ * lcore and socket ids parsed from the stanza are committed then.
+ */
+int
+rte_eal_cpu_init(void)
+{
+       struct rte_config *config;
+       FILE *f;
+       char buf[BUFSIZ];
+       unsigned lcore_id = 0;
+       unsigned socket_id = 0;
+       unsigned count = 0;
+
+       /* get pointer to global configuration */
+       config = rte_eal_get_configuration();
+
+       /* open /proc/cpuinfo */
+       f = fopen(PROC_CPUINFO, "r");
+       if (f == NULL) {
+               RTE_LOG(ERR, EAL, "%s(): Cannot find "PROC_CPUINFO"\n", __func__);
+               return -1;
+       }
+
+       /*
+        * browse lines of /proc/cpuinfo and fill lcore entries in
+        * the global configuration
+        */
+       while (fgets(buf, sizeof(buf), f) != NULL) {
+
+               if (parse_processor_id(buf, &lcore_id) == 0)
+                       continue;
+
+               if (parse_socket_id(buf, &socket_id) == 0)
+                       continue;
+
+               /* blank line: end of one processor stanza, commit it */
+               if (buf[0] == '\n') {
+                       RTE_LOG(DEBUG, EAL, "Detected lcore %u on socket %u\n",
+                               lcore_id, socket_id);
+                       if (lcore_id >= RTE_MAX_LCORE) {
+                               RTE_LOG(DEBUG, EAL,
+                                       "Skip lcore %u >= RTE_MAX_LCORE\n",
+                                         lcore_id);
+                               continue;
+                       }
+
+                       /*
+                        * In a virtualization environment, the socket ID
+                        * reported by the system may not be linked to a real
+                        * physical socket ID, and may be incoherent. So in this
+                        * case, a default socket ID of 0 is assigned.
+                        */
+                       if (socket_id >= RTE_MAX_NUMA_NODES) {
+#ifdef CONFIG_RTE_EAL_ALLOW_INV_SOCKET_ID
+                               socket_id = 0;
+#else
+                               rte_panic("Socket ID (%u) is greater than "
+                                   "RTE_MAX_NUMA_NODES (%d)\n",
+                                   socket_id, RTE_MAX_NUMA_NODES);
+#endif
+                       }
+
+                       lcore_config[lcore_id].detected = 1;
+                       lcore_config[lcore_id].socket_id = socket_id;
+
+               }
+       }
+
+       fclose(f);
+
+       /* disable lcores that were not detected */
+       RTE_LCORE_FOREACH(lcore_id) {
+
+               if (lcore_config[lcore_id].detected == 0) {
+                       RTE_LOG(DEBUG, EAL, "Skip lcore %u (not detected)\n",
+                               lcore_id);
+                       config->lcore_role[lcore_id] = ROLE_OFF;
+               }
+               else
+                       count ++;
+       }
+
+       config->lcore_count = count;
+
+       return 0;
+}
diff --git a/lib/librte_eal/linuxapp/eal/eal_log.c b/lib/librte_eal/linuxapp/eal/eal_log.c
new file mode 100644 (file)
index 0000000..3d3d7ba
--- /dev/null
@@ -0,0 +1,137 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <string.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <sys/types.h>
+#include <syslog.h>
+#include <sys/queue.h>
+
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_launch.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_spinlock.h>
+#include <rte_log.h>
+
+#include "eal_private.h"
+
+/*
+ * default log function, used once mempool (hence log history) is
+ * available
+ */
+/*
+ * Cookie write callback for the default EAL log stream.
+ *
+ * Records the message in the in-memory log history, echoes it to
+ * stdout, then forwards a NUL-terminated (possibly truncated) copy to
+ * syslog. Returns the number of bytes written to stdout by fwrite().
+ */
+static ssize_t
+console_log_write(__attribute__((unused)) void *c, const char *buf, size_t size)
+{
+       char copybuf[BUFSIZ + 1];
+       ssize_t ret;
+       uint32_t loglevel;
+
+       /* add this log in history */
+       rte_log_add_in_history(buf, size);
+
+       /* write on stdout */
+       ret = fwrite(buf, 1, size, stdout);
+       fflush(stdout);
+
+       /* truncate message if too big (should not happen) */
+       if (size > BUFSIZ)
+               size = BUFSIZ;
+
+       /* Syslog error levels are from 0 to 7, so subtract 1 to convert */
+       /* NOTE(review): assumes rte_log_cur_msg_loglevel() >= 1 so the
+        * subtraction cannot wrap below LOG_EMERG -- confirm in rte_log */
+       loglevel = rte_log_cur_msg_loglevel() - 1;
+       memcpy(copybuf, buf, size);
+       copybuf[size] = '\0';
+
+       /* write on syslog too */
+       syslog(loglevel, "%s", copybuf);
+
+       return ret;
+}
+
+/* Cookie read callback: the log stream is write-only, so a read
+ * always reports end-of-file (zero bytes). */
+static ssize_t
+console_log_read(__attribute__((unused)) void *c,
+                __attribute__((unused)) char *buf,
+                __attribute__((unused)) size_t size)
+{
+       ssize_t nread = 0; /* nothing can ever be read back */
+
+       return nread;
+}
+
+/* Cookie seek callback: seeking a console stream is meaningless,
+ * so every request fails. */
+static int
+console_log_seek(__attribute__((unused)) void *c,
+                __attribute__((unused)) off64_t *offset,
+                __attribute__((unused)) int whence)
+{
+       int rc = -1; /* unconditionally refuse to seek */
+
+       return rc;
+}
+
+/* Cookie close callback: no per-stream resources to release,
+ * so closing always succeeds. */
+static int
+console_log_close(__attribute__((unused)) void *c)
+{
+       int rc = 0; /* nothing to tear down */
+
+       return rc;
+}
+
+/* I/O callback table handed to fopencookie(): routes stream writes to
+ * the console/syslog handler above; read/seek/close are stubs. */
+static cookie_io_functions_t console_log_func = {
+       .read  = console_log_read,
+       .write = console_log_write,
+       .seek  = console_log_seek,
+       .close = console_log_close
+};
+
+/*
+ * set the log to default function, called during eal init process,
+ * once memzones are available.
+ */
+/*
+ * Set the log to the default function, called during the EAL init
+ * process, once memzones are available.
+ *
+ * Creates a cookie-backed stdio stream that feeds console_log_write(),
+ * opens syslog, and registers the stream with the common log layer.
+ * Returns 0 on success, -1 on failure.
+ */
+int
+rte_eal_log_init(void)
+{
+       FILE *log_stream;
+
+       log_stream = fopencookie(NULL, "w+", console_log_func);
+       if (log_stream == NULL)
+               return -1;
+
+       openlog("rte", LOG_NDELAY | LOG_PID, LOG_DAEMON);
+
+       if (rte_eal_common_log_init(log_stream) < 0) {
+               /* fix: don't leak the cookie stream on failure */
+               fclose(log_stream);
+               return -1;
+       }
+
+       return 0;
+}
+
diff --git a/lib/librte_eal/linuxapp/eal/eal_memory.c b/lib/librte_eal/linuxapp/eal/eal_memory.c
new file mode 100644 (file)
index 0000000..a47dab4
--- /dev/null
@@ -0,0 +1,796 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <errno.h>
+#include <stdarg.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <string.h>
+#include <stdarg.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/queue.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <limits.h>
+#include <errno.h>
+#include <sys/ioctl.h>
+
+#include <rte_log.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_launch.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_common.h>
+#include <rte_string_fns.h>
+
+#include "eal_private.h"
+#include "eal_internal_cfg.h"
+#include "eal_fs_paths.h"
+#include "eal_hugepages.h"
+
+/**
+ * @file
+ * Huge page mapping under linux
+ *
+ * To reserve a big contiguous amount of memory, we use the hugepage
+ * feature of linux. For that, we need to have hugetlbfs mounted. This
+ * code will create many files in this directory (one per page) and
+ * map them in virtual memory. For each page, we will retrieve its
+ * physical address and remap it in order to have a virtual contiguous
+ * zone as well as a physical contiguous zone.
+ */
+
+
+#define RANDOMIZE_VA_SPACE_FILE "/proc/sys/kernel/randomize_va_space"
+
+/*
+ * Check whether address-space layout randomization is enabled in
+ * the kernel. This is important for multi-process as it can prevent
+ * two processes mapping data to the same virtual address
+ * Returns:
+ *    0 - address space randomization disabled
+ *    1/2 - address space randomization enabled
+ *    negative error code on error
+ */
+/*
+ * Read /proc/sys/kernel/randomize_va_space and decode its first
+ * character: returns 0 (disabled), 1 or 2 (enabled), or a negative
+ * errno-style code on error.
+ */
+static int
+aslr_enabled(void)
+{
+       char c;
+       int retval, err, fd = open(RANDOMIZE_VA_SPACE_FILE, O_RDONLY);
+       if (fd < 0)
+               return -errno;
+       retval = read(fd, &c, 1);
+       /* fix: save errno before close(), which may clobber it */
+       err = errno;
+       close(fd);
+       if (retval < 0)
+               return -err;
+       if (retval == 0)
+               return -EIO;
+       switch (c) {
+               case '0' : return 0;
+               case '1' : return 1;
+               case '2' : return 2;
+               default: return -EINVAL;
+       }
+}
+
+/*
+ * Try to mmap *size bytes in /dev/zero. If it is successful, return the
+ * pointer to the mmap'd area and keep *size unmodified. Else, retry
+ * with a smaller zone: decrease *size by hugepage_sz until it reaches
+ * 0. In this case, return NULL. Note: this function returns an address
+ * which is a multiple of hugepage size.
+ */
+/*
+ * Probe for a free virtual area of up to *size bytes (plus one extra
+ * hugepage for alignment slack) by mmap'ing /dev/zero read-only, then
+ * immediately unmapping it. On success, returns the probed address
+ * rounded up to a hugepage boundary and leaves *size at the largest
+ * length that mapped; shrinks *size by hugepage_sz on each failed
+ * attempt. Returns NULL when no area can be found.
+ *
+ * NOTE(review): the returned range is unmapped again before returning,
+ * so the caller relies on the kernel re-granting the same range on its
+ * next mmap -- not guaranteed, only likely.
+ */
+static void *
+get_virtual_area(uint64_t *size, uint64_t hugepage_sz)
+{
+       void *addr;
+       int fd;
+       long aligned_addr;
+
+       RTE_LOG(INFO, EAL, "Ask a virtual area of 0x%"PRIx64" bytes\n", *size);
+
+       fd = open("/dev/zero", O_RDONLY);
+       if (fd < 0){
+               RTE_LOG(ERR, EAL, "Cannot open /dev/zero\n");
+               return NULL;
+       }
+       /* retry with progressively smaller sizes until a mapping fits */
+       do {
+               addr = mmap(NULL, (*size) + hugepage_sz, PROT_READ, MAP_PRIVATE, fd, 0);
+               if (addr == MAP_FAILED)
+                       *size -= hugepage_sz;
+       } while (addr == MAP_FAILED && *size > 0);
+
+       if (addr == MAP_FAILED) {
+               close(fd);
+               RTE_LOG(INFO, EAL, "Cannot get a virtual area\n");
+               return NULL;
+       }
+
+       /* release the probe mapping; only the address is kept */
+       munmap(addr, (*size) + hugepage_sz);
+       close(fd);
+
+       /* align addr to a huge page size boundary */
+       aligned_addr = (long)addr;
+       aligned_addr += (hugepage_sz - 1);
+       aligned_addr &= (~(hugepage_sz - 1));
+       addr = (void *)(aligned_addr);
+
+       RTE_LOG(INFO, EAL, "Virtual area found at %p (size = 0x%"PRIx64")\n",
+               addr, *size);
+
+       return addr;
+}
+
+/*
+ * Mmap all hugepages of hugepage table: it first open a file in
+ * hugetlbfs, then mmap() hugepage_sz data in it. If orig is set, the
+ * virtual address is stored in hugepg_tbl[i].orig_va, else it is stored
+ * in hugepg_tbl[i].final_va. The second mapping (when orig is 0) tries to
+ * map contiguous physical blocks in contiguous virtual blocks.
+ */
+/*
+ * Map every hugepage described by hugepg_tbl/hpi into this process.
+ *
+ * First pass (orig != 0): assigns each page its backing file in
+ * hugetlbfs, mmaps it at a kernel-chosen address (stored in orig_va)
+ * and zero-fills it to force allocation.
+ * Second pass (orig == 0): remaps pages so that physically contiguous
+ * runs also become virtually contiguous (stored in final_va), using
+ * get_virtual_area() to reserve a large enough address range.
+ *
+ * Returns 0 on success, -1 on open/mmap failure.
+ */
+static int
+map_all_hugepages(struct hugepage *hugepg_tbl,
+               struct hugepage_info *hpi, int orig)
+{
+       int fd;
+       unsigned i;
+       void *virtaddr;
+       void *vma_addr = NULL;
+       uint64_t vma_len = 0;
+
+       for (i = 0; i < hpi->num_pages; i++) {
+               uint64_t hugepage_sz = hpi->hugepage_sz;
+
+               if (orig) {
+                       /* first pass: pick the hugetlbfs file for this page */
+                       hugepg_tbl[i].file_id = i;
+                       hugepg_tbl[i].size = hugepage_sz;
+                       eal_get_hugefile_path(hugepg_tbl[i].filepath,
+                                       sizeof(hugepg_tbl[i].filepath), hpi->hugedir,
+                                       hugepg_tbl[i].file_id);
+                       hugepg_tbl[i].filepath[sizeof(hugepg_tbl[i].filepath) - 1] = '\0';
+               }
+#ifndef RTE_ARCH_X86_64
+               /* for 32-bit systems, don't remap 1G pages, just reuse original
+                * map address as final map address.
+                */
+               else if (hugepage_sz == RTE_PGSIZE_1G){
+                       hugepg_tbl[i].final_va = hugepg_tbl[i].orig_va;
+                       hugepg_tbl[i].orig_va = NULL;
+                       continue;
+               }
+#endif
+               else if (vma_len == 0) {
+                       unsigned j, num_pages;
+
+                       /* reserve a virtual area for next contiguous
+                        * physical block: count the number of
+                        * contiguous physical pages. */
+                       for (j = i+1; j < hpi->num_pages ; j++) {
+                               if (hugepg_tbl[j].physaddr !=
+                                   hugepg_tbl[j-1].physaddr + hugepage_sz)
+                                       break;
+                       }
+                       num_pages = j - i;
+                       vma_len = num_pages * hugepage_sz;
+
+                       /* get the biggest virtual memory area up to
+                        * vma_len. If it fails, vma_addr is NULL, so
+                        * let the kernel provide the address. */
+                       vma_addr = get_virtual_area(&vma_len, hpi->hugepage_sz);
+                       if (vma_addr == NULL)
+                               vma_len = hugepage_sz;
+               }
+
+               fd = open(hugepg_tbl[i].filepath, O_CREAT | O_RDWR, 0755);
+               if (fd < 0) {
+                       RTE_LOG(ERR, EAL, "%s(): open failed: %s", __func__,
+                                       strerror(errno));
+                       return -1;
+               }
+
+               /* vma_addr is only a hint (no MAP_FIXED): the kernel may
+                * still place the page elsewhere */
+               virtaddr = mmap(vma_addr, hugepage_sz, PROT_READ | PROT_WRITE,
+                               MAP_SHARED, fd, 0);
+               if (virtaddr == MAP_FAILED) {
+                       RTE_LOG(ERR, EAL, "%s(): mmap failed: %s", __func__,
+                                       strerror(errno));
+                       close(fd);
+                       return -1;
+               }
+               if (orig) {
+                       hugepg_tbl[i].orig_va = virtaddr;
+                       /* touch the page so the kernel really allocates it */
+                       memset(virtaddr, 0, hugepage_sz);
+               }
+               else {
+                       hugepg_tbl[i].final_va = virtaddr;
+               }
+
+               /* advance the hint through the reserved area */
+               vma_addr = (char *)vma_addr + hugepage_sz;
+               vma_len -= hugepage_sz;
+               close(fd);
+       }
+       return 0;
+}
+
+/* Unmap all hugepages from original mapping. */
+static int
+unmap_all_hugepages_orig(struct hugepage *hugepg_tbl, struct hugepage_info *hpi)
+{
+       unsigned i;
+       for (i = 0; i < hpi->num_pages; i++) {
+               if (hugepg_tbl[i].orig_va) {
+                       munmap(hugepg_tbl[i].orig_va, hpi->hugepage_sz);
+                       hugepg_tbl[i].orig_va = NULL;
+               }
+       }
+       return 0;
+}
+
+/*
+ * For each hugepage in hugepg_tbl, fill the physaddr value. We find
+ * it by browsing the /proc/self/pagemap special file.
+ */
+/*
+ * For each hugepage in hugepg_tbl, fill the physaddr value. We find
+ * it by browsing the /proc/self/pagemap special file: each standard
+ * page has one 8-byte entry, indexed by its virtual page frame number.
+ *
+ * Returns 0 on success, -1 on open/seek/read failure.
+ */
+static int
+find_physaddr(struct hugepage *hugepg_tbl, struct hugepage_info *hpi)
+{
+       int fd;
+       unsigned i;
+       uint64_t page;
+       unsigned long virt_pfn;
+       int page_size;
+
+       /* standard page size */
+       page_size = getpagesize();
+
+       fd = open("/proc/self/pagemap", O_RDONLY);
+       if (fd < 0) {
+               RTE_LOG(ERR, EAL, "%s(): cannot open /proc/self/pagemap: %s",
+                       __func__, strerror(errno));
+               return -1;
+       }
+
+       for (i = 0; i < hpi->num_pages; i++) {
+               off_t offset;
+               virt_pfn = (unsigned long)hugepg_tbl[i].orig_va /
+                       page_size;
+               offset = sizeof(uint64_t) * virt_pfn;
+               if (lseek(fd, offset, SEEK_SET) != offset){
+                       RTE_LOG(ERR, EAL, "%s(): seek error in /proc/self/pagemap: %s",
+                                       __func__, strerror(errno));
+                       close(fd);
+                       return -1;
+               }
+               /* fix: a short read would leave 'page' partly garbage, so
+                * require the full 8-byte entry instead of just >= 0 */
+               if (read(fd, &page, sizeof(uint64_t)) != sizeof(uint64_t)) {
+                       RTE_LOG(ERR, EAL, "%s(): cannot read /proc/self/pagemap: %s",
+                                       __func__, strerror(errno));
+                       close(fd);
+                       return -1;
+               }
+
+               /*
+                * the pfn (page frame number) are bits 0-54 (see
+                * pagemap.txt in linux Documentation)
+                */
+               hugepg_tbl[i].physaddr = ((page & 0x7fffffffffffffULL) * page_size);
+       }
+       close(fd);
+       return 0;
+}
+
+/*
+ * Parse /proc/self/numa_maps to get the NUMA socket ID for each huge
+ * page.
+ */
+/*
+ * Parse /proc/self/numa_maps to get the NUMA socket ID for each huge
+ * page: match each hugepage-backed line's start address against the
+ * orig_va recorded in hugepg_tbl and record its "N<node>=" value.
+ *
+ * Returns 0 on success (or when numa_maps is unavailable, in which
+ * case socket IDs are left as-is), -1 on parse error or when not all
+ * pages could be resolved.
+ */
+static int
+find_numasocket(struct hugepage *hugepg_tbl, struct hugepage_info *hpi)
+{
+       int socket_id;
+       char *end, *nodestr;
+       unsigned i, hp_count = 0;
+       uint64_t virt_addr;
+       char buf[BUFSIZ];
+       char hugedir_str[PATH_MAX];
+       FILE *f;
+
+       f = fopen("/proc/self/numa_maps", "r");
+       if (f == NULL) {
+               /* fix: add the missing separator space and newline in the
+                * concatenated message */
+               RTE_LOG(INFO, EAL, "cannot open /proc/self/numa_maps, "
+                               "consider that all memory is in socket_id 0\n");
+               return 0;
+       }
+
+       rte_snprintf(hugedir_str, sizeof(hugedir_str),
+                       "%s/", hpi->hugedir);
+
+       /* parse numa map */
+       while (fgets(buf, sizeof(buf), f) != NULL) {
+
+               /* ignore non huge page */
+               if (strstr(buf, " huge ") == NULL &&
+                               strstr(buf, hugedir_str) == NULL)
+                       continue;
+
+               /* get zone addr */
+               virt_addr = strtoull(buf, &end, 16);
+               if (virt_addr == 0 || end == buf) {
+                       RTE_LOG(ERR, EAL, "%s(): error in numa_maps parsing\n", __func__);
+                       goto error;
+               }
+
+               /* get node id (socket id) */
+               nodestr = strstr(buf, " N");
+               if (nodestr == NULL) {
+                       RTE_LOG(ERR, EAL, "%s(): error in numa_maps parsing\n", __func__);
+                       goto error;
+               }
+               nodestr += 2;
+               end = strstr(nodestr, "=");
+               if (end == NULL) {
+                       RTE_LOG(ERR, EAL, "%s(): error in numa_maps parsing\n", __func__);
+                       goto error;
+               }
+               end[0] = '\0';
+               end = NULL;
+
+               socket_id = strtoul(nodestr, &end, 0);
+               if ((nodestr[0] == '\0') || (end == NULL) || (*end != '\0')) {
+                       RTE_LOG(ERR, EAL, "%s(): error in numa_maps parsing\n", __func__);
+                       goto error;
+               }
+
+               /* if we find this page in our mappings, set socket_id */
+               for (i = 0; i < hpi->num_pages; i++) {
+                       void *va = (void *)(unsigned long)virt_addr;
+                       if (hugepg_tbl[i].orig_va == va) {
+                               hugepg_tbl[i].socket_id = socket_id;
+                               hp_count++;
+                       }
+               }
+       }
+       /* every mapped hugepage must have been matched above */
+       if (hp_count < hpi->num_pages)
+               goto error;
+       fclose(f);
+       return 0;
+
+error:
+       fclose(f);
+       return -1;
+}
+
+/*
+ * Sort the hugepg_tbl by physical address (lower addresses first). We
+ * use a slow algorithm, but we won't have millions of pages, and this
+ * is only done at init time.
+ */
+/*
+ * Order hugepg_tbl by ascending physical address with a selection
+ * sort. Deliberately simple: the table is small and this only runs
+ * at init time. Returns 0 on success, -1 if no minimum was found
+ * (should not happen).
+ */
+static int
+sort_by_physaddr(struct hugepage *hugepg_tbl, struct hugepage_info *hpi)
+{
+       unsigned cur, scan;
+
+       for (cur = 0; cur < hpi->num_pages; cur++) {
+               uint64_t best_addr = 0;
+               int best_idx = -1;
+               struct hugepage swap_buf;
+
+               /* locate the smallest physaddr among entries [cur..end) */
+               for (scan = cur; scan < hpi->num_pages; scan++) {
+                       uint64_t pa = hugepg_tbl[scan].physaddr;
+
+                       if (best_addr == 0 || pa < best_addr) {
+                               best_addr = pa;
+                               best_idx = scan;
+                       }
+               }
+
+               /* should not happen */
+               if (best_idx == -1) {
+                       RTE_LOG(ERR, EAL, "%s(): error in physaddr sorting\n", __func__);
+                       return -1;
+               }
+
+               /* move the minimum into position 'cur' */
+               swap_buf = hugepg_tbl[best_idx];
+               hugepg_tbl[best_idx] = hugepg_tbl[cur];
+               hugepg_tbl[cur] = swap_buf;
+       }
+       return 0;
+}
+
+/*
+ * Uses mmap to create a shared memory area for storage of data
+ * Used in this file to store the hugepage file map on disk
+ */
+/*
+ * Use mmap to create a shared memory area of mem_size bytes backed by
+ * 'filename' (used in this file to store the hugepage map on disk).
+ * Returns the mapping on success, NULL on failure.
+ */
+static void *
+create_shared_memory(const char *filename, const size_t mem_size)
+{
+       void *retval;
+       int fd = open(filename, O_CREAT | O_RDWR, 0666);
+       if (fd < 0)
+               return NULL;
+       if (ftruncate(fd, mem_size) < 0) {
+               close(fd);
+               return NULL;
+       }
+       retval = mmap(NULL, mem_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+       close(fd);
+       /* fix: mmap() reports failure with MAP_FAILED, not NULL; callers
+        * test this function's result against NULL, so normalize it */
+       if (retval == MAP_FAILED)
+               return NULL;
+       return retval;
+}
+
+/*
+ * This function takes in the list of hugepage sizes and the
+ * number of pages thereof, and calculates the best number of
+ * pages of each size to fulfill the request for <memory> ram
+ */
+/*
+ * This function takes in the list of hugepage sizes and the
+ * number of pages thereof, and calculates the best number of
+ * pages of each size to fulfill the request for <memory> ram.
+ *
+ * Fills hp_used[i] with the page count chosen for each size and
+ * returns the total number of pages selected, or -1 when no
+ * hugepage size information is available.
+ *
+ * NOTE(review): assumes hp_info is ordered so that earlier entries
+ * are preferred (larger sizes first) -- confirm against the caller.
+ */
+static int
+calc_num_pages(uint64_t memory,
+               struct hugepage_info *hp_info,
+               struct hugepage_info *hp_used,
+               unsigned num_hp_info)
+{
+       unsigned i = 0;
+       int total_num_pages = 0;
+       if (num_hp_info == 0)
+               return -1;
+
+       for (i = 0; i < num_hp_info; i++){
+               hp_used[i].hugepage_sz = hp_info[i].hugepage_sz;
+               hp_used[i].hugedir = hp_info[i].hugedir;
+               /* take as many pages of this size as the remaining request
+                * needs, capped by what is actually available */
+               hp_used[i].num_pages = RTE_MIN(memory / hp_info[i].hugepage_sz,
+                               hp_info[i].num_pages);
+
+               memory -= hp_used[i].num_pages * hp_used[i].hugepage_sz;
+               total_num_pages += hp_used[i].num_pages;
+
+               /* check if we have met all memory requests */
+               if (memory == 0)
+                       break;
+               /* check if we have any more pages left at this size, if so
+                * move on to next size */
+               if (hp_used[i].num_pages == hp_info[i].num_pages)
+                       continue;
+               /* At this point we know that there are more pages available that are
+                * bigger than the memory we want, so lets see if we can get enough
+                * from other page sizes.
+                */
+               unsigned j;
+               uint64_t remaining_mem = 0;
+               for (j = i+1; j < num_hp_info; j++)
+                       remaining_mem += hp_info[j].hugepage_sz * hp_info[j].num_pages;
+
+               /* is there enough other memory, if not allocate another page and quit*/
+               if (remaining_mem < memory){
+                       memory -= hp_info[i].hugepage_sz;
+                       hp_used[i].num_pages++;
+                       total_num_pages++;
+                       break; /* we are done */
+               }
+       }
+       return total_num_pages;
+}
+
+/*
+ * Prepare physical memory mapping: fill configuration structure with
+ * these infos, return 0 on success.
+ *  1. map N huge pages in separate files in hugetlbfs
+ *  2. find associated physical addr
+ *  3. find associated NUMA socket ID
+ *  4. sort all huge pages by physical address
+ *  5. remap these N huge pages in the correct order
+ *  6. unmap the first mapping
+ *  7. fill memsegs in configuration with contiguous zones
+ */
+/*
+ * Prepare physical memory mapping: fill configuration structure with
+ * these infos, return 0 on success.
+ *  1. map N huge pages in separate files in hugetlbfs
+ *  2. find associated physical addr
+ *  3. find associated NUMA socket ID
+ *  4. sort all huge pages by physical address
+ *  5. remap these N huge pages in the correct order
+ *  6. unmap the first mapping
+ *  7. fill memsegs in configuration with contiguous zones
+ */
+static int
+rte_eal_hugepage_init(void)
+{
+       struct rte_mem_config *mcfg;
+       struct hugepage *hugepage;
+       struct hugepage_info used_hp[MAX_HUGEPAGE_SIZES];
+       int i, j, new_memseg;
+       int nrpages;
+       void *addr;
+
+       memset(used_hp, 0, sizeof(used_hp));
+
+       /* get pointer to global configuration */
+       mcfg = rte_eal_get_configuration()->mem_config;
+
+       /* for debug purposes, hugetlbfs can be disabled */
+       if (internal_config.no_hugetlbfs) {
+               addr = malloc(internal_config.memory);
+               /* fix: malloc() can fail; don't record a NULL segment */
+               if (addr == NULL) {
+                       RTE_LOG(ERR, EAL, "%s(): malloc failed\n", __func__);
+                       return -1;
+               }
+               mcfg->memseg[0].phys_addr = (unsigned long)addr;
+               mcfg->memseg[0].addr = addr;
+               mcfg->memseg[0].len = internal_config.memory;
+               mcfg->memseg[0].socket_id = 0;
+               return 0;
+       }
+
+       nrpages = calc_num_pages(internal_config.memory,
+                       &internal_config.hugepage_info[0], &used_hp[0],
+                       internal_config.num_hugepage_sizes);
+       /* fix: calc_num_pages() returns -1 when no hugepage sizes are
+        * known; don't use a negative count as an allocation size */
+       if (nrpages < 0)
+               return -1;
+       for (i = 0; i < (int)internal_config.num_hugepage_sizes; i++)
+               RTE_LOG(INFO, EAL, "Requesting %u pages of size %"PRIu64"\n",
+                               used_hp[i].num_pages, used_hp[i].hugepage_sz);
+
+       /* shared table describing every mapped page, also readable by
+        * secondary processes */
+       hugepage = create_shared_memory(eal_hugepage_info_path(),
+                       nrpages * sizeof(struct hugepage));
+       if (hugepage == NULL)
+               return -1;
+       memset(hugepage, 0, nrpages * sizeof(struct hugepage));
+
+       unsigned hp_offset = 0; /* where we start the current page size entries */
+       for (i = 0; i < (int)internal_config.num_hugepage_sizes; i ++){
+               struct hugepage_info *hpi = &used_hp[i];
+               if (hpi->num_pages == 0)
+                       continue;
+
+               /* steps 1-6 of the sequence documented above */
+               if (map_all_hugepages(&hugepage[hp_offset], hpi, 1) < 0){
+                       RTE_LOG(DEBUG, EAL, "Failed to mmap %u MB hugepages\n",
+                                       (unsigned)(hpi->hugepage_sz / 0x100000));
+                       goto fail;
+               }
+
+               if (find_physaddr(&hugepage[hp_offset], hpi) < 0){
+                       RTE_LOG(DEBUG, EAL, "Failed to find phys addr for %u MB pages\n",
+                                       (unsigned)(hpi->hugepage_sz / 0x100000));
+                       goto fail;
+               }
+
+               if (find_numasocket(&hugepage[hp_offset], hpi) < 0){
+                       RTE_LOG(DEBUG, EAL, "Failed to find NUMA socket for %u MB pages\n",
+                                       (unsigned)(hpi->hugepage_sz / 0x100000));
+                       goto fail;
+               }
+
+               if (sort_by_physaddr(&hugepage[hp_offset], hpi) < 0)
+                       goto fail;
+
+               if (map_all_hugepages(&hugepage[hp_offset], hpi, 0) < 0){
+                       RTE_LOG(DEBUG, EAL, "Failed to remap %u MB pages\n",
+                                       (unsigned)(hpi->hugepage_sz / 0x100000));
+                       goto fail;
+               }
+
+               if (unmap_all_hugepages_orig(&hugepage[hp_offset], hpi) < 0)
+                       goto fail;
+
+               /* we have processed a num of hugepages of this size, so inc offset */
+               hp_offset += hpi->num_pages;
+       }
+
+       /* step 7: coalesce pages into memsegs; a new memseg starts
+        * whenever socket, size, physical or virtual contiguity breaks */
+       memset(mcfg->memseg, 0, sizeof(mcfg->memseg));
+       j = -1;
+       for (i = 0; i < nrpages; i++) {
+               new_memseg = 0;
+
+               /* if this is a new section, create a new memseg */
+               if (i == 0)
+                       new_memseg = 1;
+               else if (hugepage[i].socket_id != hugepage[i-1].socket_id)
+                       new_memseg = 1;
+               else if (hugepage[i].size != hugepage[i-1].size)
+                       new_memseg = 1;
+               else if ((hugepage[i].physaddr - hugepage[i-1].physaddr) !=
+                        hugepage[i].size)
+                       new_memseg = 1;
+               else if (((unsigned long)hugepage[i].final_va -
+                    (unsigned long)hugepage[i-1].final_va) != hugepage[i].size)
+                       new_memseg = 1;
+
+               if (new_memseg) {
+                       j += 1;
+                       if (j == RTE_MAX_MEMSEG)
+                               break;
+
+                       mcfg->memseg[j].phys_addr = hugepage[i].physaddr;
+                       mcfg->memseg[j].addr = hugepage[i].final_va;
+                       mcfg->memseg[j].len = hugepage[i].size;
+                       mcfg->memseg[j].socket_id = hugepage[i].socket_id;
+                       mcfg->memseg[j].hugepage_sz = hugepage[i].size;
+               }
+               /* continuation of previous memseg */
+               else {
+                       mcfg->memseg[j].len += mcfg->memseg[j].hugepage_sz;
+               }
+               hugepage[i].memseg_id = j;
+       }
+
+       return 0;
+
+
+ fail:
+       return -1;
+}
+
+/*
+ * uses fstat to report the size of a file on disk
+ */
+/* Report the size of an open file via fstat(); 0 when fstat() fails. */
+static off_t
+getFileSize(int fd)
+{
+       struct stat file_stat;
+
+       if (fstat(fd, &file_stat) != 0)
+               return 0;
+       return file_stat.st_size;
+}
+
+/*
+ * This creates the memory mappings in the secondary process to match that of
+ * the server process. It goes through each memory segment in the DPDK runtime
+ * configuration and finds the hugepages which form that segment, mapping them
+ * in order to form a contiguous block in the virtual memory space
+ */
+/*
+ * This creates the memory mappings in the secondary process to match that of
+ * the server process. It goes through each memory segment in the DPDK runtime
+ * configuration and finds the hugepages which form that segment, mapping them
+ * in order to form a contiguous block in the virtual memory space.
+ *
+ * Returns 0 on success, -1 on failure.
+ */
+static int
+rte_eal_hugepage_attach(void)
+{
+       const struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
+       const struct hugepage *hp = NULL;
+       unsigned num_hp = 0;
+       unsigned i, s = 0; /* s used to track the segment number */
+       off_t size;
+       int fd, fd_zero = -1, fd_hugepage = -1;
+
+       if (aslr_enabled() > 0) {
+               RTE_LOG(WARNING, EAL, "WARNING: Address Space Layout Randomization "
+                               "(ASLR) is enabled in the kernel.\n");
+               RTE_LOG(WARNING, EAL, "   This may cause issues with mapping memory "
+                               "into secondary processes\n");
+       }
+
+       fd_zero = open("/dev/zero", O_RDONLY);
+       if (fd_zero < 0) {
+               RTE_LOG(ERR, EAL, "Could not open /dev/zero\n");
+               goto error;
+       }
+       fd_hugepage = open(eal_hugepage_info_path(), O_RDONLY);
+       if (fd_hugepage < 0) {
+               RTE_LOG(ERR, EAL, "Could not open %s\n", eal_hugepage_info_path());
+               goto error;
+       }
+
+       /* map the primary process's hugepage table read-only */
+       size = getFileSize(fd_hugepage);
+       hp = mmap(NULL, size, PROT_READ, MAP_PRIVATE, fd_hugepage, 0);
+       /* fix: mmap() reports failure with MAP_FAILED, not NULL */
+       if (hp == MAP_FAILED) {
+               RTE_LOG(ERR, EAL, "Could not mmap %s\n", eal_hugepage_info_path());
+               goto error;
+       }
+
+       num_hp = size / sizeof(struct hugepage);
+       RTE_LOG(DEBUG, EAL, "Analysing %u hugepages\n", num_hp);
+
+       while (s < RTE_MAX_MEMSEG && mcfg->memseg[s].len > 0){
+               void *addr, *base_addr;
+               uintptr_t offset = 0;
+
+               /* fdzero is mmapped to get a contiguous block of virtual addresses
+                * get a block of free memory of the appropriate size -
+                * use mmap to attempt to get an identical address as server.
+                */
+               base_addr = mmap(mcfg->memseg[s].addr, mcfg->memseg[s].len,
+                               PROT_READ, MAP_PRIVATE, fd_zero, 0);
+               if (base_addr == MAP_FAILED || base_addr != mcfg->memseg[s].addr) {
+                       RTE_LOG(ERR, EAL, "Could not mmap %llu bytes "
+                               "in /dev/zero to requested address [%p]\n",
+                               (unsigned long long)mcfg->memseg[s].len,
+                               mcfg->memseg[s].addr);
+                       if (aslr_enabled() > 0)
+                               RTE_LOG(ERR, EAL, "It is recommended to disable ASLR in the kernel "
+                                               "and retry running both primary and secondary processes\n");
+                       goto error;
+               }
+               /* free memory so we can map the hugepages into the space */
+               munmap(base_addr, mcfg->memseg[s].len);
+
+               /* find the hugepages for this segment and map them
+                * we don't need to worry about order, as the server sorted the
+                * entries before it did the second mmap of them */
+               for (i = 0; i < num_hp && offset < mcfg->memseg[s].len; i++){
+                       if (hp[i].memseg_id == (int)s){
+                               fd = open(hp[i].filepath, O_RDWR);
+                               if (fd < 0) {
+                                       RTE_LOG(ERR, EAL, "Could not open %s\n",
+                                               hp[i].filepath);
+                                       goto error;
+                               }
+                               addr = mmap(RTE_PTR_ADD(base_addr, offset),
+                                               hp[i].size, PROT_READ | PROT_WRITE,
+                                               MAP_SHARED | MAP_FIXED, fd, 0);
+                               close(fd); /* close file both on success and on failure */
+                               if (addr == MAP_FAILED) {
+                                       RTE_LOG(ERR, EAL, "Could not mmap %s\n",
+                                               hp[i].filepath);
+                                       goto error;
+                               }
+                               offset+=hp[i].size;
+                       }
+               }
+               RTE_LOG(DEBUG, EAL, "Mapped segment %u of size 0x%llx\n", s,
+                               (unsigned long long)mcfg->memseg[s].len);
+               s++;
+       }
+       /* NOTE(review): the read-only 'hp' table mapping is intentionally
+        * left in place for the lifetime of the process */
+       close(fd_zero);
+       close(fd_hugepage);
+       return 0;
+
+error:
+       if (fd_zero >= 0)
+               close(fd_zero);
+       if (fd_hugepage >= 0)
+               close(fd_hugepage);
+       return -1;
+}
+
+/* Publish the forced channel/rank counts into the shared memory
+ * configuration; a no-op in secondary processes. Always returns 0. */
+static int
+rte_eal_memdevice_init(void)
+{
+       struct rte_config *cfg;
+
+       /* only the primary process owns these fields */
+       if (rte_eal_process_type() == RTE_PROC_SECONDARY)
+               return 0;
+
+       cfg = rte_eal_get_configuration();
+       cfg->mem_config->nchannel = internal_config.force_nchannel;
+       cfg->mem_config->nrank = internal_config.force_nrank;
+
+       return 0;
+}
+
+
+/* Init memory subsystem: the primary process creates the hugepage
+ * mappings, secondary processes attach to them; then the memory
+ * device parameters are published. Returns 0 on success, -1 on error. */
+int
+rte_eal_memory_init(void)
+{
+       int rc;
+
+       if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+               rc = rte_eal_hugepage_init();
+       else
+               rc = rte_eal_hugepage_attach();
+
+       if (rc < 0)
+               return -1;
+
+       if (internal_config.no_shconf == 0 && rte_eal_memdevice_init() < 0)
+               return -1;
+
+       return 0;
+}
diff --git a/lib/librte_eal/linuxapp/eal/eal_pci.c b/lib/librte_eal/linuxapp/eal/eal_pci.c
new file mode 100644 (file)
index 0000000..78687d6
--- /dev/null
@@ -0,0 +1,770 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <ctype.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdarg.h>
+#include <unistd.h>
+#include <inttypes.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <dirent.h>
+#include <limits.h>
+#include <sys/queue.h>
+#include <sys/mman.h>
+#include <sys/ioctl.h>
+
+#include <rte_interrupts.h>
+#include <rte_log.h>
+#include <rte_pci.h>
+#include <rte_common.h>
+#include <rte_launch.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_malloc.h>
+#include <rte_string_fns.h>
+#include <rte_debug.h>
+
+#include "eal_internal_cfg.h"
+#include "eal_private.h"
+
+/**
+ * @file
+ * PCI probing under linux
+ *
+ * This code is used to simulate a PCI probe by parsing information in
+ * sysfs. Moreover, when a registered driver matches a device, the
+ * kernel driver currently using it is unloaded and replaced by
+ * igb_uio module, which is a very minimal userland driver for Intel
+ * network card, only providing access to PCI BAR to applications, and
+ * enabling bus master.
+ */
+
+
+#define PROC_MODULES "/proc/modules"
+
+#define IGB_UIO_NAME "igb_uio"
+
+#define UIO_NEWID "/sys/bus/pci/drivers/%s/new_id"
+#define UIO_BIND  "/sys/bus/pci/drivers/%s/bind"
+
+/* maximum time to wait that /dev/uioX appears */
+#define UIO_DEV_WAIT_TIMEOUT 3 /* seconds */
+
+/*
+ * For multi-process we need to reproduce all PCI mappings in secondary
+ * processes, so save them in a tailq.
+ */
+struct uio_resource {
+       TAILQ_ENTRY(uio_resource) next;
+
+       struct rte_pci_addr pci_addr;   /* PCI address of the mapped device */
+       void *addr;                     /* virtual address of the mapping */
+       char path[PATH_MAX];            /* path of the /dev/uioX node used */
+       unsigned long size;             /* length of the mapping in bytes */
+       unsigned long offset;           /* mmap offset within the uio file */
+};
+
+TAILQ_HEAD(uio_res_list, uio_resource);
+
+/* list of recorded mappings, set up by rte_eal_pci_init() */
+static struct uio_res_list *uio_res_list = NULL;
+static int pci_parse_sysfs_value(const char *filename, unsigned long *val);
+
+/*
+ * Check that a kernel module is loaded, by scanning /proc/modules for
+ * its name. Returns 0 on success, or if the parameter is NULL (nothing
+ * to check), or -1 if the module is not loaded or the file cannot be
+ * opened.
+ */
+static int
+pci_uio_check_module(const char *module_name)
+{
+       FILE *f;
+       unsigned i;
+       char buf[BUFSIZ];
+
+       if (module_name == NULL)
+               return 0;
+
+       f = fopen(PROC_MODULES, "r");
+       if (f == NULL) {
+               RTE_LOG(ERR, EAL, "Cannot open "PROC_MODULES"\n");
+               return -1;
+       }
+
+       while(fgets(buf, sizeof(buf), f) != NULL) {
+
+               /* truncate the line at the first whitespace: the module
+                * name is the first field of each /proc/modules line */
+               for (i = 0; i < sizeof(buf) && buf[i] != '\0'; i++) {
+                       if (isspace(buf[i]))
+                           buf[i] = '\0';
+               }
+
+               if (strncmp(buf, module_name, sizeof(buf)) == 0) {
+                       fclose(f);
+                       return 0;
+               }
+       }
+       fclose(f);
+       RTE_LOG(ERR, EAL, "Cannot find %s in "PROC_MODULES"\n", module_name);
+       return -1;
+}
+
+/*
+ * Bind a PCI device to the given kernel module driver (e.g. igb_uio):
+ * first write "vendor_id device_id" to the driver's sysfs new_id file
+ * so the driver accepts the device, then write the PCI address to the
+ * driver's bind file. Returns 0 on success, -1 on any failure.
+ */
+static int
+pci_uio_bind_device(struct rte_pci_device *dev, const char *module_name)
+{
+       FILE *f;
+       int n;
+       char buf[BUFSIZ];
+       char uio_newid[PATH_MAX];
+       char uio_bind[PATH_MAX];
+       struct rte_pci_addr *loc = &dev->addr;
+
+       RTE_LOG(DEBUG, EAL, "bind PCI device "PCI_PRI_FMT" to %s driver\n",
+               loc->domain, loc->bus, loc->devid, loc->function, module_name);
+
+       n = rte_snprintf(uio_newid, sizeof(uio_newid), UIO_NEWID, module_name);
+       if ((n < 0) || (n >= (int)sizeof(uio_newid))) {
+               RTE_LOG(ERR, EAL, "Cannot rte_snprintf uio_newid name\n");
+               return -1;
+       }
+       n = rte_snprintf(uio_bind, sizeof(uio_bind), UIO_BIND, module_name);
+       if ((n < 0) || (n >= (int)sizeof(uio_bind))) {
+               RTE_LOG(ERR, EAL, "Cannot rte_snprintf uio_bind name\n");
+               return -1;
+       }
+
+       n = rte_snprintf(buf, sizeof(buf), "%x %x\n",
+                       dev->id.vendor_id, dev->id.device_id);
+       if ((n < 0) || (n >= (int)sizeof(buf))) {
+               RTE_LOG(ERR, EAL, "Cannot rte_snprintf vendor_id/device_id\n");
+               return -1;
+       }
+
+       /* register the device id with the driver via new_id */
+       f = fopen(uio_newid, "w");
+       if (f == NULL) {
+               RTE_LOG(ERR, EAL, "Cannot open %s\n", uio_newid);
+               return -1;
+       }
+       /* NOTE(review): a short write here is not logged, only -1 returned */
+       if (fwrite(buf, n, 1, f) == 0) {
+               fclose(f);
+               return -1;
+       }
+       fclose(f);
+
+       /* then bind the device to the driver */
+       f = fopen(uio_bind, "w");
+       if (f == NULL) {
+               RTE_LOG(ERR, EAL, "Cannot open %s\n", uio_bind);
+               return -1;
+       }
+       n = rte_snprintf(buf, sizeof(buf), PCI_PRI_FMT "\n",
+                        loc->domain, loc->bus, loc->devid, loc->function);
+       if ((n < 0) || (n >= (int)sizeof(buf))) {
+               RTE_LOG(ERR, EAL, "Cannot rte_snprintf PCI infos\n");
+               fclose(f);
+               return -1;
+       }
+       if (fwrite(buf, n, 1, f) == 0) {
+               fclose(f);
+               return -1;
+       }
+
+       RTE_LOG(DEBUG, EAL, "Device bound\n");
+
+       fclose(f);
+       return 0;
+}
+
+/*
+ * Map a particular PCI resource from a uio device file.
+ *
+ * The device node can take a moment to appear after binding, so the
+ * open is retried (10 times per second) for up to UIO_DEV_WAIT_TIMEOUT
+ * seconds. On success, the file descriptor is kept open in the primary
+ * process and recorded in dev->intr_handle (presumably for later
+ * interrupt handling — the consumer is outside this file); in a
+ * secondary process it is closed again.
+ *
+ * @param dev            device whose intr_handle is filled in
+ * @param requested_addr if non-NULL, the mapping must land exactly there
+ * @param devname        path of the /dev/uioX node to map
+ * @param offset         mmap offset within the file
+ * @param size           length of the mapping
+ * @return the mapped address on success, NULL on error
+ */
+static void *
+pci_map_resource(struct rte_pci_device *dev, void *requested_addr, const char *devname,
+               unsigned long offset, unsigned long size)
+{
+       unsigned n;
+       int fd;
+       void *mapaddr;
+
+       /*
+        * open devname, and mmap it: it can take some time to
+        * appear, so we wait some time before returning an error
+        */
+       for (n=0; n<UIO_DEV_WAIT_TIMEOUT*10; n++) {
+               fd = open(devname, O_RDWR);
+               if (fd >= 0)
+                       break;
+               if (errno != ENOENT)
+                       break;
+               usleep(100000);
+       }
+       if (fd < 0) {
+               RTE_LOG(ERR, EAL, "Cannot open %s: %s\n", devname, strerror(errno));
+               goto fail;
+       }
+
+       /* Map the PCI memory resource of device */
+       mapaddr = mmap(requested_addr, size, PROT_READ | PROT_WRITE,
+                       MAP_SHARED, fd, offset);
+       if (mapaddr == MAP_FAILED ||
+                       (requested_addr != NULL && mapaddr != requested_addr)) {
+               RTE_LOG(ERR, EAL, "%s(): cannot mmap %s: %s\n", __func__,
+                       devname, strerror(errno));
+               close(fd);
+               goto fail;
+       }
+       if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+               /* save fd if in primary process */
+               dev->intr_handle.fd = fd;
+               dev->intr_handle.type = RTE_INTR_HANDLE_UIO;
+       } else {
+               /* fd is not needed in slave process, close it */
+               dev->intr_handle.fd = -1;
+               dev->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
+               close(fd);
+       }
+
+       RTE_LOG(DEBUG, EAL, "PCI memory mapped at %p\n", mapaddr);
+
+       return mapaddr;
+
+fail:
+       dev->intr_handle.fd = -1;
+       dev->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
+
+       return NULL;
+}
+/*
+ * Map the PCI memory resource of a PCI device into virtual memory.
+ *
+ * In the primary process, the uio entry associated with the device is
+ * located through sysfs (uio/uioX or uio:uioX depending on kernel
+ * version), its map0 offset and size are read, /dev/uioX is mmap()ed,
+ * and the mapping details are stored in uio_res_list so that secondary
+ * processes can reproduce the mapping at the same virtual address.
+ * A secondary process only replays the mapping recorded by the primary.
+ * Returns 0 on success (including when the device has no uio entry),
+ * -1 on error.
+ */
+static int
+pci_uio_map_resource(struct rte_pci_device *dev)
+{
+       struct dirent *e;
+       DIR *dir;
+       char dirname[PATH_MAX];
+       char dirname2[PATH_MAX];
+       char filename[PATH_MAX];
+       char devname[PATH_MAX]; /* contains the /dev/uioX */
+       void *mapaddr;
+       unsigned uio_num;
+       unsigned long size, offset;
+       struct rte_pci_addr *loc = &dev->addr;
+       struct uio_resource *uio_res;
+
+       RTE_LOG(DEBUG, EAL, "map PCI resource for device "PCI_PRI_FMT"\n",
+               loc->domain, loc->bus, loc->devid, loc->function);
+
+       /* secondary processes - use already recorded details */
+       if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+
+               TAILQ_FOREACH(uio_res, uio_res_list, next) {
+                       /* skip this element if it doesn't match our PCI address */
+                       if (memcmp(&uio_res->pci_addr, &dev->addr, sizeof(dev->addr)))
+                               continue;
+
+                       /* must map at the same address as the primary did */
+                       if (pci_map_resource(dev, uio_res->addr, uio_res->path,
+                                       uio_res->offset, uio_res->size) == uio_res->addr)
+                               return 0;
+                       else {
+                               RTE_LOG(ERR, EAL, "Cannot mmap device resource\n");
+                               return -1;
+                       }
+               }
+               RTE_LOG(ERR, EAL, "Cannot find resource for device\n");
+               return -1;
+       }
+
+       /* depending on kernel version, uio can be located in uio/uioX
+        * or uio:uioX */
+
+       rte_snprintf(dirname, sizeof(dirname),
+                "/sys/bus/pci/devices/" PCI_PRI_FMT "/uio",
+                loc->domain, loc->bus, loc->devid, loc->function);
+
+       dir = opendir(dirname);
+       if (dir == NULL) {
+               /* retry with the parent directory */
+               rte_snprintf(dirname, sizeof(dirname),
+                        "/sys/bus/pci/devices/" PCI_PRI_FMT,
+                        loc->domain, loc->bus, loc->devid, loc->function);
+               dir = opendir(dirname);
+
+               if (dir == NULL) {
+                       RTE_LOG(ERR, EAL, "Cannot opendir %s\n", dirname);
+                       return -1;
+               }
+       }
+
+       /* take the first file starting with "uio" */
+       while ((e = readdir(dir)) != NULL) {
+               int shortprefix_len = sizeof("uio") - 1; /* format could be uio%d ...*/
+               int longprefix_len = sizeof("uio:uio") - 1; /* ... or uio:uio%d */
+               char *endptr;
+
+               if (strncmp(e->d_name, "uio", 3) != 0)
+                       continue;
+
+               /* first try uio%d; compare endptr against the start of the
+                * number, not d_name: otherwise an entry with no digits
+                * after the prefix (e.g. "uio" or "uio:uio5") would be
+                * wrongly accepted as uio number 0 */
+               errno = 0;
+               uio_num = strtoull(e->d_name + shortprefix_len, &endptr, 10);
+               if (errno == 0 && endptr != (e->d_name + shortprefix_len)) {
+                       rte_snprintf(dirname2, sizeof(dirname2),
+                                "%s/uio%u", dirname, uio_num);
+                       break;
+               }
+
+               /* then try uio:uio%d */
+               errno = 0;
+               uio_num = strtoull(e->d_name + longprefix_len, &endptr, 10);
+               if (errno == 0 && endptr != (e->d_name + longprefix_len)) {
+                       rte_snprintf(dirname2, sizeof(dirname2),
+                                "%s/uio:uio%u", dirname, uio_num);
+                       break;
+               }
+       }
+       closedir(dir);
+
+       /* No uio resource found */
+       if (e == NULL)
+               return 0;
+
+       /* get mapping offset */
+       rte_snprintf(filename, sizeof(filename),
+                "%s/maps/map0/offset", dirname2);
+       if (pci_parse_sysfs_value(filename, &offset) < 0) {
+               RTE_LOG(ERR, EAL, "%s(): cannot parse offset\n",
+                       __func__);
+               return -1;
+       }
+
+       /* get mapping size */
+       rte_snprintf(filename, sizeof(filename),
+                "%s/maps/map0/size", dirname2);
+       if (pci_parse_sysfs_value(filename, &size) < 0) {
+               RTE_LOG(ERR, EAL, "%s(): cannot parse size\n",
+                       __func__);
+               return -1;
+       }
+
+       /* open and mmap /dev/uioX */
+       rte_snprintf(devname, sizeof(devname), "/dev/uio%u", uio_num);
+       mapaddr = pci_map_resource(dev, NULL, devname, offset, size);
+       if (mapaddr == NULL)
+               return -1;
+       dev->mem_resource.addr = mapaddr;
+
+       /* save the mapping details for secondary processes */
+       uio_res = rte_malloc("UIO_RES", sizeof(*uio_res), 0);
+       if (uio_res == NULL){
+               RTE_LOG(ERR, EAL, "%s(): cannot store uio mmap details\n", __func__);
+               return -1;
+       }
+       uio_res->addr = mapaddr;
+       uio_res->offset = offset;
+       uio_res->size = size;
+       rte_snprintf(uio_res->path, sizeof(uio_res->path), "%s", devname);
+       memcpy(&uio_res->pci_addr, &dev->addr, sizeof(uio_res->pci_addr));
+
+       TAILQ_INSERT_TAIL(uio_res_list, uio_res, next);
+
+       return 0;
+}
+
+/* parse the "resource" sysfs file */
+#define IORESOURCE_MEM  0x00000200
+
+/*
+ * Parse the sysfs "resource" file of a PCI device. Each line contains
+ * three hexadecimal fields: "phys_addr end_addr flags". The first
+ * entry whose flags mark it as a memory resource (IORESOURCE_MEM) is
+ * recorded in dev->mem_resource; its virtual address is left NULL
+ * until the BAR is actually mapped. Returns 0 on success, -1 on read
+ * or parse error.
+ */
+static int
+pci_parse_sysfs_resource(const char *filename, struct rte_pci_device *dev)
+{
+       FILE *f;
+       char buf[BUFSIZ];
+       /* overlay named fields on the split-string array for readability */
+       union pci_resource_info {
+               struct {
+                       char *phys_addr;
+                       char *end_addr;
+                       char *flags;
+               };
+               char *ptrs[PCI_RESOURCE_FMT_NVAL];
+       } res_info;
+       int i;
+       uint64_t phys_addr, end_addr, flags;
+
+       f = fopen(filename, "r");
+       if (f == NULL) {
+               RTE_LOG(ERR, EAL, "Cannot open sysfs resource\n");
+               return -1;
+       }
+
+       for (i = 0; i<PCI_MAX_RESOURCE; i++) {
+
+               if (fgets(buf, sizeof(buf), f) == NULL) {
+                       RTE_LOG(ERR, EAL, "%s(): cannot read resource\n", __func__);
+                       goto error;
+               }
+
+               if (rte_strsplit(buf, sizeof(buf), res_info.ptrs, 3, ' ') != 3) {
+                       RTE_LOG(ERR, EAL, "%s(): bad resource format\n", __func__);
+                       goto error;
+               }
+               errno = 0;
+               phys_addr = strtoull(res_info.phys_addr, NULL, 16);
+               end_addr = strtoull(res_info.end_addr, NULL, 16);
+               flags = strtoull(res_info.flags, NULL, 16);
+               if (errno != 0) {
+                       RTE_LOG(ERR, EAL, "%s(): bad resource format\n", __func__);
+                       goto error;
+               }
+
+               if (flags & IORESOURCE_MEM) {
+                       dev->mem_resource.phys_addr = phys_addr;
+                       /* end_addr is inclusive, hence the +1 */
+                       dev->mem_resource.len = end_addr - phys_addr + 1;
+                       dev->mem_resource.addr = NULL; /* not mapped for now */
+                       break;
+               }
+       }
+       fclose(f);
+       return 0;
+
+error:
+       fclose(f);
+       return -1;
+}
+
+/*
+ * Parse a sysfs file containing one integer value (base auto-detected
+ * by strtoul: "0x" prefix for hexadecimal, otherwise decimal/octal).
+ *
+ * @param filename path of the sysfs file to read
+ * @param val      output: the parsed value
+ * @return 0 on success, -1 on open/read/parse error
+ */
+static int
+pci_parse_sysfs_value(const char *filename, unsigned long *val)
+{
+       FILE *f;
+       char buf[BUFSIZ];
+       char *end = NULL;
+
+       f = fopen(filename, "r");
+       if (f == NULL) {
+               RTE_LOG(ERR, EAL, "%s(): cannot open sysfs value %s\n",
+                       __func__, filename);
+               return -1;
+       }
+
+       if (fgets(buf, sizeof(buf), f) == NULL) {
+               RTE_LOG(ERR, EAL, "%s(): cannot read sysfs value %s\n",
+                       __func__, filename);
+               fclose(f);
+               return -1;
+       }
+       *val = strtoul(buf, &end, 0);
+       /* accept either a trailing newline or end-of-string: a sysfs
+        * attribute is not guaranteed to be newline-terminated */
+       if ((buf[0] == '\0') || (end == NULL) ||
+                       ((*end != '\n') && (*end != '\0'))) {
+               RTE_LOG(ERR, EAL, "%s(): cannot parse sysfs value %s\n",
+                               __func__, filename);
+               fclose(f);
+               return -1;
+       }
+       fclose(f);
+       return 0;
+}
+
+/*
+ * Scan one PCI sysfs entry: read the vendor, device and subsystem
+ * identifiers plus the BAR resources of the device under "dirname",
+ * and append the resulting rte_pci_device to the global device list.
+ * Returns 0 on success, -1 on error (the partially-filled device is
+ * freed before returning).
+ */
+static int
+pci_scan_one(const char *dirname, uint16_t domain, uint8_t bus,
+            uint8_t devid, uint8_t function)
+{
+       char filename[PATH_MAX];
+       unsigned long tmp;
+       struct rte_pci_device *dev;
+
+       dev = malloc(sizeof(*dev));
+       if (dev == NULL) {
+               return -1;
+       }
+
+       memset(dev, 0, sizeof(*dev));
+       dev->addr.domain = domain;
+       dev->addr.bus = bus;
+       dev->addr.devid = devid;
+       dev->addr.function = function;
+
+       /* get vendor id */
+       rte_snprintf(filename, sizeof(filename), "%s/vendor", dirname);
+       if (pci_parse_sysfs_value(filename, &tmp) < 0) {
+               free(dev);
+               return -1;
+       }
+       dev->id.vendor_id = (uint16_t)tmp;
+
+       /* get device id */
+       rte_snprintf(filename, sizeof(filename), "%s/device", dirname);
+       if (pci_parse_sysfs_value(filename, &tmp) < 0) {
+               free(dev);
+               return -1;
+       }
+       dev->id.device_id = (uint16_t)tmp;
+
+       /* get subsystem_vendor id */
+       rte_snprintf(filename, sizeof(filename), "%s/subsystem_vendor",
+                dirname);
+       if (pci_parse_sysfs_value(filename, &tmp) < 0) {
+               free(dev);
+               return -1;
+       }
+       dev->id.subsystem_vendor_id = (uint16_t)tmp;
+
+       /* get subsystem_device id */
+       rte_snprintf(filename, sizeof(filename), "%s/subsystem_device",
+                dirname);
+       if (pci_parse_sysfs_value(filename, &tmp) < 0) {
+               free(dev);
+               return -1;
+       }
+       dev->id.subsystem_device_id = (uint16_t)tmp;
+
+       /* parse resources */
+       rte_snprintf(filename, sizeof(filename), "%s/resource", dirname);
+       if (pci_parse_sysfs_resource(filename, dev) < 0) {
+               RTE_LOG(ERR, EAL, "%s(): cannot parse resource\n", __func__);
+               free(dev);
+               return -1;
+       }
+
+       /* device is valid, add in list */
+       TAILQ_INSERT_TAIL(&device_list, dev, next);
+
+       return 0;
+}
+
+/*
+ * Split up a PCI address string ("DDDD:BB:DD.F") into its constituent
+ * parts: domain, bus, device id and function.
+ *
+ * @return 0 on success (all output parameters filled in), -1 on
+ *         malformed input or allocation failure
+ */
+static int
+parse_pci_addr_format(const char *buf, int bufsize, uint16_t *domain,
+               uint8_t *bus, uint8_t *devid, uint8_t *function)
+{
+       /* first split on ':' */
+       union splitaddr {
+               struct {
+                       char *domain;
+                       char *bus;
+                       char *devid;
+                       char *function;
+               };
+               char *str[PCI_FMT_NVAL]; /* last element-separator is "." not ":" */
+       } splitaddr;
+
+       char *buf_copy = strndup(buf, bufsize);
+       if (buf_copy == NULL)
+               return -1;
+
+       if (rte_strsplit(buf_copy, bufsize, splitaddr.str, PCI_FMT_NVAL, ':')
+                       != PCI_FMT_NVAL - 1)
+               goto error;
+       /* final split is on '.' between devid and function */
+       splitaddr.function = strchr(splitaddr.devid,'.');
+       if (splitaddr.function == NULL)
+               goto error;
+       *splitaddr.function++ = '\0';
+
+       /* now convert to int values */
+       errno = 0;
+       /* the PCI domain is 16 bits wide: casting to uint8_t would
+        * silently truncate any domain above 0xff */
+       *domain = (uint16_t)strtoul(splitaddr.domain, NULL, 16);
+       *bus = (uint8_t)strtoul(splitaddr.bus, NULL, 16);
+       *devid = (uint8_t)strtoul(splitaddr.devid, NULL, 16);
+       *function = (uint8_t)strtoul(splitaddr.function, NULL, 10);
+       if (errno != 0)
+               goto error;
+
+       free(buf_copy); /* free the copy made with strndup */
+       return 0;
+error:
+       free(buf_copy);
+       return -1;
+}
+
+/*
+ * Scan the content of the PCI bus via /sys/bus/pci/devices: every
+ * directory entry whose name parses as a PCI address is scanned and
+ * added to the device list; entries that do not parse are silently
+ * skipped. Returns 0 on success, -1 if the sysfs directory cannot be
+ * opened or a device scan fails.
+ */
+static int
+pci_scan(void)
+{
+       struct dirent *e;
+       DIR *dir;
+       char dirname[PATH_MAX];
+       uint16_t domain;
+       uint8_t bus, devid, function;
+
+       dir = opendir(SYSFS_PCI_DEVICES);
+       if (dir == NULL) {
+               RTE_LOG(ERR, EAL, "%s(): opendir failed: %s\n",
+                       __func__, strerror(errno));
+               return -1;
+       }
+
+       while ((e = readdir(dir)) != NULL) {
+               if (e->d_name[0] == '.')
+                       continue;
+
+               if (parse_pci_addr_format(e->d_name, sizeof(e->d_name), &domain,
+                               &bus, &devid, &function) != 0)
+                       continue;
+
+               rte_snprintf(dirname, sizeof(dirname), "%s/%s", SYSFS_PCI_DEVICES,
+                        e->d_name);
+               if (pci_scan_one(dirname, domain, bus, devid, function) < 0)
+                       goto error;
+       }
+       closedir(dir);
+       return 0;
+
+error:
+       closedir(dir);
+       return -1;
+}
+
+/*
+ * Unbind the kernel driver currently attached to a PCI device, by
+ * writing the device's address to its sysfs "driver/unbind" file.
+ * Returns 0 on success or when no driver is bound (the sysfs file does
+ * not exist), -1 on write failure.
+ */
+static int
+pci_unbind_kernel_driver(struct rte_pci_device *dev)
+{
+       int n;
+       FILE *f;
+       char filename[PATH_MAX];
+       char buf[BUFSIZ];
+       struct rte_pci_addr *loc = &dev->addr;
+
+       /* open /sys/bus/pci/devices/AAAA:BB:CC.D/driver */
+       rte_snprintf(filename, sizeof(filename),
+                SYSFS_PCI_DEVICES "/" PCI_PRI_FMT "/driver/unbind",
+                loc->domain, loc->bus, loc->devid, loc->function);
+
+       RTE_LOG(DEBUG, EAL, "unbind kernel driver %s\n", filename);
+
+       f = fopen(filename, "w");
+       if (f == NULL) /* device was not bound */
+               return 0;
+
+       n = rte_snprintf(buf, sizeof(buf), PCI_PRI_FMT "\n",
+                    loc->domain, loc->bus, loc->devid, loc->function);
+       if ((n < 0) || (n >= (int)sizeof(buf))) {
+               RTE_LOG(ERR, EAL, "%s(): rte_snprintf failed\n", __func__);
+               goto error;
+       }
+       if (fwrite(buf, n, 1, f) == 0)
+               goto error;
+
+       fclose(f);
+       return 0;
+
+error:
+       fclose(f);
+       return -1;
+}
+
+/*
+ * If vendor/device ID match, call the devinit() function of the
+ * driver.
+ *
+ * When the driver requires the igb_uio module, the whole application
+ * exits (rte_exit) if the module is not loaded. On an id_table match,
+ * the primary process first unbinds the current kernel driver and
+ * binds the device to igb_uio; both process types then map the NIC
+ * resources before calling devinit(). Returns the devinit() status on
+ * a match, -1 when no id_table entry matches or a step fails.
+ */
+int
+rte_eal_pci_probe_one_driver(struct rte_pci_driver *dr, struct rte_pci_device *dev)
+{
+       struct rte_pci_id *id_table;
+       const char *module_name = NULL;
+       int ret;
+
+       if (dr->drv_flags & RTE_PCI_DRV_NEED_IGB_UIO)
+               module_name = IGB_UIO_NAME;
+
+       ret = pci_uio_check_module(module_name);
+       if (ret != 0)
+               rte_exit(1, "The %s module is required by the %s driver\n",
+                               module_name, dr->name);
+
+       for (id_table = dr->id_table ; id_table->vendor_id != 0; id_table++) {
+
+               /* check if device's identifiers match the driver's ones */
+               if (id_table->vendor_id != dev->id.vendor_id &&
+                   id_table->vendor_id != PCI_ANY_ID)
+                       continue;
+               if (id_table->device_id != dev->id.device_id &&
+                   id_table->device_id != PCI_ANY_ID)
+                       continue;
+               if (id_table->subsystem_vendor_id != dev->id.subsystem_vendor_id &&
+                   id_table->subsystem_vendor_id != PCI_ANY_ID)
+                       continue;
+               if (id_table->subsystem_device_id != dev->id.subsystem_device_id &&
+                   id_table->subsystem_device_id != PCI_ANY_ID)
+                       continue;
+
+               RTE_LOG(DEBUG, EAL, "probe driver: %x:%x %s\n",
+               dev->id.vendor_id, dev->id.device_id, dr->name);
+
+               /* Unbind PCI devices if needed */
+               if (module_name != NULL) {
+                       if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+                       /* unbind current driver, bind ours */
+                               if (pci_unbind_kernel_driver(dev) < 0)
+                                       return -1;
+                               if (pci_uio_bind_device(dev, module_name) < 0)
+                                       return -1;
+                       }
+                       /* map the NIC resources */
+                       if (pci_uio_map_resource(dev) < 0)
+                               return -1;
+               }
+               /* call the driver devinit() function */
+               return dr->devinit(dr, dev);
+
+       }
+       return -1;
+}
+
+/*
+ * Init the PCI EAL subsystem: initialise the driver and device lists,
+ * reserve the shared list used to replay uio mappings in secondary
+ * processes, then scan the PCI bus (unless PCI support was disabled on
+ * the command line). Returns 0 on success, -1 if the bus scan fails.
+ */
+int
+rte_eal_pci_init(void)
+{
+       TAILQ_INIT(&driver_list);
+       TAILQ_INIT(&device_list);
+       uio_res_list = RTE_TAILQ_RESERVE("PCI_RESOURCE_LIST", uio_res_list);
+
+       /* for debug purposes, PCI can be disabled */
+       if (internal_config.no_pci)
+               return 0;
+
+       if (pci_scan() < 0) {
+               RTE_LOG(ERR, EAL, "%s(): Cannot scan PCI bus\n", __func__);
+               return -1;
+       }
+       return 0;
+}
diff --git a/lib/librte_eal/linuxapp/eal/eal_thread.c b/lib/librte_eal/linuxapp/eal/eal_thread.c
new file mode 100644 (file)
index 0000000..7409d28
--- /dev/null
@@ -0,0 +1,237 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <unistd.h>
+#include <pthread.h>
+#include <sched.h>
+#include <sys/queue.h>
+
+#include <rte_debug.h>
+#include <rte_atomic.h>
+#include <rte_launch.h>
+#include <rte_log.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_per_lcore.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+
+#include "eal_private.h"
+#include "eal_thread.h"
+
+RTE_DEFINE_PER_LCORE(unsigned, _lcore_id);
+
+/*
+ * Send a message to a slave lcore identified by slave_id to call a
+ * function f with argument arg. Once the execution is done, the
+ * remote lcore switches to FINISHED state.
+ *
+ * Returns 0 on success, -EBUSY if the slave is not in WAIT state.
+ * Panics if the command pipe cannot be written or the acknowledgement
+ * cannot be read (short writes/reads interrupted by EINTR are retried).
+ */
+int
+rte_eal_remote_launch(int (*f)(void *), void *arg, unsigned slave_id)
+{
+       int n;
+       char c = 0;
+       int m2s = lcore_config[slave_id].pipe_master2slave[1];
+       int s2m = lcore_config[slave_id].pipe_slave2master[0];
+
+       if (lcore_config[slave_id].state != WAIT)
+               return -EBUSY;
+
+       lcore_config[slave_id].f = f;
+       lcore_config[slave_id].arg = arg;
+
+       /* send message */
+       n = 0;
+       while (n == 0 || (n < 0 && errno == EINTR))
+               n = write(m2s, &c, 1);
+       if (n < 0)
+               rte_panic("cannot write on configuration pipe\n");
+
+       /* wait ack */
+       n = 0;
+       do {
+               n = read(s2m, &c, 1);
+       } while (n < 0 && errno == EINTR);
+
+       if (n <= 0)
+               rte_panic("cannot read on configuration pipe\n");
+
+       return 0;
+}
+
+/*
+ * Pin the calling thread to the CPU matching its lcore id. The dynamic
+ * CPU_ALLOC-based cpu_set_t API is used when available so that
+ * RTE_MAX_LCORE may exceed the static CPU_SETSIZE; otherwise the
+ * fixed-size cpu_set_t API is used. Returns 0 on success, -1 on
+ * allocation or pthread_setaffinity_np failure.
+ */
+static int
+eal_thread_set_affinity(void)
+{
+       int s;
+       pthread_t thread;
+
+/*
+ * According to the section VERSIONS of the CPU_ALLOC man page:
+ *
+ * The CPU_ZERO(), CPU_SET(), CPU_CLR(), and CPU_ISSET() macros were added
+ * in glibc 2.3.3.
+ *
+ * CPU_COUNT() first appeared in glibc 2.6.
+ *
+ * CPU_AND(),     CPU_OR(),     CPU_XOR(),    CPU_EQUAL(),    CPU_ALLOC(),
+ * CPU_ALLOC_SIZE(), CPU_FREE(), CPU_ZERO_S(),  CPU_SET_S(),  CPU_CLR_S(),
+ * CPU_ISSET_S(),  CPU_AND_S(), CPU_OR_S(), CPU_XOR_S(), and CPU_EQUAL_S()
+ * first appeared in glibc 2.7.
+ */
+#if defined(CPU_ALLOC)
+       size_t size;
+       cpu_set_t *cpusetp;
+
+       cpusetp = CPU_ALLOC(RTE_MAX_LCORE);
+       if (cpusetp == NULL) {
+               RTE_LOG(ERR, EAL, "CPU_ALLOC failed\n");
+               return -1;
+       }
+
+       size = CPU_ALLOC_SIZE(RTE_MAX_LCORE);
+       CPU_ZERO_S(size, cpusetp);
+       CPU_SET_S(rte_lcore_id(), size, cpusetp);
+
+       thread = pthread_self();
+       s = pthread_setaffinity_np(thread, size, cpusetp);
+       if (s != 0) {
+               RTE_LOG(ERR, EAL, "pthread_setaffinity_np failed\n");
+               CPU_FREE(cpusetp);
+               return -1;
+       }
+
+       CPU_FREE(cpusetp);
+#else /* CPU_ALLOC */
+       cpu_set_t cpuset;
+       CPU_ZERO( &cpuset );
+       CPU_SET( rte_lcore_id(), &cpuset );
+
+       thread = pthread_self();
+       s = pthread_setaffinity_np(thread, sizeof( cpuset ), &cpuset);
+       if (s != 0) {
+               RTE_LOG(ERR, EAL, "pthread_setaffinity_np failed\n");
+               return -1;
+       }
+#endif
+       return 0;
+}
+
+/*
+ * Initialise the master lcore thread: record its lcore id in the
+ * per-lcore memory area and pin the thread to the corresponding CPU.
+ * Panics if the affinity cannot be set.
+ */
+void eal_thread_init_master(unsigned lcore_id)
+{
+       /* set the lcore ID in per-lcore memory area */
+       RTE_PER_LCORE(_lcore_id) = lcore_id;
+
+       /* set CPU affinity */
+       if (eal_thread_set_affinity() < 0)
+               rte_panic("cannot set affinity\n");
+}
+
+/*
+ * Main loop of slave lcore threads. The thread first finds its lcore
+ * id by matching its pthread id against lcore_config, pins itself to
+ * the corresponding CPU, then loops forever: wait for a one-byte
+ * command on the master-to-slave pipe, acknowledge it on the
+ * slave-to-master pipe, run the registered function and publish its
+ * return value (behind a write barrier) before moving to FINISHED
+ * state. Never returns; panics on any pipe error or missing function.
+ */
+__attribute__((noreturn)) void *
+eal_thread_loop(__attribute__((unused)) void *arg)
+{
+       char c;
+       int n, ret;
+       unsigned lcore_id;
+       pthread_t thread_id;
+       int m2s, s2m;
+
+       thread_id = pthread_self();
+
+       /* retrieve our lcore_id from the configuration structure */
+       RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+               if (thread_id == lcore_config[lcore_id].thread_id)
+                       break;
+       }
+       if (lcore_id == RTE_MAX_LCORE)
+               rte_panic("cannot retrieve lcore id\n");
+
+       RTE_LOG(DEBUG, EAL, "Core %u is ready (tid=%x)\n",
+               lcore_id, (int)thread_id);
+
+       m2s = lcore_config[lcore_id].pipe_master2slave[0];
+       s2m = lcore_config[lcore_id].pipe_slave2master[1];
+
+       /* set the lcore ID in per-lcore memory area */
+       RTE_PER_LCORE(_lcore_id) = lcore_id;
+
+       /* set CPU affinity */
+       if (eal_thread_set_affinity() < 0)
+               rte_panic("cannot set affinity\n");
+
+       /* read on our pipe to get commands */
+       while (1) {
+               void *fct_arg;
+
+               /* wait command */
+               n = 0;
+               do {
+                       n = read(m2s, &c, 1);
+               } while (n < 0 && errno == EINTR);
+
+               if (n <= 0)
+                       rte_panic("cannot read on configuration pipe\n");
+
+               lcore_config[lcore_id].state = RUNNING;
+
+               /* send ack */
+               n = 0;
+               while (n == 0 || (n < 0 && errno == EINTR))
+                       n = write(s2m, &c, 1);
+               if (n < 0)
+                       rte_panic("cannot write on configuration pipe\n");
+
+               if (lcore_config[lcore_id].f == NULL)
+                       rte_panic("NULL function pointer\n");
+
+               /* call the function and store the return value */
+               fct_arg = lcore_config[lcore_id].arg;
+               ret = lcore_config[lcore_id].f(fct_arg);
+               lcore_config[lcore_id].ret = ret;
+               /* make ret visible before the FINISHED state change */
+               rte_wmb();
+               lcore_config[lcore_id].state = FINISHED;
+       }
+
+       /* never reached */
+       /* pthread_exit(NULL); */
+       /* return NULL; */
+}
diff --git a/lib/librte_eal/linuxapp/eal/include/eal_fs_paths.h b/lib/librte_eal/linuxapp/eal/include/eal_fs_paths.h
new file mode 100644 (file)
index 0000000..9c5ffb2
--- /dev/null
@@ -0,0 +1,96 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+/**
+ * @file
+ * Paths used for storing hugepage and config info for multi-process support.
+ */
+
+#ifndef _EAL_LINUXAPP_FS_PATHS_H
+#define _EAL_LINUXAPP_FS_PATHS_H
+
+/** Path of rte config file. */
+#define RUNTIME_CONFIG_FMT "%s/.%s_config"
+
+static const char *default_config_dir = "/var/run";
+
+static inline const char *
+eal_runtime_config_path(void)
+{
+       static char buffer[PATH_MAX]; /* static so auto-zeroed */
+       const char *directory = default_config_dir;
+       const char *home_dir = getenv("HOME");
+
+       if (getuid() != 0 && home_dir != NULL)
+               directory = home_dir;
+       rte_snprintf(buffer, sizeof(buffer) - 1, RUNTIME_CONFIG_FMT, directory,
+                       internal_config.hugefile_prefix);
+       return buffer;
+}
+
+/** Path of hugepage info file. */
+#define HUGEPAGE_INFO_FMT "%s/.%s_hugepage_info"
+
+static inline const char *
+eal_hugepage_info_path(void)
+{
+       static char buffer[PATH_MAX]; /* static so auto-zeroed */
+       const char *directory = default_config_dir;
+       const char *home_dir = getenv("HOME");
+
+       if (getuid() != 0 && home_dir != NULL)
+               directory = home_dir;
+       rte_snprintf(buffer, sizeof(buffer) - 1, HUGEPAGE_INFO_FMT, directory,
+                       internal_config.hugefile_prefix);
+       return buffer;
+}
+
+/** String format for hugepage map files. */
+#define HUGEFILE_FMT "%s/%smap_%d"
+
+static inline const char *
+eal_get_hugefile_path(char *buffer, size_t buflen, const char *hugedir, int f_id)
+{
+       rte_snprintf(buffer, buflen, HUGEFILE_FMT, hugedir,
+                       internal_config.hugefile_prefix, f_id);
+       buffer[buflen - 1] = '\0';
+       return buffer;
+}
+
+/** define the default filename prefix for the %s values above */
+#define HUGEFILE_PREFIX_DEFAULT "rte"
+
+
+#endif /* _EAL_LINUXAPP_FS_PATHS_H */
diff --git a/lib/librte_eal/linuxapp/eal/include/eal_hugepages.h b/lib/librte_eal/linuxapp/eal/include/eal_hugepages.h
new file mode 100644 (file)
index 0000000..2c7d646
--- /dev/null
@@ -0,0 +1,62 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#ifndef RTE_LINUXAPP_HUGEPAGES_H_
+#define RTE_LINUXAPP_HUGEPAGES_H_
+
+#define MAX_HUGEPAGE_PATH PATH_MAX
+
+/**
+ * Structure used to store information about hugepages that we mapped
+ * through the files in hugetlbfs.
+ */
+struct hugepage {
+       void *orig_va;      /**< virtual addr of first mmap() */
+       void *final_va;     /**< virtual addr of 2nd mmap() */
+       uint64_t physaddr;  /**< physical addr */
+       uint64_t size;      /**< the page size */
+       int socket_id;      /**< NUMA socket ID */
+       int file_id;        /**< the '%d' in HUGEFILE_FMT */
+       int memseg_id;      /**< the memory segment to which page belongs */
+       char filepath[MAX_HUGEPAGE_PATH]; /**< Path to backing file on filesystem */
+};
+
+/**
+ * Read the information from linux on what hugepages are available
+ * for the EAL to use
+ */
+int eal_hugepage_info_init(void);
+
+#endif /* RTE_LINUXAPP_HUGEPAGES_H_ */
diff --git a/lib/librte_eal/linuxapp/eal/include/eal_internal_cfg.h b/lib/librte_eal/linuxapp/eal/include/eal_internal_cfg.h
new file mode 100644 (file)
index 0000000..70d5afb
--- /dev/null
@@ -0,0 +1,76 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+/**
+ * @file
+ * Holds the structures for the eal internal configuration
+ */
+
+#ifndef _EAL_LINUXAPP_INTERNAL_CFG
+#define _EAL_LINUXAPP_INTERNAL_CFG
+
+#define MAX_HUGEPAGE_SIZES 3  /**< support up to 3 page sizes */
+
+/*
+ * internal configuration structure for the number, size and
+ * mount points of hugepages
+ */
+struct hugepage_info {
+       uint64_t hugepage_sz;       /**< size of a huge page */
+       const char *hugedir;        /**< dir where hugetlbfs is mounted */
+       uint32_t num_pages;         /**< number of hugepages of that size */
+};
+
+/**
+ * internal configuration
+ */
+struct internal_config {
+       volatile uint64_t memory;         /* amount of asked memory */
+       volatile unsigned force_nchannel; /* force number of channels */
+       volatile unsigned force_nrank;    /* force number of ranks */
+       volatile unsigned no_hugetlbfs;   /* true to disable hugetlbfs */
+       volatile unsigned no_pci;         /* true to disable PCI */
+       volatile unsigned no_hpet;        /* true to disable HPET */
+       volatile unsigned no_shconf;      /* true if there is no shared config */
+       volatile enum rte_proc_type_t process_type; /* multi-process proc type */
+       const char *hugefile_prefix;      /* the base filename of hugetlbfs files */
+       const char *hugepage_dir;         /* specific hugetlbfs directory to use */
+
+       unsigned num_hugepage_sizes;      /* how many sizes on this system */
+       struct hugepage_info hugepage_info[MAX_HUGEPAGE_SIZES];
+};
+extern struct internal_config internal_config; /**< Global EAL configuration. */
+
+#endif /* _EAL_LINUXAPP_INTERNAL_CFG */
diff --git a/lib/librte_eal/linuxapp/eal/include/eal_thread.h b/lib/librte_eal/linuxapp/eal/include/eal_thread.h
new file mode 100644 (file)
index 0000000..a04a35e
--- /dev/null
@@ -0,0 +1,55 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#ifndef _EAL_LINUXAPP_THREAD_H_
+#define _EAL_LINUXAPP_THREAD_H_
+
+/**
+ * basic loop of thread, called for each thread by eal_init().
+ *
+ * @param arg
+ *   opaque pointer
+ */
+__attribute__((noreturn)) void *eal_thread_loop(void *arg);
+
+/**
+ * Init per-lcore info for master thread
+ *
+ * @param lcore_id
+ *   identifier of master lcore
+ */
+void eal_thread_init_master(unsigned lcore_id);
+
+#endif /* _EAL_LINUXAPP_THREAD_H_ */
diff --git a/lib/librte_eal/linuxapp/eal/include/exec-env/rte_interrupts.h b/lib/librte_eal/linuxapp/eal/include/exec-env/rte_interrupts.h
new file mode 100644 (file)
index 0000000..15ca209
--- /dev/null
@@ -0,0 +1,56 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#ifndef _RTE_INTERRUPTS_H_
+#error "don't include this file directly, please include generic <rte_interrupts.h>"
+#endif
+
+#ifndef _RTE_LINUXAPP_INTERRUPTS_H_
+#define _RTE_LINUXAPP_INTERRUPTS_H_
+
+enum rte_intr_handle_type {
+       RTE_INTR_HANDLE_UNKNOWN = 0,
+       RTE_INTR_HANDLE_UIO,      /**< uio device handle */
+       RTE_INTR_HANDLE_ALARM,    /**< alarm handle */
+       RTE_INTR_HANDLE_MAX
+};
+
+/** Handle for interrupts. */
+struct rte_intr_handle {
+       int fd;                          /**< file descriptor */
+       enum rte_intr_handle_type type;  /**< handle type */
+};
+
+#endif /* _RTE_LINUXAPP_INTERRUPTS_H_ */
diff --git a/lib/librte_eal/linuxapp/eal/include/exec-env/rte_lcore.h b/lib/librte_eal/linuxapp/eal/include/exec-env/rte_lcore.h
new file mode 100644 (file)
index 0000000..4f14cbb
--- /dev/null
@@ -0,0 +1,92 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#ifndef _RTE_LCORE_H_
+#error "don't include this file directly, please include generic <rte_lcore.h>"
+#endif
+
+#ifndef _RTE_LINUXAPP_LCORE_H_
+#define _RTE_LINUXAPP_LCORE_H_
+
+/**
+ * @file
+ * API for lcore and socket manipulation in linuxapp environment
+ */
+
+/**
+ * structure storing internal configuration (per-lcore)
+ */
+struct lcore_config {
+       unsigned detected;         /**< true if lcore was detected */
+       pthread_t thread_id;       /**< pthread identifier */
+       int pipe_master2slave[2];  /**< communication pipe with master */
+       int pipe_slave2master[2];  /**< communication pipe with master */
+       lcore_function_t * volatile f;         /**< function to call */
+       void * volatile arg;       /**< argument of function */
+       volatile int ret;          /**< return value of function */
+       volatile enum rte_lcore_state_t state; /**< lcore state */
+       unsigned socket_id;        /**< physical socket id for this lcore */
+};
+
+/**
+ * internal configuration (per-lcore)
+ */
+extern struct lcore_config lcore_config[RTE_MAX_LCORE];
+
+/**
+ * Return the ID of the physical socket of the logical core we are
+ * running on.
+ */
+static inline unsigned
+rte_socket_id(void)
+{
+       return lcore_config[rte_lcore_id()].socket_id;
+}
+
+/**
+ * Get the ID of the physical socket of the specified lcore
+ *
+ * @param lcore_id
+ *   the targeted lcore, which MUST be between 0 and RTE_MAX_LCORE-1.
+ * @return
+ *   the ID of lcoreid's physical socket
+ */
+static inline unsigned
+rte_lcore_to_socket_id(unsigned lcore_id)
+{
+       return lcore_config[lcore_id].socket_id;
+}
+
+#endif /* _RTE_LINUXAPP_LCORE_H_ */
diff --git a/lib/librte_eal/linuxapp/eal/include/exec-env/rte_per_lcore.h b/lib/librte_eal/linuxapp/eal/include/exec-env/rte_per_lcore.h
new file mode 100644 (file)
index 0000000..781cfed
--- /dev/null
@@ -0,0 +1,69 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#ifndef _RTE_PER_LCORE_H_
+#error "don't include this file directly, please include generic <rte_per_lcore.h>"
+#endif
+
+#ifndef _RTE_LINUXAPP_PER_LCORE_H_
+#define _RTE_LINUXAPP_PER_LCORE_H_
+
+/**
+ * @file
+ * Per-lcore variables in RTE on linuxapp environment
+ */
+
+#include <pthread.h>
+
+/**
+ * Macro to define a per-lcore variable "var" of type "type". Do not
+ * use keywords like "static" or "volatile" in the type; instead,
+ * prefix the whole macro with them.
+ */
+#define RTE_DEFINE_PER_LCORE(type, name)                       \
+       __thread __typeof__(type) per_lcore_##name
+
+/**
+ * Macro to declare an extern per lcore variable "var" of type "type"
+ */
+#define RTE_DECLARE_PER_LCORE(type, name)                      \
+       extern __thread __typeof__(type) per_lcore_##name
+
+/**
+ * Read/write the per-lcore variable value
+ */
+#define RTE_PER_LCORE(name) (per_lcore_##name)
+
+#endif /* _RTE_LINUXAPP_PER_LCORE_H_ */
diff --git a/lib/librte_eal/linuxapp/igb_uio/Makefile b/lib/librte_eal/linuxapp/igb_uio/Makefile
new file mode 100644 (file)
index 0000000..f52aa7f
--- /dev/null
@@ -0,0 +1,55 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# module name and path
+#
+MODULE = igb_uio
+MODULE_PATH = drivers/net/igb_uio
+
+#
+# CFLAGS
+#
+MODULE_CFLAGS += -I$(SRCDIR) --param max-inline-insns-single=50
+MODULE_CFLAGS += -I$(RTE_OUTPUT)/include
+MODULE_CFLAGS += -Winline -Wall -Werror
+MODULE_CFLAGS += -include $(RTE_OUTPUT)/include/rte_config.h
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-y := igb_uio.c
+
+include $(RTE_SDK)/mk/rte.module.mk
diff --git a/lib/librte_eal/linuxapp/igb_uio/igb_uio.c b/lib/librte_eal/linuxapp/igb_uio/igb_uio.c
new file mode 100644 (file)
index 0000000..51733f6
--- /dev/null
@@ -0,0 +1,402 @@
+/*-
+ *
+ * Copyright (c) 2010-2012, Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ *
+ * GNU GPL V2: http://www.gnu.org/licenses/old-licenses/gpl-2.0.html
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/uio_driver.h>
+#include <linux/io.h>
+#include <linux/msi.h>
+#include <linux/version.h>
+
+/* Some function names changes between 3.2.0 and 3.3.0... */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0)
+#define PCI_LOCK pci_block_user_cfg_access
+#define PCI_UNLOCK pci_unblock_user_cfg_access
+#else
+#define PCI_LOCK pci_cfg_access_lock
+#define PCI_UNLOCK pci_cfg_access_unlock
+#endif
+
+/**
+ * MSI-X related macros, copied from linux/pci_regs.h in kernel 2.6.39;
+ * none of them are present in kernel 2.6.35.
+ */
+#ifndef PCI_MSIX_ENTRY_SIZE
+#define PCI_MSIX_ENTRY_SIZE             16
+#define PCI_MSIX_ENTRY_LOWER_ADDR       0
+#define PCI_MSIX_ENTRY_UPPER_ADDR       4
+#define PCI_MSIX_ENTRY_DATA             8
+#define PCI_MSIX_ENTRY_VECTOR_CTRL      12
+#define PCI_MSIX_ENTRY_CTRL_MASKBIT     1
+#endif
+
+#define IGBUIO_NUM_MSI_VECTORS 1
+
+/* interrupt mode */
+enum igbuio_intr_mode {
+       IGBUIO_LEGACY_INTR_MODE = 0,
+       IGBUIO_MSI_INTR_MODE,
+       IGBUIO_MSIX_INTR_MODE,
+       IGBUIO_INTR_MODE_MAX
+};
+
+/**
+ * A structure describing the private information for a uio device.
+ */
+struct rte_uio_pci_dev {
+       struct uio_info info;
+       struct pci_dev *pdev;
+       spinlock_t lock; /* spinlock for accessing PCI config space or msix data in multi tasks/isr */
+       enum igbuio_intr_mode mode;
+       struct msix_entry \
+               msix_entries[IGBUIO_NUM_MSI_VECTORS]; /* pointer to the msix vectors to be allocated later */
+};
+
+static const enum igbuio_intr_mode igbuio_intr_mode_preferred = IGBUIO_MSIX_INTR_MODE;
+
+/* PCI device id table */
+static struct pci_device_id igbuio_pci_ids[] = {
+#define RTE_PCI_DEV_ID_DECL(vend, dev) {PCI_DEVICE(vend, dev)},
+#include <rte_pci_dev_ids.h>
+{ 0, },
+};
+
+static inline struct rte_uio_pci_dev *
+igbuio_get_uio_pci_dev(struct uio_info *info)
+{
+       return container_of(info, struct rte_uio_pci_dev, info);
+}
+
+/**
+ * It masks the msix on/off of generating MSI-X messages.
+ */
+static int
+igbuio_msix_mask_irq(struct msi_desc *desc, int32_t state)
+{
+       uint32_t mask_bits = desc->masked;
+       unsigned offset = desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
+                                               PCI_MSIX_ENTRY_VECTOR_CTRL;
+
+       if (state != 0)
+               mask_bits &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;
+       else
+               mask_bits |= PCI_MSIX_ENTRY_CTRL_MASKBIT;
+
+       if (mask_bits != desc->masked) {
+               writel(mask_bits, desc->mask_base + offset);
+               readl(desc->mask_base);
+               desc->masked = mask_bits;
+       }
+
+       return 0;
+}
+
+/**
+ * This function sets/clears the masks for generating LSC interrupts.
+ *
+ * @param udev
+ *   The pointer to the private struct rte_uio_pci_dev.
+ * @param state
+ *   The on/off flag of masking LSC.
+ * @return
+ *   - On success, zero value.
+ *   - On failure, a negative value.
+ */
+static int
+igbuio_set_interrupt_mask(struct rte_uio_pci_dev *udev, int32_t state)
+{
+       struct pci_dev *pdev = udev->pdev;
+
+       if (udev->mode == IGBUIO_MSIX_INTR_MODE) {
+               struct msi_desc *desc;
+
+               list_for_each_entry(desc, &pdev->msi_list, list) {
+                       igbuio_msix_mask_irq(desc, state);
+               }
+       }
+       else if (udev->mode == IGBUIO_LEGACY_INTR_MODE) {
+               uint32_t status;
+               uint16_t old, new;
+
+               pci_read_config_dword(pdev, PCI_COMMAND, &status);
+               old = status;
+               if (state != 0)
+                       new = old & (~PCI_COMMAND_INTX_DISABLE);
+               else
+                       new = old | PCI_COMMAND_INTX_DISABLE;
+
+               if (old != new)
+                       pci_write_config_word(pdev, PCI_COMMAND, new);
+       }
+
+       return 0;
+}
+
+/**
+ * This is the irqcontrol callback to be registered to uio_info.
+ * It can be used to disable/enable interrupt from user space processes.
+ *
+ * @param info
+ *  pointer to uio_info.
+ * @param irq_state
+ *  state value. 1 to enable interrupt, 0 to disable interrupt.
+ *
+ * @return
+ *  - On success, 0.
+ *  - On failure, a negative value.
+ */
+static int
+igbuio_pci_irqcontrol(struct uio_info *info, s32 irq_state)
+{
+       unsigned long flags;
+       struct rte_uio_pci_dev *udev = igbuio_get_uio_pci_dev(info);
+       struct pci_dev *pdev = udev->pdev;
+
+       spin_lock_irqsave(&udev->lock, flags);
+       PCI_LOCK(pdev);
+
+       igbuio_set_interrupt_mask(udev, irq_state);
+
+       PCI_UNLOCK(pdev);
+       spin_unlock_irqrestore(&udev->lock, flags);
+
+       return 0;
+}
+
+/**
+ * This is the interrupt handler; it checks whether the interrupt belongs
+ * to the right device. If so, the interrupt is disabled here and will be
+ * re-enabled later.
+ */
+static irqreturn_t
+igbuio_pci_irqhandler(int irq, struct uio_info *info)
+{
+       irqreturn_t ret = IRQ_NONE;
+       unsigned long flags;
+       struct rte_uio_pci_dev *udev = igbuio_get_uio_pci_dev(info);
+       struct pci_dev *pdev = udev->pdev;
+       uint32_t cmd_status_dword;
+       uint16_t status;
+
+       spin_lock_irqsave(&udev->lock, flags);
+       /* block userspace PCI config reads/writes */
+       PCI_LOCK(pdev);
+
+       /* for legacy mode, interrupt maybe shared */
+       if (udev->mode == IGBUIO_LEGACY_INTR_MODE) {
+               pci_read_config_dword(pdev, PCI_COMMAND, &cmd_status_dword);
+               status = cmd_status_dword >> 16;
+               /* interrupt is not ours, goes to out */
+               if (!(status & PCI_STATUS_INTERRUPT))
+                       goto done;
+       }
+
+       igbuio_set_interrupt_mask(udev, 0);
+       ret = IRQ_HANDLED;
+done:
+       /* unblock userspace PCI config reads/writes */
+       PCI_UNLOCK(pdev);
+       spin_unlock_irqrestore(&udev->lock, flags);
+       printk(KERN_INFO "irq 0x%x %s\n", irq, (ret == IRQ_HANDLED) ? "handled" : "not handled");
+
+       return ret;
+}
+
+/*
+ * Remap the PCI resource described by BAR #pci_bar into UIO map slot n.
+ * Returns 0 on success, -1 if the BAR is empty or ioremap fails.
+ */
+static int
+igbuio_pci_setup_iomem(struct pci_dev *dev, struct uio_info *info,
+                      int n, int pci_bar, const char *name)
+{
+       unsigned long base, size;
+       void *mapped;
+
+       base = pci_resource_start(dev, pci_bar);
+       size = pci_resource_len(dev, pci_bar);
+       /* an absent or zero-length BAR cannot be mapped */
+       if (base == 0 || size == 0)
+               return -1;
+
+       mapped = ioremap(base, size);
+       if (mapped == NULL)
+               return -1;
+
+       /* record the mapping so UIO can export it to userspace */
+       info->mem[n].name = name;
+       info->mem[n].addr = base;
+       info->mem[n].internal_addr = mapped;
+       info->mem[n].size = size;
+       info->mem[n].memtype = UIO_MEM_PHYS;
+       return 0;
+}
+
+/* Undo every ioremap recorded in the UIO map table. */
+static void
+igbuio_pci_release_iomem(struct uio_info *info)
+{
+       int idx;
+
+       for (idx = 0; idx < MAX_UIO_MAPS; idx++) {
+               /* slots never mapped carry a NULL internal address */
+               if (info->mem[idx].internal_addr == NULL)
+                       continue;
+               iounmap(info->mem[idx].internal_addr);
+       }
+}
+
+/*
+ * PCI probe callback: enable the device, map BAR 0, pick an interrupt
+ * mode (MSI-X if preferred and available, otherwise the default/legacy
+ * mode) and register the device with the UIO framework.
+ *
+ * The error labels at the bottom undo, in reverse order, exactly the
+ * steps completed before the corresponding goto -- keep them in sync
+ * with the setup sequence when modifying this function.
+ */
+static int __devinit
+igbuio_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+       struct rte_uio_pci_dev *udev;
+
+       udev = kzalloc(sizeof(struct rte_uio_pci_dev), GFP_KERNEL);
+       if (!udev)
+               return -ENOMEM;
+
+       /*
+        * enable device: ask low-level code to enable I/O and
+        * memory
+        */
+       if (pci_enable_device(dev)) {
+               printk(KERN_ERR "Cannot enable PCI device\n");
+               goto fail_free;
+       }
+
+       /* XXX should we use 64 bits ? */
+       /* set 32-bit DMA mask */
+       /* NOTE(review): only the streaming mask is set, with a raw constant;
+        * DMA_BIT_MASK(32) and pci_set_consistent_dma_mask() may be needed
+        * -- confirm against the kernel DMA API. */
+       if (pci_set_dma_mask(dev,(uint64_t)0xffffffff)) {
+               printk(KERN_ERR "Cannot set DMA mask\n");
+               goto fail_disable;
+       }
+
+       /*
+        * reserve device's PCI memory regions for use by this
+        * module
+        */
+       if (pci_request_regions(dev, "igb_uio")) {
+               printk(KERN_ERR "Cannot request regions\n");
+               goto fail_disable;
+       }
+
+       /* enable bus mastering on the device */
+       pci_set_master(dev);
+
+       /* remap IO memory: BAR 0 becomes UIO map 0, named "config" */
+       if (igbuio_pci_setup_iomem(dev, &udev->info, 0, 0, "config"))
+               goto fail_release_regions;
+
+       /* fill uio infos */
+       udev->info.name = "Intel IGB UIO";
+       udev->info.version = "0.1";
+       udev->info.handler = igbuio_pci_irqhandler;
+       udev->info.irqcontrol = igbuio_pci_irqcontrol;
+       udev->info.priv = udev;
+       udev->pdev = dev;
+       udev->mode = 0; /* set the default value for interrupt mode */
+       spin_lock_init(&udev->lock);
+
+       /* check if it need to try msix first */
+       if (igbuio_intr_mode_preferred == IGBUIO_MSIX_INTR_MODE) {
+               int vector;
+
+               for (vector = 0; vector < IGBUIO_NUM_MSI_VECTORS; vector ++)
+                       udev->msix_entries[vector].entry = vector;
+
+               if (pci_enable_msix(udev->pdev, udev->msix_entries, IGBUIO_NUM_MSI_VECTORS) == 0) {
+                       udev->mode = IGBUIO_MSIX_INTR_MODE;
+               }
+               else {
+                       /* MSI-X unavailable: fall back to the default mode */
+                       pci_disable_msix(udev->pdev);
+                       printk(KERN_INFO "fail to enable pci msix, or not enough msix entries\n");
+               }
+       }
+       switch (udev->mode) {
+       case IGBUIO_MSIX_INTR_MODE:
+               udev->info.irq_flags = 0;
+               udev->info.irq = udev->msix_entries[0].vector;
+               break;
+       case IGBUIO_MSI_INTR_MODE:
+               /* NOTE(review): MSI mode configures no irq here -- looks
+                * unimplemented; confirm before relying on it. */
+               break;
+       case IGBUIO_LEGACY_INTR_MODE:
+               udev->info.irq_flags = IRQF_SHARED;
+               udev->info.irq = dev->irq;
+               break;
+       default:
+               break;
+       }
+
+       pci_set_drvdata(dev, udev);
+       /* start with the device interrupt masked (irqcontrol arg 0) */
+       igbuio_pci_irqcontrol(&udev->info, 0);
+
+       /* register uio driver */
+       if (uio_register_device(&dev->dev, &udev->info))
+               goto fail_release_iomem;
+
+       printk(KERN_INFO "uio device registered with irq %lx\n", udev->info.irq);
+
+       return 0;
+
+fail_release_iomem:
+       igbuio_pci_release_iomem(&udev->info);
+       if (udev->mode == IGBUIO_MSIX_INTR_MODE)
+               pci_disable_msix(udev->pdev);
+fail_release_regions:
+       pci_release_regions(dev);
+fail_disable:
+       pci_disable_device(dev);
+fail_free:
+       kfree(udev);
+
+       return -ENODEV;
+}
+
+/*
+ * PCI remove callback: tear down everything set up in igbuio_pci_probe(),
+ * in reverse order.
+ */
+static void
+igbuio_pci_remove(struct pci_dev *dev)
+{
+       struct uio_info *info = pci_get_drvdata(dev);
+
+       uio_unregister_device(info);
+       /* bug fix: iounmap the BARs mapped in probe; without this every
+        * bind/unbind cycle leaked the ioremap'd mapping */
+       igbuio_pci_release_iomem(info);
+       if (((struct rte_uio_pci_dev *)info->priv)->mode == IGBUIO_MSIX_INTR_MODE)
+               pci_disable_msix(dev);
+       pci_release_regions(dev);
+       pci_disable_device(dev);
+       pci_set_drvdata(dev, NULL);
+       kfree(info);
+}
+
+/* PCI driver definition: binds devices matching igbuio_pci_ids to the
+ * probe/remove handlers above. */
+static struct pci_driver igbuio_pci_driver = {
+       .name = "igb_uio",
+       .id_table = igbuio_pci_ids,
+       .probe = igbuio_pci_probe,
+       .remove = igbuio_pci_remove,
+};
+
+/* Module entry point: register the PCI driver with the kernel. */
+static int __init
+igbuio_pci_init_module(void)
+{
+       return pci_register_driver(&igbuio_pci_driver);
+}
+
+/* Module exit point: unregister the PCI driver. */
+static void __exit
+igbuio_pci_exit_module(void)
+{
+       pci_unregister_driver(&igbuio_pci_driver);
+}
+
+module_init(igbuio_pci_init_module);
+module_exit(igbuio_pci_exit_module);
+
+/* Module metadata reported by modinfo. */
+MODULE_DESCRIPTION("UIO driver for Intel IGB PCI cards");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Intel Corporation");
diff --git a/lib/librte_ether/Makefile b/lib/librte_ether/Makefile
new file mode 100644 (file)
index 0000000..ce01fda
--- /dev/null
@@ -0,0 +1,55 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = libethdev.a
+
+# optimized build, with the project-wide warnings-as-errors flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+# sources compiled into the library
+SRCS-y += rte_ethdev.c
+
+#
+# Export include files
+#
+SYMLINK-y-include += rte_ether.h
+SYMLINK-y-include += rte_ethdev.h
+
+# this lib depends upon:
+DEPDIRS-y += lib/librte_eal lib/librte_mempool lib/librte_ring lib/librte_mbuf
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_ether/rte_ethdev.c b/lib/librte_ether/rte_ethdev.c
new file mode 100644 (file)
index 0000000..a7a7e68
--- /dev/null
@@ -0,0 +1,1381 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <sys/types.h>
+#include <sys/queue.h>
+#include <ctype.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <stdint.h>
+#include <inttypes.h>
+
+#include <rte_byteorder.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_memzone.h>
+#include <rte_launch.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_common.h>
+#include <rte_ring.h>
+#include <rte_mempool.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+#include <rte_errno.h>
+#include <rte_spinlock.h>
+
+#include "rte_ether.h"
+#include "rte_ethdev.h"
+
+/*
+ * Debug trace macro: logs through RTE_LOG at ERR level with the calling
+ * function name prefixed.  Compiles to nothing unless
+ * RTE_LIBRTE_ETHDEV_DEBUG is defined.
+ */
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+#define PMD_DEBUG_TRACE(fmt, args...) do {                        \
+               RTE_LOG(ERR, PMD, "%s: " fmt, __func__, ## args); \
+       } while (0)
+#else
+#define PMD_DEBUG_TRACE(fmt, args...)
+#endif
+
+/* define two macros for quick checking for restricting functions to primary
+ * instance only. First macro is for functions returning an int - and therefore
+ * an error code, second macro is for functions returning void.
+ */
+#define PROC_PRIMARY_OR_ERR() do { \
+       if (rte_eal_process_type() != RTE_PROC_PRIMARY) { \
+               PMD_DEBUG_TRACE("Cannot run %s in secondary processes\n", \
+                                                       __func__); \
+                       return (-E_RTE_SECONDARY); \
+               } \
+} while(0)
+
+#define PROC_PRIMARY_OR_RET() do { \
+       if (rte_eal_process_type() != RTE_PROC_PRIMARY) { \
+               PMD_DEBUG_TRACE("Cannot run %s in secondary processes\n", \
+                                                       __func__); \
+               return; \
+       } \
+} while(0)
+
+/* Macros to check for invalid function pointers in dev_ops structure */
+#define FUNC_PTR_OR_ERR_RET(func, retval) do { \
+       if ((func) == NULL) { \
+               PMD_DEBUG_TRACE("Function not supported\n"); \
+               return (retval); \
+       } \
+} while(0)
+#define FUNC_PTR_OR_RET(func) do { \
+       if ((func) == NULL) { \
+               PMD_DEBUG_TRACE("Function not supported\n"); \
+               return; \
+       } \
+} while(0)
+
+/* name of the memzone shared between primary and secondary processes */
+static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
+/* per-process table of ethernet devices, indexed by port id */
+struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
+/* memzone-backed per-port data shared across processes; allocated lazily */
+static struct rte_eth_dev_data *rte_eth_dev_data = NULL;
+/* number of ports allocated so far */
+static uint8_t nb_ports = 0;
+
+/* spinlock for eth device callbacks */
+static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;
+
+/**
+ * The user application callback description.
+ *
+ * It contains callback address to be registered by user application,
+ * the pointer to the parameters for callback, and the event type.
+ * Entries live on a per-device list guarded by rte_eth_dev_cb_lock.
+ */
+struct rte_eth_dev_callback {
+       TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
+       rte_eth_dev_cb_fn cb_fn;                /**< Callback address */
+       void *cb_arg;                           /**< Parameter for callback */
+       enum rte_eth_event_type event;          /**< Interrupt event type */
+};
+
+/*
+ * Make the shared per-port data array available in this process.
+ * The primary process reserves the memzone; secondary processes attach
+ * to it by name.  Panics if the memzone cannot be obtained.
+ */
+static inline void
+rte_eth_dev_data_alloc(void)
+{
+       const struct rte_memzone *mz;
+       const unsigned data_size =
+                       RTE_MAX_ETHPORTS * sizeof(*rte_eth_dev_data);
+
+       if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+               mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
+       } else {
+               mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA, data_size,
+                               rte_socket_id(), 0);
+       }
+       if (mz == NULL)
+               rte_panic("Cannot allocate memzone for ethernet port data\n");
+
+       rte_eth_dev_data = mz->addr;
+       /* only the owner of the zone clears it */
+       if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+               memset(rte_eth_dev_data, 0, data_size);
+}
+
+/*
+ * Take the next free port slot and wire it to its shared data entry.
+ * Returns NULL when all RTE_MAX_ETHPORTS slots are in use.
+ */
+static inline struct rte_eth_dev *
+rte_eth_dev_allocate(void)
+{
+       struct rte_eth_dev *new_dev;
+
+       if (nb_ports == RTE_MAX_ETHPORTS)
+               return NULL;
+
+       /* map/allocate the shared data array on first use */
+       if (rte_eth_dev_data == NULL)
+               rte_eth_dev_data_alloc();
+
+       new_dev = &rte_eth_devices[nb_ports];
+       new_dev->data = &rte_eth_dev_data[nb_ports];
+       new_dev->data->port_id = nb_ports;
+       nb_ports++;
+       return new_dev;
+}
+
+/*
+ * PCI devinit callback: allocate an ethdev port for a matching PCI
+ * device and invoke the PMD's per-device init hook.
+ *
+ * Returns 0 on success, -ENOMEM on allocation failure, or the PMD's
+ * negative error code (the port slot is released on any failure).
+ */
+static int
+rte_eth_dev_init(struct rte_pci_driver *pci_drv,
+                struct rte_pci_device *pci_dev)
+{
+       struct eth_driver    *eth_drv;
+       struct rte_eth_dev *eth_dev;
+       int diag;
+
+       eth_drv = (struct eth_driver *)pci_drv;
+
+       eth_dev = rte_eth_dev_allocate();
+       if (eth_dev == NULL)
+               return -ENOMEM;
+
+
+       if (rte_eal_process_type() == RTE_PROC_PRIMARY){
+               eth_dev->data->dev_private = rte_zmalloc("ethdev private structure",
+                                 eth_drv->dev_private_size,
+                                 CACHE_LINE_SIZE);
+               if (eth_dev->data->dev_private == NULL) {
+                       /* bug fix: give back the slot taken by
+                        * rte_eth_dev_allocate(), as the PMD-failure
+                        * path below already does */
+                       nb_ports--;
+                       return -ENOMEM;
+               }
+       }
+       eth_dev->pci_dev = pci_dev;
+       eth_dev->driver = eth_drv;
+       eth_dev->data->rx_mbuf_alloc_failed = 0;
+
+       /* init user callbacks */
+       TAILQ_INIT(&(eth_dev->callbacks));
+
+       /*
+        * Set the default maximum frame size.
+        */
+       eth_dev->data->max_frame_size = ETHER_MAX_LEN;
+
+       /* Invoke PMD device initialization function */
+       diag = (*eth_drv->eth_dev_init)(eth_drv, eth_dev);
+       if (diag == 0)
+               return (0);
+
+       /* bug fix: "0x%u" printed decimal after an 0x prefix; use 0x%x */
+       PMD_DEBUG_TRACE("driver %s: eth_dev_init(vendor_id=0x%x device_id=0x%x)"
+                       " failed\n", pci_drv->name,
+                       (unsigned) pci_dev->id.vendor_id,
+                       (unsigned) pci_dev->id.device_id);
+       if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+               rte_free(eth_dev->data->dev_private);
+       nb_ports--;
+       return diag;
+}
+
+/**
+ * Register an Ethernet [Poll Mode] driver.
+ *
+ * Function invoked by the initialization function of an Ethernet driver
+ * to simultaneously register itself as a PCI driver and as an Ethernet
+ * Poll Mode Driver.
+ * Invokes the rte_eal_pci_register() function to register the *pci_drv*
+ * structure embedded in the *eth_drv* structure, after having stored the
+ * address of the rte_eth_dev_init() function in the *devinit* field of
+ * the *pci_drv* structure.
+ * During the PCI probing phase, the rte_eth_dev_init() function is
+ * invoked for each PCI [Ethernet device] matching the embedded PCI
+ * identifiers provided by the driver.
+ */
+void
+rte_eth_driver_register(struct eth_driver *eth_drv)
+{
+       /* route PCI probe events through rte_eth_dev_init() above */
+       eth_drv->pci_drv.devinit = rte_eth_dev_init;
+       rte_eal_pci_register(&eth_drv->pci_drv);
+}
+
+/* Number of Ethernet ports probed so far. */
+uint8_t
+rte_eth_dev_count(void)
+{
+       return nb_ports;
+}
+
+/*
+ * Configure an Ethernet port: check the requested RX/TX queue counts
+ * against the limits reported by the PMD, copy the configuration into
+ * the shared device data, then invoke the PMD's dev_configure hook.
+ * The port must be stopped.  Returns 0 on success, a negative errno
+ * otherwise.
+ */
+int
+rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
+                     const struct rte_eth_conf *dev_conf)
+{
+       struct rte_eth_dev *dev;
+       struct rte_eth_dev_info dev_info;
+       int diag;
+
+       /* This function is only safe when called from the primary process
+        * in a multi-process setup. */
+       PROC_PRIMARY_OR_ERR();
+
+       if (port_id >= nb_ports || port_id >= RTE_MAX_ETHPORTS) {
+               PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+               return (-EINVAL);
+       }
+       dev = &rte_eth_devices[port_id];
+
+       FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
+       FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
+
+       if (dev->data->dev_started) {
+               PMD_DEBUG_TRACE(
+                   "port %d must be stopped to allow configuration", port_id);
+               return -EBUSY;
+       }
+
+       /*
+        * Check that the numbers of RX and TX queues are not greater
+        * than the maximum number of RX and TX queues supported by the
+        * configured device.
+        */
+       (*dev->dev_ops->dev_infos_get)(dev, &dev_info);
+       if (nb_rx_q > dev_info.max_rx_queues) {
+               PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_queues=%d > %d",
+                               port_id, nb_rx_q, dev_info.max_rx_queues);
+               return (-EINVAL);
+       }
+       if (nb_rx_q == 0) {
+               PMD_DEBUG_TRACE("ethdev port_id=%d nb_rx_q == 0", port_id);
+               return (-EINVAL);
+       }
+
+       if (nb_tx_q > dev_info.max_tx_queues) {
+               PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_queues=%d > %d",
+                               port_id, nb_tx_q, dev_info.max_tx_queues);
+               return (-EINVAL);
+       }
+       if (nb_tx_q == 0) {
+               PMD_DEBUG_TRACE("ethdev port_id=%d nb_tx_q == 0", port_id);
+               return (-EINVAL);
+       }
+
+       /* Copy the dev_conf parameter into the dev structure */
+       memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));
+
+       /*
+        * If jumbo frames are enabled, check that the maximum RX packet
+        * length is supported by the configured device.
+        */
+       if (dev_conf->rxmode.jumbo_frame == 1) {
+               if (dev_conf->rxmode.max_rx_pkt_len >
+                   dev_info.max_rx_pktlen) {
+                       PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
+                               " > max valid value %u",
+                               port_id,
+                               (unsigned)dev_conf->rxmode.max_rx_pkt_len,
+                               (unsigned)dev_info.max_rx_pktlen);
+                       return (-EINVAL);
+               }
+       } else
+               /* Use default value */
+               dev->data->dev_conf.rxmode.max_rx_pkt_len = ETHER_MAX_LEN;
+
+       /* For VMDQ+DCB mode check our configuration before we go further */
+       if (dev_conf->rxmode.mq_mode == ETH_VMDQ_DCB) {
+               const struct rte_eth_vmdq_dcb_conf *conf;
+
+               if (nb_rx_q != ETH_VMDQ_DCB_NUM_QUEUES) {
+                       PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB, nb_rx_q "
+                                       "!= %d",
+                                       port_id, ETH_VMDQ_DCB_NUM_QUEUES);
+                       return (-EINVAL);
+               }
+               conf = &(dev_conf->rx_adv_conf.vmdq_dcb_conf);
+               if (! (conf->nb_queue_pools == ETH_16_POOLS ||
+                      conf->nb_queue_pools == ETH_32_POOLS)) {
+                   PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB selected, "
+                                   "nb_queue_pools != %d or nb_queue_pools "
+                                   "!= %d",
+                                   port_id, ETH_16_POOLS, ETH_32_POOLS);
+                   return (-EINVAL);
+               }
+       }
+
+       /* hand the validated configuration to the PMD */
+       diag = (*dev->dev_ops->dev_configure)(dev, nb_rx_q, nb_tx_q);
+       if (diag != 0) {
+               /* NOTE(review): assumes dev_configure may have allocated
+                * the queue arrays before failing -- confirm with PMDs */
+               rte_free(dev->data->rx_queues);
+               rte_free(dev->data->tx_queues);
+       }
+       return diag;
+}
+
+/*
+ * Re-apply the configuration recorded in the shared device data after a
+ * port start: per-slot MAC addresses, promiscuous mode and all-multicast
+ * mode are replayed into the hardware through the PMD hooks.
+ */
+static void
+rte_eth_dev_config_restore(uint8_t port_id)
+{
+       struct rte_eth_dev *dev;
+       struct rte_eth_dev_info dev_info;
+       struct ether_addr addr;
+       uint16_t i;
+
+       dev = &rte_eth_devices[port_id];
+
+       rte_eth_dev_info_get(port_id, &dev_info);
+
+       /* replay MAC address configuration */
+       for (i = 0; i < dev_info.max_mac_addrs; i++) {
+               addr = dev->data->mac_addrs[i];
+
+               /* skip zero address */
+               if (is_zero_ether_addr(&addr))
+                       continue;
+
+               /* add address to the hardware */
+               if  (*dev->dev_ops->mac_addr_add)
+                       (*dev->dev_ops->mac_addr_add)(dev, &addr, i, 0);
+               else {
+                       PMD_DEBUG_TRACE("port %d: MAC address array not supported\n",
+                                       port_id);
+                       /* exit the loop but do not return an error */
+                       break;
+               }
+       }
+
+       /* replay promiscuous configuration */
+       if (rte_eth_promiscuous_get(port_id) == 1)
+               rte_eth_promiscuous_enable(port_id);
+       else if (rte_eth_promiscuous_get(port_id) == 0)
+               rte_eth_promiscuous_disable(port_id);
+
+       /* replay allmulticast configuration */
+       if (rte_eth_allmulticast_get(port_id) == 1)
+               rte_eth_allmulticast_enable(port_id);
+       else if (rte_eth_allmulticast_get(port_id) == 0)
+               rte_eth_allmulticast_disable(port_id);
+}
+
+/*
+ * Start a configured Ethernet port via the PMD's dev_start hook, then
+ * replay the recorded configuration.  Primary process only.
+ * Returns 0 on success or a negative errno.
+ */
+int
+rte_eth_dev_start(uint8_t port_id)
+{
+       struct rte_eth_dev *ethdev;
+       int rc;
+
+       /* This function is only safe when called from the primary process
+        * in a multi-process setup*/
+       PROC_PRIMARY_OR_ERR();
+
+       if (port_id >= nb_ports) {
+               PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+               return (-EINVAL);
+       }
+       ethdev = &rte_eth_devices[port_id];
+
+       FUNC_PTR_OR_ERR_RET(*ethdev->dev_ops->dev_start, -ENOTSUP);
+       rc = (*ethdev->dev_ops->dev_start)(ethdev);
+       if (rc != 0)
+               return rc;
+
+       ethdev->data->dev_started = 1;
+       rte_eth_dev_config_restore(port_id);
+       return 0;
+}
+
+/* Stop an Ethernet port via the PMD's dev_stop hook.  Primary only. */
+void
+rte_eth_dev_stop(uint8_t port_id)
+{
+       struct rte_eth_dev *ethdev;
+
+       /* This function is only safe when called from the primary process
+        * in a multi-process setup*/
+       PROC_PRIMARY_OR_RET();
+
+       if (port_id >= nb_ports) {
+               PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+               return;
+       }
+       ethdev = &rte_eth_devices[port_id];
+
+       FUNC_PTR_OR_RET(*ethdev->dev_ops->dev_stop);
+       /* flag the port stopped before handing control to the PMD */
+       ethdev->data->dev_started = 0;
+       (*ethdev->dev_ops->dev_stop)(ethdev);
+}
+
+/* Close an Ethernet port via the PMD's dev_close hook.  Primary only. */
+void
+rte_eth_dev_close(uint8_t port_id)
+{
+       struct rte_eth_dev *ethdev;
+
+       /* This function is only safe when called from the primary process
+        * in a multi-process setup*/
+       PROC_PRIMARY_OR_RET();
+
+       if (port_id >= nb_ports) {
+               PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+               return;
+       }
+       ethdev = &rte_eth_devices[port_id];
+
+       FUNC_PTR_OR_RET(*ethdev->dev_ops->dev_close);
+       /* flag the port stopped before handing control to the PMD */
+       ethdev->data->dev_started = 0;
+       (*ethdev->dev_ops->dev_close)(ethdev);
+}
+
+/*
+ * Set up a receive queue: validate the port/queue ids, require the port
+ * to be stopped, and check that the supplied mbuf pool provides buffers
+ * large enough for the device, before delegating to the PMD's
+ * rx_queue_setup hook.  Returns 0 on success or a negative errno.
+ */
+int
+rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
+                      uint16_t nb_rx_desc, unsigned int socket_id,
+                      const struct rte_eth_rxconf *rx_conf,
+                      struct rte_mempool *mp)
+{
+       struct rte_eth_dev *dev;
+       struct rte_pktmbuf_pool_private *mbp_priv;
+       struct rte_eth_dev_info dev_info;
+
+       /* This function is only safe when called from the primary process
+        * in a multi-process setup*/
+       PROC_PRIMARY_OR_ERR();
+
+       if (port_id >= nb_ports) {
+               PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+               return (-EINVAL);
+       }
+       dev = &rte_eth_devices[port_id];
+       if (rx_queue_id >= dev->data->nb_rx_queues) {
+               PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", rx_queue_id);
+               return (-EINVAL);
+       }
+
+       if (dev->data->dev_started) {
+               PMD_DEBUG_TRACE(
+                   "port %d must be stopped to allow configuration", port_id);
+               return -EBUSY;
+       }
+
+       FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
+       FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);
+
+       /*
+        * Check the size of the mbuf data buffer.
+        * This value must be provided in the private data of the memory pool.
+        * First check that the memory pool has a valid private data.
+        */
+       (*dev->dev_ops->dev_infos_get)(dev, &dev_info);
+       if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
+               PMD_DEBUG_TRACE("%s private_data_size %d < %d\n",
+                               mp->name, (int) mp->private_data_size,
+                               (int) sizeof(struct rte_pktmbuf_pool_private));
+               return (-ENOSPC);
+       }
+       /* NOTE(review): assumes the pool's private area sits immediately
+        * after struct rte_mempool -- confirm against the mempool layout */
+       mbp_priv = (struct rte_pktmbuf_pool_private *)
+               ((char *)mp + sizeof(struct rte_mempool));
+       if ((uint32_t) (mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM) <
+           dev_info.min_rx_bufsize) {
+               PMD_DEBUG_TRACE("%s mbuf_data_room_size %d < %d "
+                               "(RTE_PKTMBUF_HEADROOM=%d + min_rx_bufsize(dev)"
+                               "=%d)\n",
+                               mp->name,
+                               (int)mbp_priv->mbuf_data_room_size,
+                               (int)(RTE_PKTMBUF_HEADROOM +
+                                     dev_info.min_rx_bufsize),
+                               (int)RTE_PKTMBUF_HEADROOM,
+                               (int)dev_info.min_rx_bufsize);
+               return (-EINVAL);
+       }
+
+       return (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
+                                              socket_id, rx_conf, mp);
+}
+
+/*
+ * Set up a transmit queue: validate the port/queue ids and that the
+ * port is stopped, then delegate to the PMD's tx_queue_setup hook.
+ * Returns 0 on success or a negative errno.
+ */
+int
+rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
+                      uint16_t nb_tx_desc, unsigned int socket_id,
+                      const struct rte_eth_txconf *tx_conf)
+{
+       struct rte_eth_dev *ethdev;
+
+       /* This function is only safe when called from the primary process
+        * in a multi-process setup*/
+       PROC_PRIMARY_OR_ERR();
+
+       if (port_id >= nb_ports) {
+               PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+               return (-EINVAL);
+       }
+       ethdev = &rte_eth_devices[port_id];
+
+       if (tx_queue_id >= ethdev->data->nb_tx_queues) {
+               PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", tx_queue_id);
+               return (-EINVAL);
+       }
+
+       if (ethdev->data->dev_started) {
+               PMD_DEBUG_TRACE(
+                   "port %d must be stopped to allow configuration", port_id);
+               return -EBUSY;
+       }
+
+       FUNC_PTR_OR_ERR_RET(*ethdev->dev_ops->tx_queue_setup, -ENOTSUP);
+       return (*ethdev->dev_ops->tx_queue_setup)(ethdev, tx_queue_id,
+                                              nb_tx_desc, socket_id, tx_conf);
+}
+
+/* Enable promiscuous RX mode on a port and record the flag. */
+void
+rte_eth_promiscuous_enable(uint8_t port_id)
+{
+       struct rte_eth_dev *ethdev;
+
+       if (port_id >= nb_ports) {
+               PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+               return;
+       }
+       ethdev = &rte_eth_devices[port_id];
+
+       FUNC_PTR_OR_RET(*ethdev->dev_ops->promiscuous_enable);
+       /* apply to hardware first, then record the new state */
+       (*ethdev->dev_ops->promiscuous_enable)(ethdev);
+       ethdev->data->promiscuous = 1;
+}
+
+/* Disable promiscuous RX mode on a port and record the flag. */
+void
+rte_eth_promiscuous_disable(uint8_t port_id)
+{
+       struct rte_eth_dev *ethdev;
+
+       if (port_id >= nb_ports) {
+               PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+               return;
+       }
+       ethdev = &rte_eth_devices[port_id];
+
+       FUNC_PTR_OR_RET(*ethdev->dev_ops->promiscuous_disable);
+       /* record the new state first, then apply to hardware */
+       ethdev->data->promiscuous = 0;
+       (*ethdev->dev_ops->promiscuous_disable)(ethdev);
+}
+
+/* Return the recorded promiscuous flag (0/1), or -1 for a bad port id. */
+int
+rte_eth_promiscuous_get(uint8_t port_id)
+{
+       if (port_id >= nb_ports) {
+               PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+               return -1;
+       }
+
+       return rte_eth_devices[port_id].data->promiscuous;
+}
+
+/* Enable all-multicast RX mode on a port and record the flag. */
+void
+rte_eth_allmulticast_enable(uint8_t port_id)
+{
+       struct rte_eth_dev *ethdev;
+
+       if (port_id >= nb_ports) {
+               PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+               return;
+       }
+       ethdev = &rte_eth_devices[port_id];
+
+       FUNC_PTR_OR_RET(*ethdev->dev_ops->allmulticast_enable);
+       /* apply to hardware first, then record the new state */
+       (*ethdev->dev_ops->allmulticast_enable)(ethdev);
+       ethdev->data->all_multicast = 1;
+}
+
+/* Disable all-multicast RX mode on a port and record the flag. */
+void
+rte_eth_allmulticast_disable(uint8_t port_id)
+{
+       struct rte_eth_dev *ethdev;
+
+       if (port_id >= nb_ports) {
+               PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+               return;
+       }
+       ethdev = &rte_eth_devices[port_id];
+
+       FUNC_PTR_OR_RET(*ethdev->dev_ops->allmulticast_disable);
+       /* record the new state first, then apply to hardware */
+       ethdev->data->all_multicast = 0;
+       (*ethdev->dev_ops->allmulticast_disable)(ethdev);
+}
+
+/* Return the recorded all-multicast flag (0/1), or -1 for a bad port id. */
+int
+rte_eth_allmulticast_get(uint8_t port_id)
+{
+       if (port_id >= nb_ports) {
+               PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+               return -1;
+       }
+
+       return rte_eth_devices[port_id].data->all_multicast;
+}
+
+/**
+ * Copy the shared link status (dev->data->dev_link) into *link without
+ * taking a lock: a 64-bit compare-and-set on the destination detects a
+ * concurrent writer.
+ *
+ * NOTE(review): this treats struct rte_eth_link as exactly 64 bits wide
+ * -- confirm whenever that structure changes.
+ *
+ * Returns 0 on success, -1 if the compare-and-set lost a race.
+ */
+static inline int
+rte_eth_dev_atomic_read_link_status(struct rte_eth_dev *dev,
+                               struct rte_eth_link *link)
+{
+       struct rte_eth_link *dst = link;
+       struct rte_eth_link *src = &(dev->data->dev_link);
+
+       if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
+                                       *(uint64_t *)src) == 0)
+               return -1;
+
+       return 0;
+}
+
+/*
+ * Retrieve the link status of a port; the PMD's link_update hook is
+ * called with its second argument set to 1 (blocking refresh allowed).
+ */
+void
+rte_eth_link_get(uint8_t port_id, struct rte_eth_link *eth_link)
+{
+       struct rte_eth_dev *ethdev;
+
+       if (port_id >= nb_ports) {
+               PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+               return;
+       }
+       ethdev = &rte_eth_devices[port_id];
+       FUNC_PTR_OR_RET(*ethdev->dev_ops->link_update);
+
+       if (ethdev->data->dev_conf.intr_conf.lsc != 0) {
+               /* link-state-change interrupts keep dev_link current;
+                * just take an atomic snapshot of it */
+               rte_eth_dev_atomic_read_link_status(ethdev, eth_link);
+       } else {
+               (*ethdev->dev_ops->link_update)(ethdev, 1);
+               *eth_link = ethdev->data->dev_link;
+       }
+}
+
+/*
+ * Retrieve the link status of a port without waiting; the PMD's
+ * link_update hook is called with its second argument set to 0.
+ */
+void
+rte_eth_link_get_nowait(uint8_t port_id, struct rte_eth_link *eth_link)
+{
+       struct rte_eth_dev *ethdev;
+
+       if (port_id >= nb_ports) {
+               PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+               return;
+       }
+       ethdev = &rte_eth_devices[port_id];
+       FUNC_PTR_OR_RET(*ethdev->dev_ops->link_update);
+
+       if (ethdev->data->dev_conf.intr_conf.lsc != 0) {
+               /* link-state-change interrupts keep dev_link current;
+                * just take an atomic snapshot of it */
+               rte_eth_dev_atomic_read_link_status(ethdev, eth_link);
+       } else {
+               (*ethdev->dev_ops->link_update)(ethdev, 0);
+               *eth_link = ethdev->data->dev_link;
+       }
+}
+
+/* Fetch a port's statistics from the PMD into *stats. */
+void
+rte_eth_stats_get(uint8_t port_id, struct rte_eth_stats *stats)
+{
+       struct rte_eth_dev *ethdev;
+
+       if (port_id >= nb_ports) {
+               PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+               return;
+       }
+       ethdev = &rte_eth_devices[port_id];
+
+       FUNC_PTR_OR_RET(*ethdev->dev_ops->stats_get);
+       (*ethdev->dev_ops->stats_get)(ethdev, stats);
+       /* rx_nombuf is tracked by the generic layer, not the PMD */
+       stats->rx_nombuf = ethdev->data->rx_mbuf_alloc_failed;
+}
+
+/* Reset a port's statistics via the PMD's stats_reset hook. */
+void
+rte_eth_stats_reset(uint8_t port_id)
+{
+       struct rte_eth_dev *ethdev;
+
+       if (port_id >= nb_ports) {
+               PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+               return;
+       }
+       ethdev = &rte_eth_devices[port_id];
+
+       FUNC_PTR_OR_RET(*ethdev->dev_ops->stats_reset);
+       (*ethdev->dev_ops->stats_reset)(ethdev);
+}
+
+/*
+ * Fill *dev_info with the capabilities of port_id.  The PCI device and
+ * driver-name fields are generic and filled here, after the PMD has
+ * supplied the device-specific limits.
+ */
+void
+rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info)
+{
+       struct rte_eth_dev *dev;
+
+       if (port_id >= nb_ports) {
+               PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+               return;
+       }
+       dev = &rte_eth_devices[port_id];
+
+       FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
+       (*dev->dev_ops->dev_infos_get)(dev, dev_info);
+       dev_info->pci_dev = dev->pci_dev;
+       dev_info->driver_name = dev->driver->pci_drv.name;
+}
+
+/*
+ * Copy the primary MAC address (slot 0 of the address array) of port_id
+ * into *mac_addr.
+ */
+void
+rte_eth_macaddr_get(uint8_t port_id, struct ether_addr *mac_addr)
+{
+       struct rte_eth_dev *dev;
+
+       if (port_id >= nb_ports) {
+               PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+               return;
+       }
+       dev = &rte_eth_devices[port_id];
+       ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
+}
+
+/*
+ * Enable (on != 0) or disable filtering of VLAN tag vlan_id on port_id.
+ * Requires hw_vlan_filter to have been enabled in the RX mode
+ * configuration.  Returns 0 on success, negative errno on error.
+ * NOTE(review): the PMD op's result is discarded here — its return value
+ * (if any) is not propagated to the caller.
+ */
+int
+rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id, int on)
+{
+       struct rte_eth_dev *dev;
+
+       if (port_id >= nb_ports) {
+               PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+               return (-ENODEV);
+       }
+       dev = &rte_eth_devices[port_id];
+       if (! (dev->data->dev_conf.rxmode.hw_vlan_filter)) {
+               PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n", port_id);
+               return (-ENOSYS);
+       }
+       /* 4095 is the maximum valid 12-bit VLAN identifier. */
+       if (vlan_id > 4095) {
+               PMD_DEBUG_TRACE("(port_id=%d) invalid vlan_id=%u > 4095\n",
+                               port_id, (unsigned) vlan_id);
+               return (-EINVAL);
+       }
+
+       FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
+       (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
+       return (0);
+}
+
+/*
+ * Add a Flow Director signature filter steering matching packets to RX
+ * queue 'queue'.  The port must be configured in RTE_FDIR_MODE_SIGNATURE.
+ * Returns 0 on success, a negative errno value on error.
+ */
+int
+rte_eth_dev_fdir_add_signature_filter(uint8_t port_id,
+                                     struct rte_fdir_filter *fdir_filter,
+                                     uint8_t queue)
+{
+       struct rte_eth_dev *dev;
+
+       if (port_id >= nb_ports) {
+               PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+               return (-ENODEV);
+       }
+
+       dev = &rte_eth_devices[port_id];
+
+       if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
+               PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
+                               port_id, dev->data->dev_conf.fdir_conf.mode);
+               return (-ENOSYS);
+       }
+
+       /* SCTP and "none" L4 types carry no usable L4 ports: reject a
+        * filter that nevertheless specifies one. */
+       if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
+            || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
+           && (fdir_filter->port_src || fdir_filter->port_dst)) {
+               PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
+                               "None l4type source & destinations ports " \
+                               "should be null!");
+               return (-EINVAL);
+       }
+
+       /* FUNC_PTR_OR_ERR_RET returns -ENOTSUP when the op is NULL, so the
+        * pointer is guaranteed valid here; the former trailing "feature
+        * not supported" fallback was unreachable dead code. */
+       FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_add_signature_filter, -ENOTSUP);
+       return ((*dev->dev_ops->fdir_add_signature_filter)(dev, fdir_filter,
+                                                          queue));
+}
+
+/*
+ * Update an existing Flow Director signature filter, re-steering matching
+ * packets to RX queue 'queue'.  The port must be configured in
+ * RTE_FDIR_MODE_SIGNATURE.  Returns 0 on success, negative errno on error.
+ */
+int
+rte_eth_dev_fdir_update_signature_filter(uint8_t port_id,
+                                        struct rte_fdir_filter *fdir_filter,
+                                        uint8_t queue)
+{
+       struct rte_eth_dev *dev;
+
+       if (port_id >= nb_ports) {
+               PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+               return (-ENODEV);
+       }
+
+       dev = &rte_eth_devices[port_id];
+
+       if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
+               PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
+                               port_id, dev->data->dev_conf.fdir_conf.mode);
+               return (-ENOSYS);
+       }
+
+       /* SCTP and "none" L4 types carry no usable L4 ports. */
+       if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
+            || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
+           && (fdir_filter->port_src || fdir_filter->port_dst)) {
+               PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
+                               "None l4type source & destinations ports " \
+                               "should be null!");
+               return (-EINVAL);
+       }
+
+       /* The op pointer is guaranteed non-NULL past this macro; the old
+        * unreachable "not supported" fallthrough has been removed. */
+       FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_update_signature_filter, -ENOTSUP);
+       return ((*dev->dev_ops->fdir_update_signature_filter)(dev,
+                                                             fdir_filter,
+                                                             queue));
+}
+
+/*
+ * Remove a Flow Director signature filter.  The port must be configured
+ * in RTE_FDIR_MODE_SIGNATURE.  Returns 0 on success, negative errno on
+ * error.
+ */
+int
+rte_eth_dev_fdir_remove_signature_filter(uint8_t port_id,
+                                        struct rte_fdir_filter *fdir_filter)
+{
+       struct rte_eth_dev *dev;
+
+       if (port_id >= nb_ports) {
+               PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+               return (-ENODEV);
+       }
+
+       dev = &rte_eth_devices[port_id];
+
+       if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_SIGNATURE) {
+               PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
+                               port_id, dev->data->dev_conf.fdir_conf.mode);
+               return (-ENOSYS);
+       }
+
+       /* SCTP and "none" L4 types carry no usable L4 ports. */
+       if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
+            || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
+           && (fdir_filter->port_src || fdir_filter->port_dst)) {
+               PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
+                               "None l4type source & destinations ports " \
+                               "should be null!");
+               return (-EINVAL);
+       }
+
+       /* The op pointer is guaranteed non-NULL past this macro; the old
+        * unreachable "not supported" fallthrough has been removed. */
+       FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_remove_signature_filter, -ENOTSUP);
+       return ((*dev->dev_ops->fdir_remove_signature_filter)(dev,
+                                                             fdir_filter));
+}
+
+/*
+ * Retrieve Flow Director status/statistics of port_id into *fdir.
+ * Requires Flow Director to be enabled (any mode).  Returns 0 on success,
+ * negative errno on error.
+ */
+int
+rte_eth_dev_fdir_get_infos(uint8_t port_id, struct rte_eth_fdir *fdir)
+{
+       struct rte_eth_dev *dev;
+
+       if (port_id >= nb_ports) {
+               PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+               return (-ENODEV);
+       }
+
+       dev = &rte_eth_devices[port_id];
+       if (! (dev->data->dev_conf.fdir_conf.mode)) {
+               PMD_DEBUG_TRACE("port %d: pkt-filter disabled\n", port_id);
+               return (-ENOSYS);
+       }
+
+       /* FUNC_PTR_OR_ERR_RET returns -ENOTSUP when the op is NULL, so the
+        * redundant NULL re-check and its unreachable fallback are gone. */
+       FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_infos_get, -ENOTSUP);
+       (*dev->dev_ops->fdir_infos_get)(dev, fdir);
+       return (0);
+}
+
+/*
+ * Add a Flow Director perfect filter identified by soft_id, steering
+ * matching packets to RX queue 'queue' or dropping them when 'drop' is
+ * set.  The port must be configured in RTE_FDIR_MODE_PERFECT; IPv6
+ * filters are not yet supported.  Returns 0 on success, negative errno
+ * on error.
+ */
+int
+rte_eth_dev_fdir_add_perfect_filter(uint8_t port_id,
+                                   struct rte_fdir_filter *fdir_filter,
+                                   uint16_t soft_id, uint8_t queue,
+                                   uint8_t drop)
+{
+       struct rte_eth_dev *dev;
+
+       if (port_id >= nb_ports) {
+               PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+               return (-ENODEV);
+       }
+
+       dev = &rte_eth_devices[port_id];
+
+       if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
+               PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
+                               port_id, dev->data->dev_conf.fdir_conf.mode);
+               return (-ENOSYS);
+       }
+
+       /* SCTP and "none" L4 types carry no usable L4 ports. */
+       if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
+            || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
+           && (fdir_filter->port_src || fdir_filter->port_dst)) {
+               PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
+                               "None l4type source & destinations ports " \
+                               "should be null!");
+               return (-EINVAL);
+       }
+
+       /* For now IPv6 is not supported with perfect filter */
+       if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6)
+               return (-ENOTSUP);
+
+       /* The op pointer is guaranteed non-NULL past this macro; the old
+        * unreachable "not supported" fallthrough has been removed. */
+       FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_add_perfect_filter, -ENOTSUP);
+       return ((*dev->dev_ops->fdir_add_perfect_filter)(dev, fdir_filter,
+                                                        soft_id, queue,
+                                                        drop));
+}
+
+/*
+ * Update the Flow Director perfect filter identified by soft_id.  The
+ * port must be configured in RTE_FDIR_MODE_PERFECT; IPv6 filters are not
+ * yet supported.  Returns 0 on success, negative errno on error.
+ */
+int
+rte_eth_dev_fdir_update_perfect_filter(uint8_t port_id,
+                                      struct rte_fdir_filter *fdir_filter,
+                                      uint16_t soft_id, uint8_t queue,
+                                      uint8_t drop)
+{
+       struct rte_eth_dev *dev;
+
+       if (port_id >= nb_ports) {
+               PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+               return (-ENODEV);
+       }
+
+       dev = &rte_eth_devices[port_id];
+
+       if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
+               PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
+                               port_id, dev->data->dev_conf.fdir_conf.mode);
+               return (-ENOSYS);
+       }
+
+       /* SCTP and "none" L4 types carry no usable L4 ports. */
+       if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
+            || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
+           && (fdir_filter->port_src || fdir_filter->port_dst)) {
+               PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
+                               "None l4type source & destinations ports " \
+                               "should be null!");
+               return (-EINVAL);
+       }
+
+       /* For now IPv6 is not supported with perfect filter */
+       if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6)
+               return (-ENOTSUP);
+
+       /* The op pointer is guaranteed non-NULL past this macro; the old
+        * unreachable "not supported" fallthrough has been removed. */
+       FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_update_perfect_filter, -ENOTSUP);
+       return ((*dev->dev_ops->fdir_update_perfect_filter)(dev, fdir_filter,
+                                                           soft_id, queue,
+                                                           drop));
+}
+
+/*
+ * Remove the Flow Director perfect filter identified by soft_id.  The
+ * port must be configured in RTE_FDIR_MODE_PERFECT; IPv6 filters are not
+ * yet supported.  Returns 0 on success, negative errno on error.
+ */
+int
+rte_eth_dev_fdir_remove_perfect_filter(uint8_t port_id,
+                                      struct rte_fdir_filter *fdir_filter,
+                                      uint16_t soft_id)
+{
+       struct rte_eth_dev *dev;
+
+       if (port_id >= nb_ports) {
+               PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+               return (-ENODEV);
+       }
+
+       dev = &rte_eth_devices[port_id];
+
+       if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT) {
+               PMD_DEBUG_TRACE("port %d: invalid FDIR mode=%u\n",
+                               port_id, dev->data->dev_conf.fdir_conf.mode);
+               return (-ENOSYS);
+       }
+
+       /* SCTP and "none" L4 types carry no usable L4 ports. */
+       if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP
+            || fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE)
+           && (fdir_filter->port_src || fdir_filter->port_dst)) {
+               PMD_DEBUG_TRACE(" Port are meaningless for SCTP and " \
+                               "None l4type source & destinations ports " \
+                               "should be null!");
+               return (-EINVAL);
+       }
+
+       /* For now IPv6 is not supported with perfect filter */
+       if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6)
+               return (-ENOTSUP);
+
+       /* The op pointer is guaranteed non-NULL past this macro; the old
+        * unreachable "not supported" fallthrough has been removed. */
+       FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_remove_perfect_filter, -ENOTSUP);
+       return ((*dev->dev_ops->fdir_remove_perfect_filter)(dev, fdir_filter,
+                                                           soft_id));
+}
+
+/*
+ * Set the global Flow Director masks of port_id.  Requires Flow Director
+ * to be enabled; IPv6 masks are not supported.  Returns 0 on success,
+ * negative errno on error.
+ */
+int
+rte_eth_dev_fdir_set_masks(uint8_t port_id, struct rte_fdir_masks *fdir_mask)
+{
+       struct rte_eth_dev *dev;
+
+       if (port_id >= nb_ports) {
+               PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+               return (-ENODEV);
+       }
+
+       dev = &rte_eth_devices[port_id];
+       if (! (dev->data->dev_conf.fdir_conf.mode)) {
+               PMD_DEBUG_TRACE("port %d: pkt-filter disabled\n", port_id);
+               return (-ENOSYS);
+       }
+
+       /* IPv6 mask are not supported */
+       if (fdir_mask->src_ipv6_mask)
+               return (-ENOTSUP);
+
+       /* The op pointer is guaranteed non-NULL past this macro; the old
+        * unreachable "not supported" fallthrough has been removed. */
+       FUNC_PTR_OR_ERR_RET(*dev->dev_ops->fdir_set_masks, -ENOTSUP);
+       return ((*dev->dev_ops->fdir_set_masks)(dev, fdir_mask));
+}
+
+/*
+ * Configure link flow control (pause frames) of port_id.  Only send_xon
+ * is validated here; high/low watermark validation is device specific
+ * and left to the PMD.  Returns 0 on success, negative errno on error.
+ */
+int
+rte_eth_dev_flow_ctrl_set(uint8_t port_id, struct rte_eth_fc_conf *fc_conf)
+{
+       struct rte_eth_dev *dev;
+
+       if (port_id >= nb_ports) {
+               PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+               return (-ENODEV);
+       }
+
+       if ((fc_conf->send_xon != 0) && (fc_conf->send_xon != 1)) {
+               PMD_DEBUG_TRACE("Invalid send_xon, only 0/1 allowed\n");
+               return (-EINVAL);
+       }
+
+       dev = &rte_eth_devices[port_id];
+
+       /* High water, low water validation are device specific.
+        * FUNC_PTR_OR_ERR_RET returns -ENOTSUP when the op is NULL, making
+        * the former trailing "return -ENOTSUP" unreachable; call directly. */
+       FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
+       return ((*dev->dev_ops->flow_ctrl_set)(dev, fc_conf));
+}
+
+/*
+ * Turn on the device LED of port_id (e.g. for physical identification).
+ * Returns the PMD's result, or a negative errno on validation failure.
+ */
+int
+rte_eth_led_on(uint8_t port_id)
+{
+       struct rte_eth_dev *dev;
+
+       if (port_id >= nb_ports) {
+               PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+               return (-ENODEV);
+       }
+
+       dev = &rte_eth_devices[port_id];
+
+       FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_on, -ENOTSUP);
+       return ((*dev->dev_ops->dev_led_on)(dev));
+}
+
+/*
+ * Turn off the device LED of port_id.  Returns the PMD's result, or a
+ * negative errno on validation failure.
+ */
+int
+rte_eth_led_off(uint8_t port_id)
+{
+       struct rte_eth_dev *dev;
+
+       if (port_id >= nb_ports) {
+               PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+               return (-ENODEV);
+       }
+
+       dev = &rte_eth_devices[port_id];
+
+       FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_led_off, -ENOTSUP);
+       return ((*dev->dev_ops->dev_led_off)(dev));
+}
+
+/*
+ * Returns index into MAC address array of addr. Use 00:00:00:00:00:00 to find
+ * an empty spot.
+ */
+static inline int
+get_mac_addr_index(uint8_t port_id, struct ether_addr *addr)
+{
+       struct rte_eth_dev_info dev_info;
+       /* NOTE(review): port_id is not validated here; callers are expected
+        * to have checked it against nb_ports already. */
+       struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+       unsigned i;
+
+       rte_eth_dev_info_get(port_id, &dev_info);
+
+       /* Linear scan of the per-device MAC address table. */
+       for (i = 0; i < dev_info.max_mac_addrs; i++)
+               if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
+                       return i;
+
+       /* Not found. */
+       return -1;
+}
+
+static struct ether_addr null_mac_addr = {{0, 0, 0, 0, 0, 0}};
+
+/*
+ * Add MAC address 'addr' to the address array of port_id, associated
+ * with VMDQ pool 'pool'.  Adding an address already present is a no-op
+ * success.  Returns 0 on success, negative errno on error.
+ */
+int
+rte_eth_dev_mac_addr_add(uint8_t port_id, struct ether_addr *addr,
+               uint32_t pool)
+{
+       struct rte_eth_dev *dev;
+       int index;
+
+       if (port_id >= nb_ports) {
+               PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+               return (-ENODEV);
+       }
+       dev = &rte_eth_devices[port_id];
+
+       FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
+       /* 00:00:00:00:00:00 marks free slots in the array, so it can never
+        * itself be added as an address. */
+       if (is_zero_ether_addr(addr)) {
+               PMD_DEBUG_TRACE("port %d: Cannot add NULL MAC address\n", port_id);
+               return (-EINVAL);
+       }
+
+       /* Check if it's already there, and do nothing */
+       index = get_mac_addr_index(port_id, addr);
+       if (index >= 0)
+               return 0;
+
+       /* Find a free (all-zero) slot for the new address. */
+       index = get_mac_addr_index(port_id, &null_mac_addr);
+       if (index < 0) {
+               PMD_DEBUG_TRACE("port %d: MAC address array full\n", port_id);
+               return (-ENOSPC);
+       }
+
+       /* Update NIC */
+       (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
+
+       /* Update address in NIC data structure */
+       ether_addr_copy(addr, &dev->data->mac_addrs[index]);
+
+       return 0;
+}
+
+/*
+ * Remove MAC address 'addr' from the address array of port_id.  The
+ * default address (slot 0) cannot be removed; removing an address that
+ * is not present is a no-op success.  Returns 0 on success, negative
+ * errno on error.
+ */
+int
+rte_eth_dev_mac_addr_remove(uint8_t port_id, struct ether_addr *addr)
+{
+       struct rte_eth_dev *dev;
+       int index;
+
+       if (port_id >= nb_ports) {
+               PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+               return (-ENODEV);
+       }
+       dev = &rte_eth_devices[port_id];
+
+       FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_remove, -ENOTSUP);
+       index = get_mac_addr_index(port_id, addr);
+       if (index == 0) {
+               PMD_DEBUG_TRACE("port %d: Cannot remove default MAC address\n", port_id);
+               return (-EADDRINUSE);
+       } else if (index < 0)
+               return 0;  /* Do nothing if address wasn't found */
+
+       /* Update NIC */
+       (*dev->dev_ops->mac_addr_remove)(dev, index);
+
+       /* Update address in NIC data structure: mark the slot free again. */
+       ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
+
+       return 0;
+}
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+/*
+ * Debug (non-inline) wrapper around the PMD receive burst function:
+ * validates port_id and queue_id before dispatching.  Returns the number
+ * of packets retrieved, or 0 on any validation failure.
+ */
+uint16_t
+rte_eth_rx_burst(uint8_t port_id, uint16_t queue_id,
+                struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+       struct rte_eth_dev *dev;
+
+       if (port_id >= nb_ports) {
+               PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+               return 0;
+       }
+       dev = &rte_eth_devices[port_id];
+
+       /* The return type is an unsigned packet count: the previous
+        * -ENOTSUP error value would be converted to a huge positive burst
+        * size by the caller, so return 0 when the op is missing. */
+       FUNC_PTR_OR_ERR_RET(*dev->rx_pkt_burst, 0);
+       if (queue_id >= dev->data->nb_rx_queues) {
+               PMD_DEBUG_TRACE("Invalid RX queue_id=%d\n", queue_id);
+               return 0;
+       }
+       return (*dev->rx_pkt_burst)(dev->data->rx_queues[queue_id],
+                                               rx_pkts, nb_pkts);
+}
+
+/*
+ * Debug (non-inline) wrapper around the PMD transmit burst function:
+ * validates port_id and queue_id before dispatching.  Returns the number
+ * of packets actually sent, or 0 on any validation failure.
+ */
+uint16_t
+rte_eth_tx_burst(uint8_t port_id, uint16_t queue_id,
+                struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+       struct rte_eth_dev *dev;
+
+       if (port_id >= nb_ports) {
+               PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+               return 0;
+       }
+       dev = &rte_eth_devices[port_id];
+
+       /* The return type is an unsigned packet count: the previous
+        * -ENOTSUP error value would be converted to a huge positive burst
+        * size by the caller, so return 0 when the op is missing. */
+       FUNC_PTR_OR_ERR_RET(*dev->tx_pkt_burst, 0);
+       if (queue_id >= dev->data->nb_tx_queues) {
+               PMD_DEBUG_TRACE("Invalid TX queue_id=%d\n", queue_id);
+               return 0;
+       }
+       return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id],
+                                               tx_pkts, nb_pkts);
+}
+#endif
+
+/*
+ * Register callback cb_fn(cb_arg) for event 'event' on port_id.
+ * Registering the same (fn, arg, event) triple twice is a no-op success.
+ * Returns 0 on success, -1 on invalid arguments or allocation failure.
+ */
+int
+rte_eth_dev_callback_register(uint8_t port_id,
+                       enum rte_eth_event_type event,
+                       rte_eth_dev_cb_fn cb_fn, void *cb_arg)
+{
+       int ret = -1;
+       struct rte_eth_dev *dev;
+       struct rte_eth_dev_callback *user_cb = NULL;
+
+       if (!cb_fn)
+               return -1;
+       if (port_id >= nb_ports) {
+               PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+               return -1;
+       }
+       dev = &rte_eth_devices[port_id];
+       /* The callback list is shared with the interrupt path; serialize. */
+       rte_spinlock_lock(&rte_eth_dev_cb_lock);
+       /* De-duplicate: an identical registration already succeeds. */
+       TAILQ_FOREACH(user_cb, &(dev->callbacks), next) {
+               if (user_cb->cb_fn == cb_fn &&
+                       user_cb->cb_arg == cb_arg &&
+                       user_cb->event == event) {
+                       ret = 0;
+                       goto out;
+               }
+       }
+       user_cb = rte_malloc("INTR_USER_CALLBACK",
+               sizeof(struct rte_eth_dev_callback), 0);
+       if (!user_cb)
+               goto out;
+       user_cb->cb_fn = cb_fn;
+       user_cb->cb_arg = cb_arg;
+       user_cb->event = event;
+       TAILQ_INSERT_TAIL(&(dev->callbacks), user_cb, next);
+       ret = 0;
+
+out:
+       rte_spinlock_unlock(&rte_eth_dev_cb_lock);
+
+       return ret;
+}
+
+/*
+ * Unregister callback(s) matching (cb_fn, event) on port_id.  Passing
+ * cb_arg == (void *)-1 acts as a wildcard and removes every matching
+ * (fn, event) entry regardless of its registered argument.  Returns 0 if
+ * at least one entry was removed, -1 otherwise.
+ */
+int
+rte_eth_dev_callback_unregister(uint8_t port_id,
+                       enum rte_eth_event_type event,
+                       rte_eth_dev_cb_fn cb_fn, void *cb_arg)
+{
+       int ret = -1;
+       struct rte_eth_dev *dev;
+       struct rte_eth_dev_callback *cb_lst = NULL;
+
+       if (!cb_fn)
+               return -1;
+       if (port_id >= nb_ports) {
+               PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+               return -1;
+       }
+       dev = &rte_eth_devices[port_id];
+       rte_spinlock_lock(&rte_eth_dev_cb_lock);
+       TAILQ_FOREACH(cb_lst, &(dev->callbacks), next) {
+               if (cb_lst->cb_fn != cb_fn || cb_lst->event != event)
+                       continue;
+               if (cb_lst->cb_arg == (void *)-1 ||
+                               cb_lst->cb_arg == cb_arg) {
+                       TAILQ_REMOVE(&(dev->callbacks), cb_lst, next);
+                       rte_free(cb_lst);
+                       ret = 0;
+               }
+       }
+
+       rte_spinlock_unlock(&rte_eth_dev_cb_lock);
+
+       return ret;
+}
+
+/*
+ * Invoke every callback registered for 'event' on 'dev' (driver-internal
+ * helper).  Each entry is copied and the lock released before invoking
+ * it, presumably so callbacks may themselves call ethdev functions that
+ * take rte_eth_dev_cb_lock without deadlocking — NOTE(review): confirm;
+ * this also means the list may change between iterations.
+ */
+void
+_rte_eth_dev_callback_process(struct rte_eth_dev *dev, enum rte_eth_event_type event)
+{
+       struct rte_eth_dev_callback *cb_lst = NULL;
+       struct rte_eth_dev_callback dev_cb;
+
+       rte_spinlock_lock(&rte_eth_dev_cb_lock);
+       TAILQ_FOREACH(cb_lst, &(dev->callbacks), next) {
+               if (cb_lst->cb_fn == NULL || cb_lst->event != event)
+                       continue;
+               /* Copy the entry, then drop the lock for the user call. */
+               dev_cb = *cb_lst;
+               rte_spinlock_unlock(&rte_eth_dev_cb_lock);
+               dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
+                                               dev_cb.cb_arg);
+               rte_spinlock_lock(&rte_eth_dev_cb_lock);
+       }
+       rte_spinlock_unlock(&rte_eth_dev_cb_lock);
+}
+
diff --git a/lib/librte_ether/rte_ethdev.h b/lib/librte_ether/rte_ethdev.h
new file mode 100644 (file)
index 0000000..b5b6c9e
--- /dev/null
@@ -0,0 +1,1809 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#ifndef _RTE_ETHDEV_H_
+#define _RTE_ETHDEV_H_
+
+/**
+ * @file
+ *
+ * RTE Ethernet Device API
+ *
+ * The Ethernet Device API is composed of two parts:
+ *
+ * - The application-oriented Ethernet API that includes functions to setup
+ *   an Ethernet device (configure it, setup its RX and TX queues and start it),
+ *   to get its MAC address, the speed and the status of its physical link,
+ *   to receive and to transmit packets, and so on.
+ *
+ * - The driver-oriented Ethernet API that exports a function allowing
+ *   an Ethernet Poll Mode Driver (PMD) to simultaneously register itself as
+ *   an Ethernet device driver and as a PCI driver for a set of matching PCI
+ *   [Ethernet] devices classes.
+ *
+ * By default, all the functions of the Ethernet Device API exported by a PMD
+ * are lock-free functions which assume to not be invoked in parallel on
+ * different logical cores to work on the same target object.  For instance,
+ * the receive function of a PMD cannot be invoked in parallel on two logical
+ * cores to poll the same RX queue [of the same port]. Of course, this function
+ * can be invoked in parallel by different logical cores on different RX queues.
+ * It is the responsibility of the upper level application to enforce this rule.
+ *
+ * If needed, parallel accesses by multiple logical cores to shared queues
+ * shall be explicitly protected by dedicated inline lock-aware functions
+ * built on top of their corresponding lock-free functions of the PMD API.
+ *
+ * In all functions of the Ethernet API, the Ethernet device is
+ * designated by an integer >= 0 named the device port identifier.
+ *
+ * At the Ethernet driver level, Ethernet devices are represented by a generic
+ * data structure of type *rte_eth_dev*.
+ *
+ * Ethernet devices are dynamically registered during the PCI probing phase
+ * performed at EAL initialization time.
+ * When an Ethernet device is being probed, an *rte_eth_dev* structure and
+ * a new port identifier are allocated for that device. Then, the eth_dev_init()
+ * function supplied by the Ethernet driver matching the probed PCI
+ * device is invoked to properly initialize the device.
+ *
+ * The role of the device init function consists of resetting the hardware,
+ * checking access to Non-volatile Memory (NVM), reading the MAC address
+ * from NVM etc.
+ *
+ * If the device init operation is successful, the correspondence between
+ * the port identifier assigned to the new device and its associated
+ * *rte_eth_dev* structure is effectively registered.
+ * Otherwise, both the *rte_eth_dev* structure and the port identifier are
+ * freed.
+ *
+ * The functions exported by the application Ethernet API to setup a device
+ * designated by its port identifier must be invoked in the following order:
+ *     - rte_eth_dev_configure()
+ *     - rte_eth_tx_queue_setup()
+ *     - rte_eth_rx_queue_setup()
+ *     - rte_eth_dev_start()
+ *
+ * Then, the network application can invoke, in any order, the functions
+ * exported by the Ethernet API to get the MAC address of a given device, to
+ * get the speed and the status of a device physical link, to receive/transmit
+ * [burst of] packets, and so on.
+ *
+ * If the application wants to change the configuration (i.e. call
+ * rte_eth_dev_configure(), rte_eth_tx_queue_setup(), or
+ * rte_eth_rx_queue_setup()), it must call rte_eth_dev_stop() first to stop the
+ * device and then do the reconfiguration before calling rte_eth_dev_start()
+ * again. The transmit and receive functions should not be invoked when the
+ * device is stopped.
+ *
+ * Please note that some configuration is not stored between calls to
+ * rte_eth_dev_stop()/rte_eth_dev_start(). The following configuration will
+ * be retained:
+ *
+ *     - flow control settings
+ *     - receive mode configuration (promiscuous mode, hardware checksum mode,
+ *       RSS/VMDQ settings etc.)
+ *     - VLAN filtering configuration
+ *     - MAC addresses supplied to MAC address array
+ *     - flow director filtering mode (but not filtering rules)
+ *
+ * Any other configuration will not be stored and will need to be re-entered
+ * after a call to rte_eth_dev_start().
+ *
+ *
+ * Finally, a network application can close an Ethernet device by invoking the
+ * rte_eth_dev_close() function.
+ *
+ * Each function of the application Ethernet API invokes a specific function
+ * of the PMD that controls the target device designated by its port
+ * identifier.
+ * For this purpose, all device-specific functions of an Ethernet driver are
+ * supplied through a set of pointers contained in a generic structure of type
+ * *eth_dev_ops*.
+ * The address of the *eth_dev_ops* structure is stored in the *rte_eth_dev*
+ * structure by the device init function of the Ethernet driver, which is
+ * invoked during the PCI probing phase, as explained earlier.
+ *
+ * In other words, each function of the Ethernet API simply retrieves the
+ * *rte_eth_dev* structure associated with the device port identifier and
+ * performs an indirect invocation of the corresponding driver function
+ * supplied in the *eth_dev_ops* structure of the *rte_eth_dev* structure.
+ *
+ * For performance reasons, the address of the burst-oriented RX and TX
+ * functions of the Ethernet driver are not contained in the *eth_dev_ops*
+ * structure. Instead, they are directly stored at the beginning of the
+ * *rte_eth_dev* structure to avoid an extra indirect memory access during
+ * their invocation.
+ *
+ * RTE ethernet device drivers do not use interrupts for transmitting or
+ * receiving. Instead, Ethernet drivers export Poll-Mode receive and transmit
+ * functions to applications.
+ * Both receive and transmit functions are packet-burst oriented to minimize
+ * their cost per packet through the following optimizations:
+ *
+ * - Sharing among multiple packets the incompressible cost of the
+ *   invocation of receive/transmit functions.
+ *
+ * - Enabling receive/transmit functions to take advantage of burst-oriented
+ *   hardware features (L1 cache, prefetch instructions, NIC head/tail
+ *   registers) to minimize the number of CPU cycles per packet, for instance,
+ *   by avoiding useless read memory accesses to ring descriptors, or by
+ *   systematically using arrays of pointers that exactly fit L1 cache line
+ *   boundaries and sizes.
+ *
+ * The burst-oriented receive function does not provide any error notification,
+ * to avoid the corresponding overhead. As a hint, the upper-level application
+ * might check the status of the device link once being systematically returned
+ * a 0 value by the receive function of the driver for a given number of tries.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+#include <rte_log.h>
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_mbuf.h>
+#include "rte_ether.h"
+
+/**
+ * A structure used to retrieve statistics for an Ethernet port.
+ */
+struct rte_eth_stats {
+       uint64_t ipackets;  /**< Total number of successfully received packets. */
+       uint64_t opackets;  /**< Total number of successfully transmitted packets. */
+       uint64_t ibytes;    /**< Total number of successfully received bytes. */
+       uint64_t obytes;    /**< Total number of successfully transmitted bytes. */
+       uint64_t ierrors;   /**< Total number of erroneous received packets. */
+       uint64_t oerrors;   /**< Total number of failed transmitted packets. */
+       uint64_t imcasts;   /**< Total number of multicast received packets. */
+       uint64_t rx_nombuf; /**< Total number of RX mbuf allocation failures. */
+       uint64_t fdirmatch; /**< Total number of RX packets matching a filter. */
+       uint64_t fdirmiss;  /**< Total number of RX packets not matching any filter. */
+};
+
+/**
+ * A structure used to retrieve link-level information of an Ethernet port.
+ */
+struct rte_eth_link {
+       uint16_t link_speed;      /**< ETH_LINK_SPEED_[10, 100, 1000, 10000] */
+       uint16_t link_duplex;     /**< ETH_LINK_[HALF_DUPLEX, FULL_DUPLEX] */
+       uint8_t  link_status : 1; /**< 1 -> link up, 0 -> link down */
+}__attribute__((aligned(8)));     /**< aligned for atomic64 read/write */
+
+#define ETH_LINK_SPEED_AUTONEG  0       /**< Auto-negotiate link speed. */
+#define ETH_LINK_SPEED_10       10      /**< 10 megabits/second. */
+#define ETH_LINK_SPEED_100      100     /**< 100 megabits/second. */
+#define ETH_LINK_SPEED_1000     1000    /**< 1 gigabit/second. */
+#define ETH_LINK_SPEED_10000    10000   /**< 10 gigabits/second. */
+
+#define ETH_LINK_AUTONEG_DUPLEX 0       /**< Auto-negotiate duplex. */
+#define ETH_LINK_HALF_DUPLEX    1       /**< Half-duplex connection. */
+#define ETH_LINK_FULL_DUPLEX    2       /**< Full-duplex connection. */
+
+/**
+ * A structure used to configure the ring threshold registers of an RX/TX
+ * queue for an Ethernet port.
+ */
+struct rte_eth_thresh {
+       uint8_t pthresh; /**< Ring prefetch threshold. */
+       uint8_t hthresh; /**< Ring host threshold. */
+       uint8_t wthresh; /**< Ring writeback threshold. */
+};
+
+/**
+ *  A set of values to identify what method is to be used to route
+ *  packets to multiple queues.
+ */
+enum rte_eth_rx_mq_mode {
+       ETH_RSS     = 0,     /**< Default to RSS mode. */
+       ETH_VMDQ_DCB         /**< Use VMDQ+DCB to route traffic to queues. */
+};
+
+/**
+ * A structure used to configure the RX features of an Ethernet port.
+ */
+struct rte_eth_rxmode {
+       /** The multi-queue packet distribution mode to be used, e.g. RSS. */
+       enum rte_eth_rx_mq_mode mq_mode;
+       uint32_t max_rx_pkt_len;    /**< Only used if jumbo_frame enabled. */
+       uint16_t split_hdr_size;    /**< Header buffer size (used if header_split enabled). */
+       uint8_t header_split   : 1, /**< Header Split enable. */
+               hw_ip_checksum : 1, /**< IP/UDP/TCP checksum offload enable. */
+               hw_vlan_filter : 1, /**< VLAN filter enable. */
+               jumbo_frame    : 1, /**< Jumbo Frame Receipt enable. */
+               hw_strip_crc   : 1; /**< Enable CRC stripping by hardware. */
+};
+
+/**
+ * A structure used to configure the Receive Side Scaling (RSS) feature
+ * of an Ethernet port.
+ * If not NULL, the *rss_key* pointer of the *rss_conf* structure points
+ * to an array of 40 bytes holding the RSS key to use for hashing specific
+ * header fields of received packets.
+ * Otherwise, a default random hash key is used by the device driver.
+ *
+ * The *rss_hf* field of the *rss_conf* structure indicates the different
+ * types of IPv4/IPv6 packets to which the RSS hashing must be applied.
+ * Supplying an *rss_hf* equal to zero disables the RSS feature.
+ */
+struct rte_eth_rss_conf {
+       uint8_t  *rss_key;   /**< If not NULL, 40-byte hash key. */
+       uint16_t rss_hf;     /**< Hash functions to apply - see ETH_RSS_* below. */
+};
+
+#define ETH_RSS_IPV4        0x0001 /**< IPv4 packet. */
+#define ETH_RSS_IPV4_TCP    0x0002 /**< IPv4/TCP packet. */
+#define ETH_RSS_IPV6        0x0004 /**< IPv6 packet. */
+#define ETH_RSS_IPV6_EX     0x0008 /**< IPv6 packet with extension headers. */
+#define ETH_RSS_IPV6_TCP    0x0010 /**< IPv6/TCP packet. */
+#define ETH_RSS_IPV6_TCP_EX 0x0020 /**< IPv6/TCP with extension headers. */
+/* Intel RSS extensions to UDP packets */
+#define ETH_RSS_IPV4_UDP    0x0040 /**< IPv4/UDP packet. */
+#define ETH_RSS_IPV6_UDP    0x0080 /**< IPv6/UDP packet. */
+#define ETH_RSS_IPV6_UDP_EX 0x0100 /**< IPv6/UDP with extension headers. */
+
+/* Definitions used for VMDQ and DCB functionality */
+#define ETH_VMDQ_MAX_VLAN_FILTERS   64 /**< Maximum nb. of VMDQ vlan filters. */
+#define ETH_DCB_NUM_USER_PRIORITIES 8  /**< Maximum nb. of DCB priorities. */
+#define ETH_VMDQ_DCB_NUM_QUEUES     128 /**< Maximum nb. of VMDQ DCB queues. */
+
+/**
+ * This enum indicates the possible number of queue pools
+ * in VMDQ+DCB configurations.
+ */
+enum rte_eth_nb_pools {
+       ETH_16_POOLS = 16, /**< 16 pools with DCB. */
+       ETH_32_POOLS = 32  /**< 32 pools with DCB. */
+};
+
+/**
+ * A structure used to configure the VMDQ+DCB feature
+ * of an Ethernet port.
+ *
+ * Using this feature, packets are routed to a pool of queues, based
+ * on the vlan id in the vlan tag, and then to a specific queue within
+ * that pool, using the user priority vlan tag field.
+ *
+ * A default pool may be used, if desired, to route all traffic which
+ * does not match the vlan filter rules.
+ */
+struct rte_eth_vmdq_dcb_conf {
+       enum rte_eth_nb_pools nb_queue_pools; /**< With DCB, 16 or 32 pools */
+       uint8_t enable_default_pool; /**< If non-zero, use a default pool */
+       uint8_t default_pool; /**< The default pool, if applicable */
+       uint8_t nb_pool_maps; /**< Number of entries of pool_map[] in use (up to ETH_VMDQ_MAX_VLAN_FILTERS). */
+       struct {
+               uint16_t vlan_id; /**< The vlan id of the received frame */
+               uint64_t pools;   /**< Bitmask of pools for packet rx */
+       } pool_map[ETH_VMDQ_MAX_VLAN_FILTERS]; /**< VMDq vlan pool maps. */
+       uint8_t dcb_queue[ETH_DCB_NUM_USER_PRIORITIES];
+       /**< Selects a queue within a pool for each DCB user priority. */
+};
+
+/**
+ * A structure used to configure the TX features of an Ethernet port.
+ * For future extensions.
+ */
+struct rte_eth_txmode {
+};
+
+/**
+ * A structure used to configure an RX ring of an Ethernet port.
+ */
+struct rte_eth_rxconf {
+       struct rte_eth_thresh rx_thresh; /**< RX ring threshold registers. */
+       uint16_t rx_free_thresh; /**< Drives the freeing of RX descriptors. */
+};
+
+/**
+ * A structure used to configure a TX ring of an Ethernet port.
+ */
+struct rte_eth_txconf {
+       struct rte_eth_thresh tx_thresh; /**< TX ring threshold registers. */
+       uint16_t tx_rs_thresh; /**< Drives the setting of RS bit on TXDs. */
+       uint16_t tx_free_thresh; /**< Drives the freeing of TX buffers. */
+};
+
+/**
+ * This enum indicates the flow control mode
+ */
+enum rte_eth_fc_mode {
+       RTE_FC_NONE = 0, /**< Disable flow control. */
+       RTE_FC_RX_PAUSE, /**< RX pause frame, enable flowctrl on TX side. */
+       RTE_FC_TX_PAUSE, /**< TX pause frame, enable flowctrl on RX side. */
+       RTE_FC_FULL      /**< Enable flow control on both side. */
+};
+
+/**
+ * A structure used to configure Ethernet flow control parameter.
+ * These parameters will be configured into the register of the NIC.
+ * Please refer to the corresponding data sheet for proper value.
+ */
+struct rte_eth_fc_conf {
+       uint32_t high_water;  /**< High threshold value to trigger XOFF */
+       uint32_t low_water;   /**< Low threshold value to trigger XON */
+       uint16_t pause_time;  /**< Pause quota in the Pause frame */
+       uint16_t send_xon;    /**< If non-zero, an XON frame is sent */
+       enum rte_eth_fc_mode mode;  /**< Link flow control mode */
+};
+
+/**
+ *  Flow Director setting modes: none (default), signature or perfect.
+ */
+enum rte_fdir_mode {
+       RTE_FDIR_MODE_NONE      = 0, /**< Disable FDIR support. */
+       RTE_FDIR_MODE_SIGNATURE,     /**< Enable FDIR signature filter mode. */
+       RTE_FDIR_MODE_PERFECT,       /**< Enable FDIR perfect filter mode. */
+};
+
+/**
+ *  Memory space that can be configured to store Flow Director filters
+ *  in the board memory.
+ */
+enum rte_fdir_pballoc_type {
+       RTE_FDIR_PBALLOC_64K = 0,  /**< 64k. */
+       RTE_FDIR_PBALLOC_128K,     /**< 128k. */
+       RTE_FDIR_PBALLOC_256K,     /**< 256k. */
+};
+
+/**
+ *  Select report mode of FDIR hash information in RX descriptors.
+ */
+enum rte_fdir_status_mode {
+       RTE_FDIR_NO_REPORT_STATUS = 0, /**< Never report FDIR hash. */
+       RTE_FDIR_REPORT_STATUS, /**< Only report FDIR hash for matching pkts. */
+       RTE_FDIR_REPORT_STATUS_ALWAYS, /**< Always report FDIR hash. */
+};
+
+/**
+ * A structure used to configure the Flow Director (FDIR) feature
+ * of an Ethernet port.
+ *
+ * If mode is RTE_FDIR_MODE_NONE, the pballoc value is ignored.
+ */
+struct rte_fdir_conf {
+       enum rte_fdir_mode mode; /**< Flow Director mode. */
+       enum rte_fdir_pballoc_type pballoc; /**< Space for FDIR filters. */
+       enum rte_fdir_status_mode status;  /**< How to report FDIR hash. */
+       /* Offset of flexbytes field in RX packets (in 16-bit word units). */
+       uint8_t flexbytes_offset;
+       /* RX queue of packets matching a "drop" filter in perfect mode. */
+       uint8_t drop_queue;
+};
+
+/**
+ *  Possible l4type of FDIR filters.
+ */
+enum rte_l4type {
+       RTE_FDIR_L4TYPE_NONE = 0,       /**< None. */
+       RTE_FDIR_L4TYPE_UDP,            /**< UDP. */
+       RTE_FDIR_L4TYPE_TCP,            /**< TCP. */
+       RTE_FDIR_L4TYPE_SCTP,           /**< SCTP. */
+};
+
+/**
+ *  Select IPv4 or IPv6 FDIR filters.
+ */
+enum rte_iptype {
+       RTE_FDIR_IPTYPE_IPV4 = 0,     /**< IPv4. */
+       RTE_FDIR_IPTYPE_IPV6 ,        /**< IPv6. */
+};
+
+/**
+ *  A structure used to define a FDIR packet filter.
+ */
+struct rte_fdir_filter {
+       uint16_t flex_bytes; /**< Flex bytes value to match. */
+       uint16_t vlan_id; /**< VLAN ID value to match, 0 otherwise. */
+       uint16_t port_src; /**< Source port to match, 0 otherwise. */
+       uint16_t port_dst; /**< Destination port to match, 0 otherwise. */
+       union {
+               uint32_t ipv4_addr; /**< IPv4 source address to match. */
+               uint32_t ipv6_addr[4]; /**< IPv6 source address to match. */
+       } ip_src; /**< IPv4/IPv6 source address to match (union of above). */
+       union {
+               uint32_t ipv4_addr; /**< IPv4 destination address to match. */
+               uint32_t ipv6_addr[4]; /**< IPv6 destination address to match. */
+       } ip_dst; /**< IPv4/IPv6 destination address to match (union of above). */
+       enum rte_l4type l4type; /**< l4type to match: NONE/UDP/TCP/SCTP. */
+       enum rte_iptype iptype; /**< IP packet type to match: IPv4 or IPv6. */
+};
+
+/**
+ *  A structure used to configure FDIR masks that are used by the device
+ *  to match the various fields of RX packet headers.
+ */
+struct rte_fdir_masks {
+       /** When set to 1, packet l4type is not relevant in filters, and
+          source and destination port masks must be set to zero. */
+       uint8_t only_ip_flow;
+       uint8_t vlan_id; /**< If set to 1, vlan_id is relevant in filters. */
+       uint8_t vlan_prio; /**< If set to 1, vlan_prio is relevant in filters. */
+       uint8_t flexbytes; /**< If set to 1, flexbytes is relevant in filters. */
+       /** Mask of Destination IPv4 Address. All bits set to 1 define the
+          relevant bits to use in the destination address of an IPv4 packet
+          when matching it against FDIR filters. */
+       uint32_t dst_ipv4_mask;
+       /** Mask of Source IPv4 Address. All bits set to 1 define
+          the relevant bits to use in the source address of an IPv4 packet
+          when matching it against FDIR filters. */
+       uint32_t src_ipv4_mask;
+       /** Mask of Source IPv6 Address. All bits set to 1 define the
+          relevant BYTES to use in the source address of an IPv6 packet
+          when matching it against FDIR filters. */
+       uint16_t src_ipv6_mask;
+       /** Mask of Source Port. All bits set to 1 define the relevant
+          bits to use in the source port of an IP packets when matching it
+          against FDIR filters. */
+       uint16_t src_port_mask;
+       /** Mask of Destination Port. All bits set to 1 define the relevant
+          bits to use in the destination port of an IP packet when matching it
+          against FDIR filters. */
+       uint16_t dst_port_mask;
+};
+
+/**
+ *  A structure used to report the status of the flow director filters in use.
+ */
+struct rte_eth_fdir {
+       /** Number of filters with collision indication. */
+       uint16_t collision;
+       /** Number of free (non programmed) filters. */
+       uint16_t free;
+       /** The Lookup hash value of the added filter that updated the value
+          of the MAXLEN field */
+       uint16_t maxhash;
+       /** Longest linked list of filters in the table. */
+       uint8_t maxlen;
+       /** Number of added filters. */
+       uint64_t add;
+       /** Number of removed filters. */
+       uint64_t remove;
+       /** Number of failed added filters (no more space in device). */
+       uint64_t f_add;
+       /** Number of failed removed filters. */
+       uint64_t f_remove;
+};
+
+/**
+ * A structure used to enable/disable specific device interrupts.
+ */
+struct rte_intr_conf {
+       /** enable/disable lsc (link status change) interrupt.
+          0 (default) - disable, 1 enable */
+       uint16_t lsc;
+};
+
+/**
+ * A structure used to configure an Ethernet port.
+ * Depending upon the RX multi-queue mode, extra advanced
+ * configuration settings may be needed.
+ */
+struct rte_eth_conf {
+       uint16_t link_speed;
+       /**< ETH_LINK_SPEED_10[0|00|000], or 0 for autonegotiation */
+       uint16_t link_duplex;
+       /**< ETH_LINK_[HALF_DUPLEX|FULL_DUPLEX], or 0 for autonegotiation */
+       struct rte_eth_rxmode rxmode; /**< Port RX configuration. */
+       struct rte_eth_txmode txmode; /**< Port TX configuration. */
+       union {
+               struct rte_eth_rss_conf rss_conf; /**< Port RSS configuration */
+               struct rte_eth_vmdq_dcb_conf vmdq_dcb_conf;
+               /**< Port vmdq+dcb configuration. */
+       } rx_adv_conf; /**< Port RX filtering configuration (union). */
+       struct rte_fdir_conf fdir_conf; /**< FDIR configuration. */
+       struct rte_intr_conf intr_conf; /**< Interrupt mode configuration. */
+};
+
+/**
+ * A structure used to retrieve the contextual information of
+ * an Ethernet device, such as the controlling driver of the device,
+ * its PCI context, etc...
+ */
+struct rte_eth_dev_info {
+       struct rte_pci_device *pci_dev; /**< Device PCI information. */
+       const char *driver_name; /**< Device Driver name. */
+       uint32_t min_rx_bufsize; /**< Minimum size of RX buffer. */
+       uint32_t max_rx_pktlen; /**< Maximum configurable length of RX pkt. */
+       uint16_t max_rx_queues; /**< Maximum number of RX queues. */
+       uint16_t max_tx_queues; /**< Maximum number of TX queues. */
+       uint32_t max_mac_addrs; /**< Maximum number of MAC addresses. */
+};
+
+/* Forward declarations of the device and queue structures. */
+struct rte_eth_dev;
+struct igb_rx_queue;
+struct igb_tx_queue;
+
+struct rte_eth_dev_callback;
+/** @internal Structure to keep track of registered callbacks */
+TAILQ_HEAD(rte_eth_dev_cb_list, rte_eth_dev_callback);
+
+/*
+ * Definitions of all functions exported by an Ethernet driver through the
+ * the generic structure of type *eth_dev_ops* supplied in the *rte_eth_dev*
+ * structure associated with an Ethernet device.
+ */
+
+typedef int  (*eth_dev_configure_t)(struct rte_eth_dev *dev, uint16_t nb_rx_q,
+                                   uint16_t nb_tx_q);
+/**< Ethernet device configuration. */
+
+typedef int  (*eth_dev_start_t)(struct rte_eth_dev *dev);
+/**< Function used to start a configured Ethernet device. */
+
+typedef void (*eth_dev_stop_t)(struct rte_eth_dev *dev);
+/**< Function used to stop a configured Ethernet device. */
+
+typedef void (*eth_dev_close_t)(struct rte_eth_dev *dev);
+/**< @internal Function used to close a configured Ethernet device. */
+
+typedef void (*eth_promiscuous_enable_t)(struct rte_eth_dev *dev);
+/**< Function used to enable the RX promiscuous mode of an Ethernet device. */
+
+typedef void (*eth_promiscuous_disable_t)(struct rte_eth_dev *dev);
+/**< Function used to disable the RX promiscuous mode of an Ethernet device. */
+
+typedef void (*eth_allmulticast_enable_t)(struct rte_eth_dev *dev);
+/**< Enable the receipt of all multicast packets by an Ethernet device. */
+
+typedef void (*eth_allmulticast_disable_t)(struct rte_eth_dev *dev);
+/**< Disable the receipt of all multicast packets by an Ethernet device. */
+
+typedef int (*eth_link_update_t)(struct rte_eth_dev *dev,
+                               int wait_to_complete);
+/**< Get link speed, duplex mode and state (up/down) of an Ethernet device. */
+
+typedef void (*eth_stats_get_t)(struct rte_eth_dev *dev,
+                               struct rte_eth_stats *igb_stats);
+/**< Get global I/O statistics of an Ethernet device. */
+
+typedef void (*eth_stats_reset_t)(struct rte_eth_dev *dev);
+/**< Reset global I/O statistics of an Ethernet device to 0. */
+
+typedef void (*eth_dev_infos_get_t)(struct rte_eth_dev *dev,
+                                   struct rte_eth_dev_info *dev_info);
+/**< Get specific information of an Ethernet device. */
+
+typedef int (*eth_rx_queue_setup_t)(struct rte_eth_dev *dev,
+                                   uint16_t rx_queue_id,
+                                   uint16_t nb_rx_desc,
+                                   unsigned int socket_id,
+                                   const struct rte_eth_rxconf *rx_conf,
+                                   struct rte_mempool *mb_pool);
+/**< Set up a receive queue of an Ethernet device. */
+
+typedef int (*eth_tx_queue_setup_t)(struct rte_eth_dev *dev,
+                                   uint16_t tx_queue_id,
+                                   uint16_t nb_tx_desc,
+                                   unsigned int socket_id,
+                                   const struct rte_eth_txconf *tx_conf);
+/**< Setup a transmit queue of an Ethernet device. */
+
+typedef void (*vlan_filter_set_t)(struct rte_eth_dev *dev,
+                                 uint16_t vlan_id,
+                                 int on);
+/**< Enable/Disable filtering of a VLAN Tag Identifier by an Ethernet device. */
+
+typedef uint16_t (*eth_rx_burst_t)(struct igb_rx_queue *rxq,
+                                  struct rte_mbuf **rx_pkts,
+                                  uint16_t nb_pkts);
+/**< Retrieve input packets from a receive queue of an Ethernet device. */
+
+typedef uint16_t (*eth_tx_burst_t)(struct igb_tx_queue *txq,
+                                  struct rte_mbuf **tx_pkts,
+                                  uint16_t nb_pkts);
+/**< Send output packets on a transmit queue of an Ethernet device. */
+
+typedef int (*fdir_add_signature_filter_t)(struct rte_eth_dev *dev,
+                                          struct rte_fdir_filter *fdir_ftr,
+                                          uint8_t rx_queue);
+/**< Setup a new signature filter rule on an Ethernet device */
+
+typedef int (*fdir_update_signature_filter_t)(struct rte_eth_dev *dev,
+                                             struct rte_fdir_filter *fdir_ftr,
+                                             uint8_t rx_queue);
+/**< Update a signature filter rule on an Ethernet device */
+
+typedef int (*fdir_remove_signature_filter_t)(struct rte_eth_dev *dev,
+                                             struct rte_fdir_filter *fdir_ftr);
+/**< Remove a signature filter rule on an Ethernet device */
+
+typedef void (*fdir_infos_get_t)(struct rte_eth_dev *dev,
+                                struct rte_eth_fdir *fdir);
+/**< Get information about fdir status */
+
+typedef int (*fdir_add_perfect_filter_t)(struct rte_eth_dev *dev,
+                                        struct rte_fdir_filter *fdir_ftr,
+                                        uint16_t soft_id, uint8_t rx_queue,
+                                        uint8_t drop);
+/**< Setup a new perfect filter rule on an Ethernet device */
+
+typedef int (*fdir_update_perfect_filter_t)(struct rte_eth_dev *dev,
+                                           struct rte_fdir_filter *fdir_ftr,
+                                           uint16_t soft_id, uint8_t rx_queue,
+                                           uint8_t drop);
+/**< Update a perfect filter rule on an Ethernet device */
+
+typedef int (*fdir_remove_perfect_filter_t)(struct rte_eth_dev *dev,
+                                           struct rte_fdir_filter *fdir_ftr,
+                                           uint16_t soft_id);
+/**< Remove a perfect filter rule on an Ethernet device */
+
+typedef int (*fdir_set_masks_t)(struct rte_eth_dev *dev,
+                               struct rte_fdir_masks *fdir_masks);
+/**< Setup flow director masks on an Ethernet device */
+
+typedef int (*flow_ctrl_set_t)(struct rte_eth_dev *dev,
+                               struct rte_eth_fc_conf *fc_conf);
+/**< Setup flow control parameter on an Ethernet device */
+
+typedef int (*eth_dev_led_on_t)(struct rte_eth_dev *dev);
+/**<  Turn on SW controllable LED on an Ethernet device */
+
+typedef int (*eth_dev_led_off_t)(struct rte_eth_dev *dev);
+/**<  Turn off SW controllable LED on an Ethernet device */
+
+typedef void (*eth_mac_addr_remove_t)(struct rte_eth_dev *dev, uint32_t index);
+/**< Remove MAC address from receive address register */
+
+typedef void (*eth_mac_addr_add_t)(struct rte_eth_dev *dev,
+                                 struct ether_addr *mac_addr,
+                                 uint32_t index,
+                                 uint32_t vmdq);
+/**< Set a MAC address into Receive Address Register */
+
+/**
+ * A structure containing the functions exported by an Ethernet driver.
+ */
+struct eth_dev_ops {
+       eth_dev_configure_t        dev_configure; /**< Configure device. */
+       eth_dev_start_t            dev_start;     /**< Start device. */
+       eth_dev_stop_t             dev_stop;      /**< Stop device. */
+       eth_dev_close_t            dev_close;     /**< Close device. */
+       eth_promiscuous_enable_t   promiscuous_enable; /**< Promiscuous ON. */
+       eth_promiscuous_disable_t  promiscuous_disable;/**< Promiscuous OFF. */
+       eth_allmulticast_enable_t  allmulticast_enable;/**< RX multicast ON. */
+       eth_allmulticast_disable_t allmulticast_disable;/**< RX multicast OFF. */
+       eth_link_update_t          link_update;   /**< Get device link state. */
+       eth_stats_get_t            stats_get;     /**< Get device statistics. */
+       eth_stats_reset_t          stats_reset;   /**< Reset device statistics. */
+       eth_dev_infos_get_t        dev_infos_get; /**< Get device info. */
+       vlan_filter_set_t          vlan_filter_set; /**< Filter VLAN on/off. */
+       eth_rx_queue_setup_t       rx_queue_setup;/**< Set up device RX queue.*/
+       eth_tx_queue_setup_t       tx_queue_setup;/**< Set up device TX queue.*/
+       eth_dev_led_on_t           dev_led_on;    /**< Turn on LED. */
+       eth_dev_led_off_t          dev_led_off;   /**< Turn off LED. */
+       flow_ctrl_set_t            flow_ctrl_set; /**< Setup flow control. */
+       eth_mac_addr_remove_t      mac_addr_remove; /**< Remove MAC address */
+       eth_mac_addr_add_t         mac_addr_add;  /**< Add a MAC address */
+
+       /** Add a signature filter. */
+       fdir_add_signature_filter_t fdir_add_signature_filter;
+       /** Update a signature filter. */
+       fdir_update_signature_filter_t fdir_update_signature_filter;
+       /** Remove a signature filter. */
+       fdir_remove_signature_filter_t fdir_remove_signature_filter;
+       /** Get information about FDIR status. */
+       fdir_infos_get_t fdir_infos_get;
+       /** Add a perfect filter. */
+       fdir_add_perfect_filter_t fdir_add_perfect_filter;
+       /** Update a perfect filter. */
+       fdir_update_perfect_filter_t fdir_update_perfect_filter;
+       /** Remove a perfect filter. */
+       fdir_remove_perfect_filter_t fdir_remove_perfect_filter;
+       /** Setup masks for FDIR filtering. */
+       fdir_set_masks_t fdir_set_masks;
+};
+
+/**
+ * The generic data structure associated with each ethernet device.
+ *
+ * Pointers to burst-oriented packet receive and transmit functions are
+ * located at the beginning of the structure, along with the pointer to
+ * where all the data elements for the particular device are stored in shared
+ * memory. This split allows the function pointer and driver data to be per-
+ * process, while the actual configuration data for the device is shared.
+ */
+struct rte_eth_dev {
+       eth_rx_burst_t rx_pkt_burst; /**< Pointer to PMD receive function. */
+       eth_tx_burst_t tx_pkt_burst; /**< Pointer to PMD transmit function. */
+       struct rte_eth_dev_data *data;  /**< Pointer to device data */
+       const struct eth_driver *driver;/**< Driver for this device */
+       struct eth_dev_ops *dev_ops;    /**< Functions exported by PMD */
+       struct rte_pci_device *pci_dev; /**< PCI info. supplied by probing */
+       struct rte_eth_dev_cb_list callbacks; /**< User application callbacks */
+};
+
+/**
+ * The data part, with no function pointers, associated with each ethernet device.
+ *
+ * This structure is safe to place in shared memory to be common among different
+ * processes in a multi-process configuration.
+ */
+struct rte_eth_dev_data {
+       struct igb_rx_queue **rx_queues; /**< Array of pointers to RX queues. */
+       struct igb_tx_queue **tx_queues; /**< Array of pointers to TX queues. */
+       uint16_t nb_rx_queues; /**< Number of RX queues. */
+       uint16_t nb_tx_queues; /**< Number of TX queues. */
+
+       void *dev_private;              /**< PMD-specific private data */
+
+       struct rte_eth_link dev_link;
+       /**< Link-level information & status */
+
+       struct rte_eth_conf dev_conf;   /**< Configuration applied to device. */
+       uint16_t max_frame_size;        /**< Default is ETHER_MAX_LEN (1518). */
+
+       uint64_t rx_mbuf_alloc_failed; /**< RX ring mbuf allocation failures. */
+       struct ether_addr* mac_addrs;/**< Device Ethernet Link address. */
+       uint8_t port_id;           /**< Device [external] port identifier. */
+       uint8_t promiscuous   : 1, /**< RX promiscuous mode ON(1) / OFF(0). */
+               scattered_rx : 1,  /**< RX of scattered packets is ON(1) / OFF(0) */
+               all_multicast : 1, /**< RX all multicast mode ON(1) / OFF(0). */
+               dev_started : 1;   /**< Device state: STARTED(1) / STOPPED(0). */
+};
+
+/**
+ * The pool of *rte_eth_dev* structures. The size of the pool
+ * is configured at compile-time in the rte_ethdev.c file.
+ */
+extern struct rte_eth_dev rte_eth_devices[];
+
+/**
+ * Get the total number of Ethernet devices that have been successfully
+ * initialized by the [matching] Ethernet driver during the PCI probing phase.
+ * All devices whose port identifier is in the range
+ * [0,  rte_eth_dev_count() - 1] can be operated on by network applications.
+ *
+ * @return
+ *   - The total number of usable Ethernet devices.
+ */
+extern uint8_t rte_eth_dev_count(void);
+
+struct eth_driver;
+/**
+ * Initialization function of an Ethernet driver invoked for each matching
+ * Ethernet PCI device detected during the PCI probing phase.
+ *
+ * @param eth_drv
+ *   The pointer to the [matching] Ethernet driver structure supplied by
+ *   the PMD when it registered itself.
+ * @param eth_dev
+ *   The *eth_dev* pointer is the address of the *rte_eth_dev* structure
+ *   associated with the matching device and which has been [automatically]
+ *   allocated in the *rte_eth_devices* array.
+ *   The *eth_dev* structure is supplied to the driver initialization function
+ *   with the following fields already initialized:
+ *
+ *   - *pci_dev*: Holds the pointers to the *rte_pci_device* structure which
+ *     contains the generic PCI information of the matching device.
+ *
+ *   - *dev_private*: Holds a pointer to the device private data structure.
+ *
+ *   - *max_frame_size*: Contains the default Ethernet maximum frame length
+ *     (1518).
+ *
+ *   - *port_id*: Contains the port index of the device (actually the index
+ *     of the *eth_dev* structure in the *rte_eth_devices* array).
+ *
+ * @return
+ *   - 0: Success, the device is properly initialized by the driver.
+ *        In particular, the driver MUST have set up the *dev_ops* pointer
+ *        of the *eth_dev* structure.
+ *   - <0: Error code of the device initialization failure.
+ */
+typedef int (*eth_dev_init_t)(struct eth_driver  *eth_drv,
+                             struct rte_eth_dev *eth_dev);
+
+/**
+ * The structure associated with a PMD Ethernet driver.
+ *
+ * Each Ethernet driver acts as a PCI driver and is represented by a generic
+ * *eth_driver* structure that holds:
+ *
+ * - An *rte_pci_driver* structure (which must be the first field).
+ *
+ * - The *eth_dev_init* function invoked for each matching PCI device.
+ *
+ * - The size of the private data to allocate for each matching device.
+ */
+struct eth_driver {
+       struct rte_pci_driver pci_drv;    /**< The PMD is also a PCI driver. */
+       eth_dev_init_t eth_dev_init;      /**< Device init function. */
+       unsigned int dev_private_size;    /**< Size of device private data. */
+};
+
+/**
+ * A function invoked by the initialization function of an Ethernet driver
+ * to simultaneously register itself as a PCI driver and as an Ethernet
+ * Poll Mode Driver (PMD).
+ *
+ * @param eth_drv
+ *   The pointer to the *eth_driver* structure associated with
+ *   the Ethernet driver.
+ */
+extern void rte_eth_driver_register(struct eth_driver *eth_drv);
+
+/**
+ * The initialization function of the driver for
+ * Intel(r) IGB Gigabit Ethernet Controller devices.
+ * This function is invoked once at EAL start time.
+ * @return
+ *   0 on success
+ */
+extern int rte_igb_pmd_init(void);
+
+/**
+ * The initialization function of the driver for 10Gbps Intel IXGBE
+ * Ethernet devices.
+ * Invoked once at EAL start time.
+ * @return
+ *   0 on success
+ */
+extern int rte_ixgbe_pmd_init(void);
+
+/**
+ * The initialization function of the driver for 10Gbps Intel IXGBE_VF
+ * Ethernet devices.
+ * Invoked once at EAL start time.
+ * @return
+ *   0 on success
+ */
+extern int rte_ixgbevf_pmd_init(void);
+
+/**
+ * Configure an Ethernet device.
+ * This function must be invoked first before any other function in the
+ * Ethernet API. This function can also be re-invoked when a device is in the
+ * stopped state.
+ *
+ * @param port_id
+ *   The port identifier of the Ethernet device to configure.
+ * @param nb_rx_queue
+ *   The number of receive queues to set up for the Ethernet device.
+ * @param nb_tx_queue
+ *   The number of transmit queues to set up for the Ethernet device.
+ * @param eth_conf
+ *   The pointer to the configuration data to be used for the Ethernet device.
+ *   The *rte_eth_conf* structure includes:
+ *     -  the hardware offload features to activate, with dedicated fields for
+ *        each statically configurable offload hardware feature provided by
+ *        Ethernet devices, such as IP checksum or VLAN tag stripping for
+ *        example.
+ *     - the Receive Side Scaling (RSS) configuration when using multiple RX
+ *       queues per port.
+ *
+ *   Embedding all configuration information in a single data structure
+ *   is the most flexible method that allows the addition of new features
+ *   without changing the syntax of the API.
+ * @return
+ *   - 0: Success, device configured.
+ *   - <0: Error code returned by the driver configuration function.
+ */
+extern int rte_eth_dev_configure(uint8_t port_id,
+                                uint16_t nb_rx_queue,
+                                uint16_t nb_tx_queue,
+                                const struct rte_eth_conf *eth_conf);
+
+/**
+ * Allocate and set up a receive queue for an Ethernet device.
+ *
+ * The function allocates a contiguous block of memory for *nb_rx_desc*
+ * receive descriptors from a memory zone associated with *socket_id*
+ * and initializes each receive descriptor with a network buffer allocated
+ * from the memory pool *mb_pool*.
+ *
+ * @param port_id
+ *   The port identifier of the Ethernet device.
+ * @param rx_queue_id
+ *   The index of the receive queue to set up.
+ *   The value must be in the range [0, nb_rx_queue - 1] previously supplied
+ *   to rte_eth_dev_configure().
+ * @param nb_rx_desc
+ *   The number of receive descriptors to allocate for the receive ring.
+ * @param socket_id
+ *   The *socket_id* argument is the socket identifier in case of NUMA.
+ *   The value can be *SOCKET_ID_ANY* if there is no NUMA constraint for
+ *   the DMA memory allocated for the receive descriptors of the ring.
+ * @param rx_conf
+ *   The pointer to the configuration data to be used for the receive queue.
+ *   The *rx_conf* structure contains an *rx_thresh* structure with the values
+ *   of the Prefetch, Host, and Write-Back threshold registers of the receive
+ *   ring.
+ * @param mb_pool
+ *   The pointer to the memory pool from which to allocate *rte_mbuf* network
+ *   memory buffers to populate each descriptor of the receive ring.
+ * @return
+ *   - 0: Success, receive queue correctly set up.
+ *   - -EINVAL: The size of network buffers which can be allocated from the
+ *      memory pool does not fit the various buffer sizes allowed by the
+ *      device controller.
+ *   - -ENOMEM: Unable to allocate the receive ring descriptors or to
+ *      allocate network memory buffers from the memory pool when
+ *      initializing receive descriptors.
+ */
+extern int rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
+                                 uint16_t nb_rx_desc, unsigned int socket_id,
+                                 const struct rte_eth_rxconf *rx_conf,
+                                 struct rte_mempool *mb_pool);
+
+/**
+ * Allocate and set up a transmit queue for an Ethernet device.
+ *
+ * @param port_id
+ *   The port identifier of the Ethernet device.
+ * @param tx_queue_id
+ *   The index of the transmit queue to set up.
+ *   The value must be in the range [0, nb_tx_queue - 1] previously supplied
+ *   to rte_eth_dev_configure().
+ * @param nb_tx_desc
+ *   The number of transmit descriptors to allocate for the transmit ring.
+ * @param socket_id
+ *   The *socket_id* argument is the socket identifier in case of NUMA.
+ *   Its value can be *SOCKET_ID_ANY* if there is no NUMA constraint for
+ *   the DMA memory allocated for the transmit descriptors of the ring.
+ * @param tx_conf
+ *   The pointer to the configuration data to be used for the transmit queue.
+ *   The *tx_conf* structure contains the following data:
+ *   - The *tx_thresh* structure with the values of the Prefetch, Host, and
+ *     Write-Back threshold registers of the transmit ring.
+ *     When setting the Write-Back threshold to a value greater than zero,
+ *     *tx_rs_thresh* value should be explicitly set to one.
+ *   - The *tx_free_thresh* value indicates the [minimum] number of network
+ *     buffers that must be pending in the transmit ring to trigger their
+ *     [implicit] freeing by the driver transmit function.
+ *   - The *tx_rs_thresh* value indicates the [minimum] number of transmit
+ *     descriptors that must be pending in the transmit ring before setting the
+ *     RS bit on a descriptor by the driver transmit function.
+ *     The *tx_rs_thresh* value should be less than or equal to the
+ *     *tx_free_thresh* value, and both of them should be less than
+ *     *nb_tx_desc* - 3.
+ *
+ *     Note that setting *tx_free_thresh* or *tx_rs_thresh* value to 0 forces
+ *     the transmit function to use default values.
+ * @return
+ *   - 0: Success, the transmit queue is correctly set up.
+ *   - -ENOMEM: Unable to allocate the transmit ring descriptors.
+ */
+extern int rte_eth_tx_queue_setup(uint8_t port_id, uint16_t tx_queue_id,
+                                 uint16_t nb_tx_desc, unsigned int socket_id,
+                                 const struct rte_eth_txconf *tx_conf);
+
+/**
+ * Start an Ethernet device.
+ *
+ * The device start step is the last one and consists of setting the configured
+ * offload features and in starting the transmit and the receive units of the
+ * device.
+ * On success, all basic functions exported by the Ethernet API (link status,
+ * receive/transmit, and so on) can be invoked.
+ *
+ * @param port_id
+ *   The port identifier of the Ethernet device.
+ * @return
+ *   - 0: Success, Ethernet device started.
+ *   - <0: Error code of the driver device start function.
+ */
+extern int rte_eth_dev_start(uint8_t port_id);
+
+/**
+ * Stop an Ethernet device. The device can be restarted with a call to
+ * rte_eth_dev_start()
+ *
+ * @param port_id
+ *   The port identifier of the Ethernet device.
+ */
+extern void rte_eth_dev_stop(uint8_t port_id);
+
+/**
+ * Close an Ethernet device. The device cannot be restarted!
+ *
+ * @param port_id
+ *   The port identifier of the Ethernet device.
+ */
+extern void rte_eth_dev_close(uint8_t port_id);
+
+/**
+ * Enable receipt in promiscuous mode for an Ethernet device.
+ *
+ * @param port_id
+ *   The port identifier of the Ethernet device.
+ */
+extern void rte_eth_promiscuous_enable(uint8_t port_id);
+
+/**
+ * Disable receipt in promiscuous mode for an Ethernet device.
+ *
+ * @param port_id
+ *   The port identifier of the Ethernet device.
+ */
+extern void rte_eth_promiscuous_disable(uint8_t port_id);
+
+/**
+ * Return the value of promiscuous mode for an Ethernet device.
+ *
+ * @param port_id
+ *   The port identifier of the Ethernet device.
+ * @return
+ *   - (1) if promiscuous is enabled
+ *   - (0) if promiscuous is disabled.
+ *   - (-1) on error
+ */
+extern int rte_eth_promiscuous_get(uint8_t port_id);
+
+/**
+ * Enable the receipt of any multicast frame by an Ethernet device.
+ *
+ * @param port_id
+ *   The port identifier of the Ethernet device.
+ */
+extern void rte_eth_allmulticast_enable(uint8_t port_id);
+
+/**
+ * Disable the receipt of all multicast frames by an Ethernet device.
+ *
+ * @param port_id
+ *   The port identifier of the Ethernet device.
+ */
+extern void rte_eth_allmulticast_disable(uint8_t port_id);
+
+/**
+ * Return the value of allmulticast mode for an Ethernet device.
+ *
+ * @param port_id
+ *   The port identifier of the Ethernet device.
+ * @return
+ *   - (1) if allmulticast is enabled
+ *   - (0) if allmulticast is disabled.
+ *   - (-1) on error
+ */
+extern int rte_eth_allmulticast_get(uint8_t port_id);
+
+/**
+ * Retrieve the status (ON/OFF), the speed (in Mbps) and the mode (HALF-DUPLEX
+ * or FULL-DUPLEX) of the physical link of an Ethernet device. It might need
+ * to wait up to 9 seconds before returning.
+ *
+ * @param port_id
+ *   The port identifier of the Ethernet device.
+ * @param link
+ *   A pointer to an *rte_eth_link* structure to be filled with
+ *   the status, the speed and the mode of the Ethernet device link.
+ */
+extern void rte_eth_link_get(uint8_t port_id, struct rte_eth_link *link);
+
+/**
+ * Retrieve the status (ON/OFF), the speed (in Mbps) and the mode (HALF-DUPLEX
+ * or FULL-DUPLEX) of the physical link of an Ethernet device. It is a no-wait
+ * version of rte_eth_link_get().
+ *
+ * @param port_id
+ *   The port identifier of the Ethernet device.
+ * @param link
+ *   A pointer to an *rte_eth_link* structure to be filled with
+ *   the status, the speed and the mode of the Ethernet device link.
+ */
+extern void rte_eth_link_get_nowait(uint8_t port_id,
+                               struct rte_eth_link *link);
+
+/**
+ * Retrieve the general I/O statistics of an Ethernet device.
+ *
+ * @param port_id
+ *   The port identifier of the Ethernet device.
+ * @param stats
+ *   A pointer to a structure of type *rte_eth_stats* to be filled with
+ *   the values of device counters for the following set of statistics:
+ *   - *ipackets* with the total of successfully received packets.
+ *   - *opackets* with the total of successfully transmitted packets.
+ *   - *ibytes*   with the total of successfully received bytes.
+ *   - *obytes*   with the total of successfully transmitted bytes.
+ *   - *ierrors*  with the total of erroneous received packets.
+ *   - *oerrors*  with the total of failed transmitted packets.
+ */
+extern void rte_eth_stats_get(uint8_t port_id, struct rte_eth_stats *stats);
+
+/**
+ * Reset the general I/O statistics of an Ethernet device.
+ *
+ * @param port_id
+ *   The port identifier of the Ethernet device.
+ */
+extern void rte_eth_stats_reset(uint8_t port_id);
+
+/**
+ * Retrieve the Ethernet address of an Ethernet device.
+ *
+ * @param port_id
+ *   The port identifier of the Ethernet device.
+ * @param mac_addr
+ *   A pointer to a structure of type *ether_addr* to be filled with
+ *   the Ethernet address of the Ethernet device.
+ */
+extern void rte_eth_macaddr_get(uint8_t port_id, struct ether_addr *mac_addr);
+
+/**
+ * Retrieve the contextual information of an Ethernet device.
+ *
+ * @param port_id
+ *   The port identifier of the Ethernet device.
+ * @param dev_info
+ *   A pointer to a structure of type *rte_eth_dev_info* to be filled with
+ *   the contextual information of the Ethernet device.
+ */
+extern void rte_eth_dev_info_get(uint8_t port_id,
+                                struct rte_eth_dev_info *dev_info);
+
+/**
+ * Enable/Disable hardware filtering by an Ethernet device of received
+ * VLAN packets tagged with a given VLAN Tag Identifier.
+ *
+ * @param port_id
+ *   The port identifier of the Ethernet device.
+ * @param vlan_id
+ *   The VLAN Tag Identifier whose filtering must be enabled or disabled.
+ * @param on
+ *   If > 0, enable VLAN filtering of VLAN packets tagged with *vlan_id*.
+ *   Otherwise, disable VLAN filtering of VLAN packets tagged with *vlan_id*.
+ * @return
+ *   - (0) if successful.
+ *   - (-ENOTSUP) if hardware-assisted VLAN filtering not configured.
+ *   - (-ENODEV) if *port_id* invalid.
+ *   - (-ENOSYS) if VLAN filtering on *port_id* disabled.
+ *   - (-EINVAL) if *vlan_id* > 4095.
+ */
+extern int rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id, int on);
+
+/**
+ *
+ * Retrieve a burst of input packets from a receive queue of an Ethernet
+ * device. The retrieved packets are stored in *rte_mbuf* structures whose
+ * pointers are supplied in the *rx_pkts* array.
+ *
+ * The rte_eth_rx_burst() function loops, parsing the RX ring of the
+ * receive queue, up to *nb_pkts* packets, and for each completed RX
+ * descriptor in the ring, it performs the following operations:
+ *
+ * - Initialize the *rte_mbuf* data structure associated with the
+ *   RX descriptor according to the information provided by the NIC into
+ *   that RX descriptor.
+ *
+ * - Store the *rte_mbuf* data structure into the next entry of the
+ *   *rx_pkts* array.
+ *
+ * - Replenish the RX descriptor with a new *rte_mbuf* buffer
+ *   allocated from the memory pool associated with the receive queue at
+ *   initialization time.
+ *
+ * When retrieving an input packet that was scattered by the controller
+ * into multiple receive descriptors, the rte_eth_rx_burst() function
+ * appends the associated *rte_mbuf* buffers to the first buffer of the
+ * packet.
+ *
+ * The rte_eth_rx_burst() function returns the number of packets
+ * actually retrieved, which is the number of *rte_mbuf* data structures
+ * effectively supplied into the *rx_pkts* array.
+ * A return value equal to *nb_pkts* indicates that the RX queue contained
+ * at least *rx_pkts* packets, and this is likely to signify that other
+ * received packets remain in the input queue. Applications implementing
+ * a "retrieve as much received packets as possible" policy can check this
+ * specific case and keep invoking the rte_eth_rx_burst() function until
+ * a value less than *nb_pkts* is returned.
+ *
+ * This receive method has the following advantages:
+ *
+ * - It allows a run-to-completion network stack engine to retrieve and
+ *   to immediately process received packets in a fast burst-oriented
+ *   approach, avoiding the overhead of unnecessary intermediate packet
+ *   queue/dequeue operations.
+ *
+ * - Conversely, it also allows an asynchronous-oriented processing
+ *   method to retrieve bursts of received packets and to immediately
+ *   queue them for further parallel processing by another logical core,
+ *   for instance. However, instead of having received packets being
+ *   individually queued by the driver, this approach allows the invoker
+ *   of the rte_eth_rx_burst() function to queue a burst of retrieved
+ *   packets at a time and therefore dramatically reduce the cost of
+ *   enqueue/dequeue operations per packet.
+ *
+ * - It allows the rte_eth_rx_burst() function of the driver to take
+ *   advantage of burst-oriented hardware features (CPU cache,
+ *   prefetch instructions, and so on) to minimize the number of CPU
+ *   cycles per packet.
+ *
+ * To summarize, the proposed receive API enables many
+ * burst-oriented optimizations in both synchronous and asynchronous
+ * packet processing environments with no overhead in both cases.
+ *
+ * The rte_eth_rx_burst() function does not provide any error
+ * notification to avoid the corresponding overhead. As a hint, the
+ * upper-level application might check the status of the device link once
+ * being systematically returned a 0 value for a given number of tries.
+ *
+ * @param port_id
+ *   The port identifier of the Ethernet device.
+ * @param queue_id
+ *   The index of the receive queue from which to retrieve input packets.
+ *   The value must be in the range [0, nb_rx_queue - 1] previously supplied
+ *   to rte_eth_dev_configure().
+ * @param rx_pkts
+ *   The address of an array of pointers to *rte_mbuf* structures that
+ *   must be large enough to store *nb_pkts* pointers in it.
+ * @param nb_pkts
+ *   The maximum number of packets to retrieve.
+ * @return
+ *   The number of packets actually retrieved, which is the number
+ *   of pointers to *rte_mbuf* structures effectively supplied to the
+ *   *rx_pkts* array.
+ */
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+extern uint16_t rte_eth_rx_burst(uint8_t port_id, uint16_t queue_id,
+                                struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+#else
+/*
+ * Non-debug build: inline fast path that dispatches directly to the
+ * driver's RX burst handler for the given port/queue pair.
+ * No parameter validation is performed here.
+ */
+static inline uint16_t
+rte_eth_rx_burst(uint8_t port_id, uint16_t queue_id,
+                struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+       struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+
+       return dev->rx_pkt_burst(dev->data->rx_queues[queue_id],
+                       rx_pkts, nb_pkts);
+}
+#endif
+
+/**
+ * Send a burst of output packets on a transmit queue of an Ethernet device.
+ *
+ * The rte_eth_tx_burst() function is invoked to transmit output packets
+ * on the output queue *queue_id* of the Ethernet device designated by its
+ * *port_id*.
+ * The *nb_pkts* parameter is the number of packets to send which are
+ * supplied in the *tx_pkts* array of *rte_mbuf* structures.
+ * The rte_eth_tx_burst() function loops, sending *nb_pkts* packets,
+ * up to the number of transmit descriptors available in the TX ring of the
+ * transmit queue.
+ * For each packet to send, the rte_eth_tx_burst() function performs
+ * the following operations:
+ *
+ * - Pick up the next available descriptor in the transmit ring.
+ *
+ * - Free the network buffer previously sent with that descriptor, if any.
+ *
+ * - Initialize the transmit descriptor with the information provided
+ *   in the *rte_mbuf* data structure.
+ *
+ * In the case of a segmented packet composed of a list of *rte_mbuf* buffers,
+ * the rte_eth_tx_burst() function uses several transmit descriptors
+ * of the ring.
+ *
+ * The rte_eth_tx_burst() function returns the number of packets it
+ * actually sent. A return value equal to *nb_pkts* means that all packets
+ * have been sent, and this is likely to signify that other output packets
+ * could be immediately transmitted again. Applications that implement a
+ * "send as many packets to transmit as possible" policy can check this
+ * specific case and keep invoking the rte_eth_tx_burst() function until
+ * a value less than *nb_pkts* is returned.
+ *
+ * It is the responsibility of the rte_eth_tx_burst() function to
+ * transparently free the memory buffers of packets previously sent.
+ * This feature is driven by the *tx_free_thresh* value supplied to the
+ * rte_eth_dev_configure() function at device configuration time.
+ * When the number of previously sent packets reached the "minimum transmit
+ * packets to free" threshold, the rte_eth_tx_burst() function must
+ * [attempt to] free the *rte_mbuf*  buffers of those packets whose
+ * transmission was effectively completed.
+ *
+ * @param port_id
+ *   The port identifier of the Ethernet device.
+ * @param queue_id
+ *   The index of the transmit queue through which output packets must be
+ *   sent.
+ *   The value must be in the range [0, nb_tx_queue - 1] previously supplied
+ *   to rte_eth_dev_configure().
+ * @param tx_pkts
+ *   The address of an array of *nb_pkts* pointers to *rte_mbuf* structures
+ *   which contain the output packets.
+ * @param nb_pkts
+ *   The maximum number of packets to transmit.
+ * @return
+ *   The number of output packets actually stored in transmit descriptors of
+ *   the transmit ring. The return value can be less than the value of the
+ *   *tx_pkts* parameter when the transmit ring is full or has been filled up.
+ */
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+extern uint16_t rte_eth_tx_burst(uint8_t port_id, uint16_t queue_id,
+                                struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
+#else
+/*
+ * Non-debug build: inline fast path that dispatches directly to the
+ * driver's TX burst handler for the given port/queue pair.
+ * No parameter validation is performed here.
+ */
+static inline uint16_t
+rte_eth_tx_burst(uint8_t port_id, uint16_t queue_id,
+                struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+       struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+
+       return dev->tx_pkt_burst(dev->data->tx_queues[queue_id],
+                       tx_pkts, nb_pkts);
+}
+#endif
+
+/**
+ * Setup a new signature filter rule on an Ethernet device
+ *
+ * @param port_id
+ *   The port identifier of the Ethernet device.
+ * @param fdir_filter
+ *   The pointer to the fdir filter structure describing the signature filter
+ *   rule.
+ *   The *rte_fdir_filter* structure includes the values of the different fields
+ *   to match: source and destination IP addresses, vlan id, flexbytes, source
+ *   and destination ports, and so on.
+ * @param rx_queue
+ *   The index of the RX queue where to store RX packets matching the added
+ *   signature filter defined in fdir_filter.
+ * @return
+ *   - (0) if successful.
+ *   - (-ENOTSUP) if hardware doesn't support flow director mode.
+ *   - (-ENODEV) if *port_id* invalid.
+ *   - (-ENOSYS) if the FDIR mode is not configured in signature mode
+ *               on *port_id*.
+ *   - (-EINVAL) if the fdir_filter information is not correct.
+ */
+int rte_eth_dev_fdir_add_signature_filter(uint8_t port_id,
+                                         struct rte_fdir_filter *fdir_filter,
+                                         uint8_t rx_queue);
+
+/**
+ * Update a signature filter rule on an Ethernet device.
+ * If the rule doesn't exist, it is created.
+ *
+ * @param port_id
+ *   The port identifier of the Ethernet device.
+ * @param fdir_ftr
+ *   The pointer to the structure describing the signature filter rule.
+ *   The *rte_fdir_filter* structure includes the values of the different fields
+ *   to match: source and destination IP addresses, vlan id, flexbytes, source
+ *   and destination ports, and so on.
+ * @param rx_queue
+ *   The index of the RX queue where to store RX packets matching the added
+ *   signature filter defined in fdir_ftr.
+ * @return
+ *   - (0) if successful.
+ *   - (-ENOTSUP) if hardware doesn't support flow director mode.
+ *   - (-ENODEV) if *port_id* invalid.
+ *   - (-ENOSYS) if the flow director mode is not configured in signature mode
+ *     on *port_id*.
+ *   - (-EINVAL) if the fdir_filter information is not correct.
+ */
+int rte_eth_dev_fdir_update_signature_filter(uint8_t port_id,
+                                            struct rte_fdir_filter *fdir_ftr,
+                                            uint8_t rx_queue);
+
+/**
+ * Remove a signature filter rule on an Ethernet device.
+ *
+ * @param port_id
+ *   The port identifier of the Ethernet device.
+ * @param fdir_ftr
+ *   The pointer to the structure describing the signature filter rule.
+ *   The *rte_fdir_filter* structure includes the values of the different fields
+ *   to match: source and destination IP addresses, vlan id, flexbytes, source
+ *   and destination ports, and so on.
+ * @return
+ *   - (0) if successful.
+ *   - (-ENOTSUP) if hardware doesn't support flow director mode.
+ *   - (-ENODEV) if *port_id* invalid.
+ *   - (-ENOSYS) if the flow director mode is not configured in signature mode
+ *     on *port_id*.
+ *   - (-EINVAL) if the fdir_filter information is not correct.
+ */
+int rte_eth_dev_fdir_remove_signature_filter(uint8_t port_id,
+                                            struct rte_fdir_filter *fdir_ftr);
+
+/**
+ * Retrieve the flow director information of an Ethernet device.
+ *
+ * @param port_id
+ *   The port identifier of the Ethernet device.
+ * @param fdir
+ *   A pointer to a structure of type *rte_eth_dev_fdir* to be filled with
+ *   the flow director information of the Ethernet device.
+ * @return
+ *   - (0) if successful.
+ *   - (-ENOTSUP) if hardware doesn't support flow director mode.
+ *   - (-ENODEV) if *port_id* invalid.
+ *   - (-ENOSYS) if the flow director mode is not configured on *port_id*.
+ */
+int rte_eth_dev_fdir_get_infos(uint8_t port_id, struct rte_eth_fdir *fdir);
+
+/**
+ * Add a new perfect filter rule on an Ethernet device.
+ *
+ * @param port_id
+ *   The port identifier of the Ethernet device.
+ * @param fdir_filter
+ *   The pointer to the structure describing the perfect filter rule.
+ *   The *rte_fdir_filter* structure includes the values of the different fields
+ *   to match: source and destination IP addresses, vlan id, flexbytes, source
+ *   and destination ports, and so on.
+ *   IPv6 are not supported.
+ * @param soft_id
+ *    The 16-bit value supplied in the field hash.fdir.id of mbuf for RX
+ *    packets matching the perfect filter.
+ * @param rx_queue
+ *   The index of the RX queue where to store RX packets matching the added
+ *   perfect filter defined in fdir_filter.
+ * @param drop
+ *    If drop is set to 1, matching RX packets are stored into the RX drop
+ *    queue defined in the rte_fdir_conf.
+ * @return
+ *   - (0) if successful.
+ *   - (-ENOTSUP) if hardware doesn't support flow director mode.
+ *   - (-ENODEV) if *port_id* invalid.
+ *   - (-ENOSYS) if the flow director mode is not configured in perfect mode
+ *               on *port_id*.
+ *   - (-EINVAL) if the fdir_filter information is not correct.
+ */
+int rte_eth_dev_fdir_add_perfect_filter(uint8_t port_id,
+                                       struct rte_fdir_filter *fdir_filter,
+                                       uint16_t soft_id, uint8_t rx_queue,
+                                       uint8_t drop);
+
+/**
+ * Update a perfect filter rule on an Ethernet device.
+ * If the rule doesn't exist, it is created.
+ *
+ * @param port_id
+ *   The port identifier of the Ethernet device.
+ * @param fdir_filter
+ *   The pointer to the structure describing the perfect filter rule.
+ *   The *rte_fdir_filter* structure includes the values of the different fields
+ *   to match: source and destination IP addresses, vlan id, flexbytes, source
+ *   and destination ports, and so on.
+ *   IPv6 are not supported.
+ * @param soft_id
+ *    The 16-bit value supplied in the field hash.fdir.id of mbuf for RX
+ *    packets matching the perfect filter.
+ * @param rx_queue
+ *   The index of the RX queue where to store RX packets matching the added
+ *   perfect filter defined in fdir_filter.
+ * @param drop
+ *    If drop is set to 1, matching RX packets are stored into the RX drop
+ *    queue defined in the rte_fdir_conf.
+ * @return
+ *   - (0) if successful.
+ *   - (-ENOTSUP) if hardware doesn't support flow director mode.
+ *   - (-ENODEV) if *port_id* invalid.
+ *   - (-ENOSYS) if the flow director mode is not configured in perfect mode
+ *      on *port_id*.
+ *   - (-EINVAL) if the fdir_filter information is not correct.
+ */
+int rte_eth_dev_fdir_update_perfect_filter(uint8_t port_id,
+                                          struct rte_fdir_filter *fdir_filter,
+                                          uint16_t soft_id, uint8_t rx_queue,
+                                          uint8_t drop);
+
+/**
+ * Remove a perfect filter rule on an Ethernet device.
+ *
+ * @param port_id
+ *   The port identifier of the Ethernet device.
+ * @param fdir_filter
+ *   The pointer to the structure describing the perfect filter rule.
+ *   The *rte_fdir_filter* structure includes the values of the different fields
+ *   to match: source and destination IP addresses, vlan id, flexbytes, source
+ *   and destination ports, and so on.
+ *   IPv6 are not supported.
+ * @param soft_id
+ *    The soft_id value provided when adding/updating the removed filter.
+ * @return
+ *   - (0) if successful.
+ *   - (-ENOTSUP) if hardware doesn't support flow director mode.
+ *   - (-ENODEV) if *port_id* invalid.
+ *   - (-ENOSYS) if the flow director mode is not configured in perfect mode
+ *      on *port_id*.
+ *   - (-EINVAL) if the fdir_filter information is not correct.
+ */
+int rte_eth_dev_fdir_remove_perfect_filter(uint8_t port_id,
+                                          struct rte_fdir_filter *fdir_filter,
+                                          uint16_t soft_id);
+/**
+ * Configure globally the masks for flow director mode for an Ethernet device.
+ * For example, the device can match packets with only the first 24 bits of
+ * the IPv4 source address.
+ *
+ * The following fields can be masked: IPv4 addresses and L4 port numbers.
+ * The following fields can be either enabled or disabled completely for the
+ * matching functionality: VLAN ID tag; VLAN Priority + CFI bit; Flexible 2-byte
+ * tuple.
+ * IPv6 masks are not supported.
+ *
+ * All filters must comply with the masks previously configured.
+ * For example, with a mask equal to 255.255.255.0 for the source IPv4 address,
+ * all IPv4 filters must be created with a source IPv4 address that fits the
+ * "X.X.X.0" format.
+ *
+ * This function flushes all filters that have been previously added in
+ * the device.
+ *
+ * @param port_id
+ *   The port identifier of the Ethernet device.
+ * @param fdir_mask
+ *   The pointer to the fdir mask structure describing relevant headers fields
+ *   and relevant bits to use when matching packets addresses and ports.
+ *   IPv6 masks are not supported.
+ * @return
+ *   - (0) if successful.
+ *   - (-ENOTSUP) if hardware doesn't support flow director mode.
+ *   - (-ENODEV) if *port_id* invalid.
+ *   - (-ENOSYS) if the flow director mode is not configured in perfect
+ *      mode on *port_id*.
+ *   - (-EINVAL) if the fdir_filter information is not correct
+ */
+int rte_eth_dev_fdir_set_masks(uint8_t port_id,
+                              struct rte_fdir_masks *fdir_mask);
+
+/**
+ * The eth device event type for interrupt, and maybe others in the future.
+ */
+enum rte_eth_event_type {
+       RTE_ETH_EVENT_UNKNOWN,  /**< unknown event type */
+       RTE_ETH_EVENT_INTR_LSC, /**< lsc interrupt event */
+       RTE_ETH_EVENT_MAX       /**< max value of this enum */
+};
+
+typedef void (*rte_eth_dev_cb_fn)(uint8_t port_id, \
+               enum rte_eth_event_type event, void *cb_arg);
+/**< user application callback to be registered for interrupts */
+
+
+
+/**
+ * Register a callback function for specific port id.
+ *
+ * @param port_id
+ *  Port id.
+ * @param event
+ *  Event interested.
+ * @param cb_fn
+ *  User supplied callback function to be called.
+ * @param cb_arg
+ *  Pointer to the parameters for the registered callback.
+ *
+ * @return
+ *  - On success, zero.
+ *  - On failure, a negative value.
+ */
+int rte_eth_dev_callback_register(uint8_t port_id,
+                       enum rte_eth_event_type event,
+               rte_eth_dev_cb_fn cb_fn, void *cb_arg);
+
+/**
+ * Unregister a callback function for specific port id.
+ *
+ * @param port_id
+ *  Port id.
+ * @param event
+ *  Event interested.
+ * @param cb_fn
+ *  User supplied callback function to be called.
+ * @param cb_arg
+ *  Pointer to the parameters for the registered callback. -1 means to
+ *  remove all for the same callback address and same event.
+ *
+ * @return
+ *  - On success, zero.
+ *  - On failure, a negative value.
+ */
+int rte_eth_dev_callback_unregister(uint8_t port_id,
+                       enum rte_eth_event_type event,
+               rte_eth_dev_cb_fn cb_fn, void *cb_arg);
+
+/**
+ * @internal Executes all the user application registered callbacks for
+ * the specific device. It is for DPDK internal user only. User
+ * application should not call it directly.
+ *
+ * @param dev
+ *  Pointer to struct rte_eth_dev.
+ * @param event
+ *  Eth device interrupt event type.
+ *
+ * @return
+ *  void
+ */
+void _rte_eth_dev_callback_process(struct rte_eth_dev *dev,
+                               enum rte_eth_event_type event);
+
+/**
+ * Turn on the LED on the Ethernet device.
+ * This function turns on the LED on the Ethernet device.
+ *
+ * @param port_id
+ *   The port identifier of the Ethernet device.
+ * @return
+ *   - (0) if successful.
+ *   - (-ENOTSUP) if underlying hardware OR driver doesn't support
+ *     that operation.
+ *   - (-ENODEV) if *port_id* invalid.
+ */
+int  rte_eth_led_on(uint8_t port_id);
+
+/**
+ * Turn off the LED on the Ethernet device.
+ * This function turns off the LED on the Ethernet device.
+ *
+ * @param port_id
+ *   The port identifier of the Ethernet device.
+ * @return
+ *   - (0) if successful.
+ *   - (-ENOTSUP) if underlying hardware OR driver doesn't support
+ *     that operation.
+ *   - (-ENODEV) if *port_id* invalid.
+ */
+int  rte_eth_led_off(uint8_t port_id);
+
+/**
+ * Configure the Ethernet link flow control for Ethernet device
+ *
+ * @param port_id
+ *   The port identifier of the Ethernet device.
+ * @param fc_conf
+ *   The pointer to the structure of the flow control parameters.
+ * @return
+ *   - (0) if successful.
+ *   - (-ENOTSUP) if hardware doesn't support flow director mode.
+ *   - (-ENODEV)  if *port_id* invalid.
+ *   - (-EINVAL)  if bad parameter
+ *   - (-EIO)     if flow control setup failure
+ */
+int rte_eth_dev_flow_ctrl_set(uint8_t port_id,
+                               struct rte_eth_fc_conf *fc_conf);
+
+/**
+ * Add a MAC address to an internal array of addresses used to enable whitelist
+ * filtering to accept packets only if the destination MAC address matches.
+ *
+ * @param port
+ *   The port identifier of the Ethernet device.
+ * @param mac_addr
+ *   The MAC address to add.
+ * @param pool
+ *   VMDq pool index to associate address with (if VMDq is enabled). If VMDq is
+ *   not enabled, this should be set to 0.
+ * @return
+ *   - (0) if successfully added or *mac_addr" was already added.
+ *   - (-ENOTSUP) if hardware doesn't support this feature.
+ *   - (-ENODEV) if *port* is invalid.
+ *   - (-ENOSPC) if no more MAC addresses can be added.
+ *   - (-EINVAL) if MAC address is invalid.
+ */
+int rte_eth_dev_mac_addr_add(uint8_t port, struct ether_addr *mac_addr,
+                               uint32_t pool);
+
+/**
+ * Remove a MAC address from the internal array of addresses.
+ *
+ * @param port
+ *   The port identifier of the Ethernet device.
+ * @param mac_addr
+ *   MAC address to remove.
+ * @return
+ *   - (0) if successful, or *mac_addr* didn't exist.
+ *   - (-ENOTSUP) if hardware doesn't support.
+ *   - (-ENODEV) if *port* invalid.
+ *   - (-EADDRINUSE) if attempting to remove the default MAC address
+ */
+int rte_eth_dev_mac_addr_remove(uint8_t port, struct ether_addr *mac_addr);
+
+
+/*-------------------------- Deprecated definitions --------------------------*/
+
+/* Needed to stop deprecation warnings becoming errors with GCC. */
+#ifndef __INTEL_COMPILER
+#pragma GCC diagnostic warning "-Wdeprecated-declarations"
+#endif
+
+#ifdef RTE_LIBRTE_82576_PMD
+#pragma message "\nWARNING: CONFIG_RTE_LIBRTE_82576_PMD is deprecated. " \
+"CONFIG_RTE_LIBRTE_IGB_PMD must be set in the config file to use Intel(R) " \
+"DPDK supported Gigabit Ethernet Controllers."
+#endif
+
+#ifdef RTE_LIBRTE_IGB_PMD
+/**
+ * @deprecated The config file option CONFIG_RTE_LIBRTE_82576_PMD and resulting
+ * preprocessor define RTE_LIBRTE_82576_PMD are deprecated.
+ * CONFIG_RTE_LIBRTE_IGB_PMD must be set in the config file to use Intel(R) DPDK
+ * supported Gigabit Ethernet Controllers, and RTE_LIBRTE_IGB_PMD should be used
+ * in code.
+ */
+#define RTE_LIBRTE_82576_PMD 1
+#endif
+
+/**
+ * @deprecated rte_82576_pmd_init() is deprecated and will be removed from
+ * future versions of Intel(R) DPDK. It has been replaced by rte_igb_pmd_init().
+ *
+ * @return
+ *   0 on success
+ */
+static inline int __attribute__((deprecated))
+rte_82576_pmd_init(void)
+{
+       /* Warn at runtime, then forward to the replacement initializer. */
+       RTE_LOG(WARNING, PMD,
+               "rte_82576_pmd_init() is deprecated and will be removed "
+               "from future version of Intel(R) DPDK. It has been "
+               "replaced by rte_igb_pmd_init()");
+       return rte_igb_pmd_init();
+}
+
+
+#ifdef RTE_LIBRTE_82599_PMD
+#pragma message "\nWARNING: CONFIG_RTE_LIBRTE_82599_PMD is deprecated. " \
+"CONFIG_RTE_LIBRTE_IXGBE_PMD must be set in the config file to use Intel(R) " \
+"DPDK supported 10 Gigabit Ethernet Controllers."
+#endif
+
+#ifdef RTE_LIBRTE_IXGBE_PMD
+/**
+ * @deprecated The config file option CONFIG_RTE_LIBRTE_82599_PMD and resulting
+ * preprocessor define RTE_LIBRTE_82599_PMD are deprecated.
+ * CONFIG_RTE_LIBRTE_IXGBE_PMD must be set in the config file to use Intel(R)
+ * DPDK supported Gigabit Ethernet Controllers, and RTE_LIBRTE_IXGBE_PMD should
+ * be used in code.
+ */
+#define RTE_LIBRTE_82599_PMD 1
+#endif
+
+/**
+ * @deprecated rte_82599_pmd_init() is deprecated and will be removed from
+ * future versions of Intel(R) DPDK. It has been replaced by
+ * rte_ixgbe_pmd_init().
+ *
+ * @return
+ *   0 on success
+ */
+static inline int __attribute__((deprecated))
+rte_82599_pmd_init(void)
+{
+       /* Warn at runtime, then forward to the replacement initializer. */
+       RTE_LOG(WARNING, PMD,
+               "rte_82599_pmd_init() is deprecated and will be removed "
+               "from future version of Intel(R) DPDK. It has been "
+               "replaced by rte_ixgbe_pmd_init()");
+       return rte_ixgbe_pmd_init();
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_ETHDEV_H_ */
diff --git a/lib/librte_ether/rte_ether.h b/lib/librte_ether/rte_ether.h
new file mode 100644 (file)
index 0000000..27cadaf
--- /dev/null
@@ -0,0 +1,256 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#ifndef _RTE_ETHER_H_
+#define _RTE_ETHER_H_
+
+/**
+ * @file
+ *
+ * Ethernet Helpers in RTE
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+#define ETHER_ADDR_LEN  6 /**< Length of Ethernet address. */
+#define ETHER_TYPE_LEN  2 /**< Length of Ethernet type field. */
+#define ETHER_CRC_LEN   4 /**< Length of Ethernet CRC. */
+#define ETHER_HDR_LEN   \
+       (ETHER_ADDR_LEN * 2 + ETHER_TYPE_LEN) /**< Length of Ethernet header. */
+#define ETHER_MIN_LEN   64    /**< Minimum frame len, including CRC. */
+#define ETHER_MAX_LEN   1518  /**< Maximum frame len, including CRC. */
+#define ETHER_MTU       \
+       (ETHER_MAX_LEN - ETHER_HDR_LEN - ETHER_CRC_LEN) /**< Ethernet MTU. */
+
+#define ETHER_MAX_VLAN_FRAME_LEN \
+       (ETHER_MAX_LEN + 4) /**< Maximum VLAN frame length, including CRC. */
+
+#define ETHER_MAX_JUMBO_FRAME_LEN \
+       0x3F00 /**< Maximum Jumbo frame length, including CRC. */
+
+/**
+ * Ethernet address:
+ * A universally administered address is uniquely assigned to a device by its
+ * manufacturer. The first three octets (in transmission order) contain the
+ * Organizationally Unique Identifier (OUI). The following three (MAC-48 and
+ * EUI-48) octets are assigned by that organization with the only constraint
+ * of uniqueness.
+ * A locally administered address is assigned to a device by a network
+ * administrator and does not contain OUIs.
+ * See http://standards.ieee.org/regauth/groupmac/tutorial.html
+ */
+struct ether_addr {
+       uint8_t addr_bytes[ETHER_ADDR_LEN]; /**< Address bytes in transmission order */
+} __attribute__((__packed__)); /* packed: overlays raw frame data, may be unaligned */
+
+#define ETHER_LOCAL_ADMIN_ADDR 0x02 /**< Locally assigned Eth. address. */
+#define ETHER_GROUP_ADDR       0x01 /**< Multicast or broadcast Eth. address. */
+
+/**
+ * Check if an Ethernet address is filled with zeros.
+ *
+ * @param ea
+ *   A pointer to a ether_addr structure containing the ethernet address
+ *   to check.
+ * @return
+ *   True  (1) if the given ethernet address is filled with zeros;
+ *   false (0) otherwise.
+ */
+static inline int is_zero_ether_addr(const struct ether_addr *ea)
+{
+       /* An all-zero address has every one of its six octets clear. */
+       int idx;
+
+       for (idx = 0; idx < ETHER_ADDR_LEN; idx++) {
+               if (ea->addr_bytes[idx])
+                       return 0;
+       }
+       return 1;
+}
+
+/**
+ * Check if an Ethernet address is a unicast address.
+ *
+ * @param ea
+ *   A pointer to a ether_addr structure containing the ethernet address
+ *   to check.
+ * @return
+ *   True  (1) if the given ethernet address is a unicast address;
+ *   false (0) otherwise.
+ */
+static inline int is_unicast_ether_addr(const struct ether_addr *ea)
+{
+       /* Unicast addresses have the group (I/G) bit of the first octet clear. */
+       return (ea->addr_bytes[0] & ETHER_GROUP_ADDR) ? 0 : 1;
+}
+
+/**
+ * Check if an Ethernet address is a multicast address.
+ *
+ * @param ea
+ *   A pointer to a ether_addr structure containing the ethernet address
+ *   to check.
+ * @return
+ *   True  (1) if the given ethernet address is a multicast address;
+ *   false (0) otherwise.
+ */
+static inline int is_multicast_ether_addr(const struct ether_addr *ea)
+{
+       /* The group (multicast/broadcast) bit is the LSB of the first octet. */
+       const uint8_t first_octet = ea->addr_bytes[0];
+
+       return first_octet & ETHER_GROUP_ADDR;
+}
+
+/**
+ * Check if an Ethernet address is a broadcast address.
+ *
+ * @param ea
+ *   A pointer to a ether_addr structure containing the ethernet address
+ *   to check.
+ * @return
+ *   True  (1) if the given ethernet address is a broadcast address;
+ *   false (0) otherwise.
+ */
+static inline int is_broadcast_ether_addr(const struct ether_addr *ea)
+{
+       int i;
+
+       /* Compare byte-wise rather than through a uint16_t overlay: the
+        * ether_addr structure is packed and may sit at an odd address, so
+        * 16-bit loads through the cast pointer are unaligned accesses and
+        * violate strict aliasing. Byte accesses are always well-defined and
+        * produce the same result (all six octets equal to 0xFF). */
+       for (i = 0; i < ETHER_ADDR_LEN; i++)
+               if (ea->addr_bytes[i] != 0xFF)
+                       return 0;
+       return 1;
+}
+
+/**
+ * Check if an Ethernet address is a universally assigned address.
+ *
+ * @param ea
+ *   A pointer to a ether_addr structure containing the ethernet address
+ *   to check.
+ * @return
+ *   True  (1) if the given ethernet address is a universally assigned address;
+ *   false (0) otherwise.
+ */
+static inline int is_universal_ether_addr(const struct ether_addr *ea)
+{
+       /* Universally administered addresses have the local-admin bit clear. */
+       return (ea->addr_bytes[0] & ETHER_LOCAL_ADMIN_ADDR) ? 0 : 1;
+}
+
+/**
+ * Check if an Ethernet address is a locally assigned address.
+ *
+ * @param ea
+ *   A pointer to a ether_addr structure containing the ethernet address
+ *   to check.
+ * @return
+ *   True  (1) if the given ethernet address is a locally assigned address;
+ *   false (0) otherwise.
+ */
+static inline int is_local_admin_ether_addr(const struct ether_addr *ea)
+{
+       /* ETHER_LOCAL_ADMIN_ADDR is 0x02, so the masked value is either 0 or
+        * 0x02 and can never equal 1: the previous "== 1" comparison made this
+        * function return false for every address. Test the bit instead. */
+       return ((ea->addr_bytes[0] & ETHER_LOCAL_ADMIN_ADDR) != 0);
+}
+
+/**
+ * Check if an Ethernet address is a valid address. Checks that the address is a
+ * unicast address and is not filled with zeros.
+ *
+ * @param ea
+ *   A pointer to a ether_addr structure containing the ethernet address
+ *   to check.
+ * @return
+ *   True  (1) if the given ethernet address is valid;
+ *   false (0) otherwise.
+ */
+static inline int is_valid_assigned_ether_addr(const struct ether_addr *ea)
+{
+       /* Valid means: a unicast address that is not all-zeros. */
+       if (!is_unicast_ether_addr(ea))
+               return 0;
+       return !is_zero_ether_addr(ea);
+}
+
+/**
+ * Fast copy an Ethernet address.
+ *
+ * @param ea_from
+ *   A pointer to a ether_addr structure holding the Ethernet address to copy.
+ * @param ea_to
+ *   A pointer to a ether_addr structure where to copy the Ethernet address.
+ */
+static inline void ether_addr_copy(const struct ether_addr *ea_from,
+                                  struct ether_addr *ea_to)
+{
+#ifdef __INTEL_COMPILER
+       /* icc path: copy the six bytes as three 16-bit words. The struct is
+        * packed, so these accesses may be unaligned.
+        * NOTE(review): the cast silently drops const from ea_from — verify
+        * icc does not warn about this. */
+       uint16_t *from_words = (uint16_t *)(ea_from->addr_bytes);
+       uint16_t *to_words   = (uint16_t *)(ea_to->addr_bytes);
+
+       to_words[0] = from_words[0];
+       to_words[1] = from_words[1];
+       to_words[2] = from_words[2];
+#else
+       /*
+        * Use the common way, because of a strange gcc warning.
+        * A plain struct assignment lets the compiler choose the copy
+        * sequence for the 6-byte packed structure.
+        */
+       *ea_to = *ea_from;
+#endif
+}
+
+/**
+ * Ethernet header: Contains the destination address, source address
+ * and frame type.
+ */
+struct ether_hdr {
+       struct ether_addr d_addr; /**< Destination address. */
+       struct ether_addr s_addr; /**< Source address. */
+       uint16_t ether_type;      /**< Frame type. */
+} __attribute__((__packed__)); /* packed: exactly ETHER_HDR_LEN (14) bytes */
+
+/**
+ * Ethernet VLAN Header.
+ * Contains the 16-bit VLAN Tag Control Identifier and the Ethernet type
+ * of the encapsulated frame.
+ */
+struct vlan_hdr {
+       uint16_t vlan_tci; /**< Priority (3) + CFI (1) + Identifier Code (12) */
+       uint16_t eth_proto;/**< Ethernet type of encapsulated frame. */
+} __attribute__((__packed__)); /* packed: 4-byte tag inserted after src address */
+
+/* Ethernet frame types */
+#define ETHER_TYPE_IPv4 0x0800 /**< IPv4 Protocol. */
+#define ETHER_TYPE_IPv6 0x86DD /**< IPv6 Protocol. */
+#define ETHER_TYPE_ARP  0x0806 /**< Arp Protocol. */
+#define ETHER_TYPE_RARP 0x8035 /**< Reverse Arp Protocol. */
+#define ETHER_TYPE_VLAN 0x8100 /**< IEEE 802.1Q VLAN tagging. */
+#define ETHER_TYPE_1588 0x88F7 /**< IEEE 802.1AS 1588 Precise Time Protocol. */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_ETHER_H_ */
diff --git a/lib/librte_hash/Makefile b/lib/librte_hash/Makefile
new file mode 100644 (file)
index 0000000..103ed79
--- /dev/null
@@ -0,0 +1,55 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# name of the produced static library
+LIB = librte_hash.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR)
+
+# all sources are stored in SRCS-y (built only when the lib is enabled)
+SRCS-$(CONFIG_RTE_LIBRTE_HASH) := rte_hash.c
+SRCS-$(CONFIG_RTE_LIBRTE_HASH) += rte_fbk_hash.c
+
+# public headers symlinked into the build's include directory
+SYMLINK-$(CONFIG_RTE_LIBRTE_HASH)-include := rte_hash.h
+SYMLINK-$(CONFIG_RTE_LIBRTE_HASH)-include += rte_hash_crc.h
+SYMLINK-$(CONFIG_RTE_LIBRTE_HASH)-include += rte_jhash.h
+SYMLINK-$(CONFIG_RTE_LIBRTE_HASH)-include += rte_fbk_hash.h
+
+# this lib needs eal (tailqs, memzones) and malloc
+DEPDIRS-$(CONFIG_RTE_LIBRTE_HASH) += lib/librte_eal lib/librte_malloc
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_hash/rte_fbk_hash.c b/lib/librte_hash/rte_fbk_hash.c
new file mode 100644 (file)
index 0000000..0b1bd59
--- /dev/null
@@ -0,0 +1,210 @@
+/**
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <stdint.h>
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include <errno.h>
+
+#include <sys/queue.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_hash_crc.h>
+#include <rte_malloc.h>
+#include <rte_common.h>
+#include <rte_per_lcore.h>
+#include <rte_errno.h>
+#include <rte_string_fns.h>
+#include <rte_cpuflags.h>
+#include <rte_log.h>
+
+#include "rte_fbk_hash.h"
+#include "rte_jhash.h"
+#include "rte_hash_crc.h"
+
+/* Head type of the tail queue linking every fbk hash table together. */
+TAILQ_HEAD(rte_fbk_hash_list, rte_fbk_hash_table);
+
+/* global list of fbk_hashes (used for debug/dump); lazily reserved on first
+ * use by CHECK_FBK_HASH_LIST_CREATED() */
+static struct rte_fbk_hash_list *fbk_hash_list = NULL;
+
+/* macro to prevent duplication of list creation check code.
+ * NOTE: on failure this expands to "return NULL" with rte_errno set to
+ * E_RTE_NO_TAILQ, so it may only be used in functions returning a pointer. */
+#define CHECK_FBK_HASH_LIST_CREATED() do { \
+       if (fbk_hash_list == NULL) \
+               if ((fbk_hash_list = RTE_TAILQ_RESERVE("RTE_FBK_HASH", \
+                               rte_fbk_hash_list)) == NULL){ \
+                       rte_errno = E_RTE_NO_TAILQ; \
+                       return NULL; \
+               } \
+} while (0)
+
+
+/**
+ * Performs a lookup for an existing hash table, and returns a pointer to
+ * the table if found.
+ *
+ * @param name
+ *   Name of the hash table to find
+ *
+ * @return
+ *   pointer to hash table structure or NULL on error.
+ */
+struct rte_fbk_hash_table *
+rte_fbk_hash_find_existing(const char *name)
+{
+       struct rte_fbk_hash_table *h;
+
+       /* check that we have an initialised tail queue; on failure this
+        * returns NULL with rte_errno = E_RTE_NO_TAILQ */
+       CHECK_FBK_HASH_LIST_CREATED();
+
+       /* linear scan by name; the list is expected to stay short */
+       TAILQ_FOREACH(h, fbk_hash_list, next) {
+               if (strncmp(name, h->name, RTE_FBK_HASH_NAMESIZE) == 0)
+                       break;
+       }
+       if (h == NULL)
+               rte_errno = ENOENT; /* no table registered under that name */
+       return h;
+}
+
+/**
+ * Create a new hash table for use with four byte keys.
+ *
+ * @param params
+ *   Parameters used in creation of hash table.
+ *
+ * @return
+ *   Pointer to hash table structure that is used in future hash table
+ *   operations, or NULL on error.
+ */
+struct rte_fbk_hash_table *
+rte_fbk_hash_create(const struct rte_fbk_hash_params *params)
+{
+       struct rte_fbk_hash_table *ht;
+       char hash_name[RTE_FBK_HASH_NAMESIZE];
+       const uint32_t mem_size =
+                       sizeof(*ht) + (sizeof(ht->t[0]) * params->entries);
+       uint32_t i;
+
+       /* check that we have access to create things in shared memory. */
+       if (rte_eal_process_type() == RTE_PROC_SECONDARY){
+               rte_errno = E_RTE_SECONDARY;
+               return NULL;
+       }
+
+       /* check that we have an initialised tail queue */
+       CHECK_FBK_HASH_LIST_CREATED();
+
+       /* Error checking of parameters. */
+       if ((!rte_is_power_of_2(params->entries)) ||
+                       (!rte_is_power_of_2(params->entries_per_bucket)) ||
+                       (params->entries == 0) ||
+                       (params->entries_per_bucket == 0) ||
+                       (params->entries_per_bucket > params->entries) ||
+                       (params->entries > RTE_FBK_HASH_ENTRIES_MAX) ||
+                       (params->entries_per_bucket > RTE_FBK_HASH_ENTRIES_MAX)){
+               rte_errno = EINVAL;
+               return NULL;
+       }
+
+       rte_snprintf(hash_name, sizeof(hash_name), "FBK_%s", params->name);
+
+       /* Allocate memory for table. */
+#if defined(RTE_LIBRTE_HASH_USE_MEMZONE)
+       const struct rte_memzone *mz;
+       mz = rte_memzone_reserve(hash_name, mem_size, params->socket_id, 0);
+       if (mz == NULL)
+               return NULL;
+       ht = (struct rte_fbk_hash_table *)mz->addr;
+#else
+       ht = (struct rte_fbk_hash_table *)rte_malloc(hash_name, mem_size, 0);
+       if (ht == NULL)
+               return NULL;
+#endif
+       memset(ht, 0, mem_size);
+
+       /* Set up hash table context. */
+       rte_snprintf(ht->name, sizeof(ht->name), "%s", params->name);
+       ht->entries = params->entries;
+       ht->entries_per_bucket = params->entries_per_bucket;
+       ht->used_entries = 0;
+       ht->bucket_mask = (params->entries / params->entries_per_bucket) - 1;
+       for (ht->bucket_shift = 0, i = 1;
+           (params->entries_per_bucket & i) == 0;
+           ht->bucket_shift++, i <<= 1)
+               ; /* empty loop body */
+
+       if (params->hash_func != NULL) {
+               ht->hash_func = params->hash_func;
+               ht->init_val = params->init_val;
+       }
+       else {
+               ht->hash_func = RTE_FBK_HASH_FUNC_DEFAULT;
+               ht->init_val = RTE_FBK_HASH_INIT_VAL_DEFAULT;
+       }
+
+       if (ht->hash_func == rte_hash_crc_4byte &&
+                       !rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_2)) {
+               RTE_LOG(WARNING, HASH, "CRC32 instruction requires SSE4.2, "
+                               "which is not supported on this system. "
+                               "Falling back to software hash\n.");
+               ht->hash_func = rte_jhash_1word;
+       }
+
+       TAILQ_INSERT_TAIL(fbk_hash_list, ht, next);
+       return ht;
+}
+
+/**
+ * Free all memory used by a hash table.
+ *
+ * @param ht
+ *   Hash table to deallocate.
+ */
+void
+rte_fbk_hash_free(struct rte_fbk_hash_table *ht)
+{
+       /* NULL is accepted and ignored, free()-style */
+       if (ht == NULL)
+               return;
+       /* No way to deallocate memzones - but can de-allocate from malloc */
+#if !defined(RTE_LIBRTE_HASH_USE_MEMZONE)
+       TAILQ_REMOVE(fbk_hash_list, ht, next);
+       rte_free(ht);
+#endif
+       /* silence "unused parameter" when the memzone build compiles out the
+        * block above; harmless otherwise */
+       RTE_SET_USED(ht);
+       return;
+}
+
diff --git a/lib/librte_hash/rte_fbk_hash.h b/lib/librte_hash/rte_fbk_hash.h
new file mode 100644 (file)
index 0000000..2d16046
--- /dev/null
@@ -0,0 +1,334 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#ifndef _RTE_FBK_HASH_H_
+#define _RTE_FBK_HASH_H_
+
+/**
+ * @file
+ *
+ * This is a hash table implementation for four byte keys (fbk).
+ *
+ * Note that the return value of the add function should always be checked as,
+ * if a bucket is full, the key is not added even if there is space in other
+ * buckets. This keeps the lookup function very simple and therefore fast.
+ */
+
+#include <stdint.h>
+#include <errno.h>
+#include <sys/queue.h>
+#include <rte_hash_crc.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <string.h>
+
+#ifndef RTE_FBK_HASH_FUNC_DEFAULT
+/** Default four-byte key hash function if none is specified. */
+#define RTE_FBK_HASH_FUNC_DEFAULT              rte_hash_crc_4byte
+#endif
+
+#ifndef RTE_FBK_HASH_INIT_VAL_DEFAULT
+/** Initialising value used when calculating hash. */
+#define RTE_FBK_HASH_INIT_VAL_DEFAULT          0xFFFFFFFF
+#endif
+
+/** The maximum number of entries in the hash table that is supported. */
+#define RTE_FBK_HASH_ENTRIES_MAX               (1 << 20)
+
+/** The maximum number of entries in each bucket that is supported. */
+#define RTE_FBK_HASH_ENTRIES_PER_BUCKET_MAX    256
+
+/** Maximum size of string for naming the hash. */
+#define RTE_FBK_HASH_NAMESIZE                  32
+
+/** Type of function that can be used for calculating the hash value. */
+typedef uint32_t (*rte_fbk_hash_fn)(uint32_t key, uint32_t init_val);
+
+/** Parameters used when creating four-byte key hash table. */
+struct rte_fbk_hash_params {
+       const char *name;               /**< Name of the hash table. */
+       uint32_t entries;               /**< Total number of entries. */
+       uint32_t entries_per_bucket;    /**< Number of entries in a bucket. */
+       int socket_id;                  /**< Socket to allocate memory on. */
+       rte_fbk_hash_fn hash_func;      /**< The hash function. */
+       uint32_t init_val;              /**< For initialising hash function. */
+};
+
+/** Individual entry in the four-byte key hash table.
+ * The union lets an entry be read or written as one 64-bit word, which is
+ * what rte_fbk_hash_add_key()/rte_fbk_hash_lookup() rely on for consistent
+ * snapshots under concurrent lookup. */
+union rte_fbk_hash_entry {
+       uint64_t whole_entry;           /**< For accessing entire entry. */
+       struct {
+               uint16_t is_entry;      /**< Non-zero if entry is active. */
+               uint16_t value;         /**< Value returned by lookup. */
+               uint32_t key;           /**< Key used to find value. */
+       } entry;                        /**< For accessing each entry part. */
+} ;
+
+
+
+/** The four-byte key hash table structure. */
+struct rte_fbk_hash_table {
+       TAILQ_ENTRY(rte_fbk_hash_table) next;   /**< Linked list. */
+
+       char name[RTE_FBK_HASH_NAMESIZE];       /**< Name of the hash. */
+       uint32_t entries;               /**< Total number of entries. */
+       uint32_t entries_per_bucket;    /**< Number of entries in a bucket. */
+       uint32_t used_entries;          /**< How many entries are used. */
+       uint32_t bucket_mask;           /**< To find which bucket the key is in. */
+       uint32_t bucket_shift;          /**< Convert bucket to table offset. */
+       rte_fbk_hash_fn hash_func;      /**< The hash function. */
+       uint32_t init_val;              /**< For initialising hash function. */
+
+       /** A flat table of all buckets; flexible array sized at creation. */
+       union rte_fbk_hash_entry t[0];
+};
+
+/**
+ * Find the offset into hash table of the bucket containing a particular key.
+ *
+ * @param ht
+ *   Pointer to hash table.
+ * @param key
+ *   Key to calculate bucket for.
+ * @return
+ *   Offset into hash table.
+ */
+static inline uint32_t
+rte_fbk_hash_get_bucket(const struct rte_fbk_hash_table *ht, uint32_t key)
+{
+       /* Hash the key, mask down to a bucket index, then scale the index
+        * into a flat-table offset. */
+       const uint32_t hash_val = ht->hash_func(key, ht->init_val);
+       const uint32_t bucket_index = hash_val & ht->bucket_mask;
+
+       return bucket_index << ht->bucket_shift;
+}
+
+
+/**
+ * Add a key to an existing hash table. This operation is not multi-thread safe
+ * and should only be called from one thread.
+ *
+ * @param ht
+ *   Hash table to add the key to.
+ * @param key
+ *   Key to add to the hash table.
+ * @param value
+ *   Value to associate with key.
+ * @return
+ *   0 if ok, or negative value on error.
+ */
+static inline int
+rte_fbk_hash_add_key(struct rte_fbk_hash_table *ht,
+                       uint32_t key, uint16_t value)
+{
+       /*
+        * The writing of a new value to the hash table is done as a single
+        * 64bit operation. This should help prevent individual entries being
+        * corrupted due to race conditions, but it's still possible to
+        * overwrite entries that have just been made valid.
+        * NOTE(review): the bit packing (is_entry low, key high) matches the
+        * entry struct layout only on little-endian targets — confirm if a
+        * big-endian port is ever attempted.
+        */
+       const uint64_t new_entry = ((uint64_t)(key) << 32) |
+                       ((uint64_t)(value) << 16) |
+                       1;  /* 1 = is_entry bit. */
+       const uint32_t bucket = rte_fbk_hash_get_bucket(ht, key);
+       uint32_t i;
+
+       /* scan the bucket: first free slot wins unless the key already
+        * exists, in which case only its value is updated */
+       for (i = 0; i < ht->entries_per_bucket; i++) {
+               /* Set entry if unused. */
+               if (! ht->t[bucket + i].entry.is_entry) {
+                       ht->t[bucket + i].whole_entry = new_entry;
+                       ht->used_entries++;
+                       return 0;
+               }
+               /* Change value if key already exists. */
+               if (ht->t[bucket + i].entry.key == key) {
+                       ht->t[bucket + i].entry.value = value;
+                       return 0;
+               }
+       }
+
+       return -ENOSPC; /* No space in bucket. */
+}
+
+/**
+ * Remove a key from an existing hash table. This operation is not multi-thread
+ * safe and should only be called from one thread.
+ *
+ * @param ht
+ *   Hash table to remove the key from.
+ * @param key
+ *   Key to remove from the hash table.
+ * @return
+ *   0 if ok, or negative value on error.
+ */
+static inline int
+rte_fbk_hash_delete_key(struct rte_fbk_hash_table *ht, uint32_t key)
+{
+       const uint32_t bucket = rte_fbk_hash_get_bucket(ht, key);
+       uint32_t last_entry = ht->entries_per_bucket - 1;
+       uint32_t i, j;
+
+       for (i = 0; i < ht->entries_per_bucket; i++) {
+               /* Only a live entry may match: without the is_entry check a
+                * zeroed (empty) slot would spuriously match key 0, deleting
+                * a key that was never added and corrupting used_entries. */
+               if (ht->t[bucket + i].entry.is_entry &&
+                               ht->t[bucket + i].entry.key == key) {
+                       /* Find last key in bucket. */
+                       for (j = ht->entries_per_bucket - 1; j > i; j-- ) {
+                               if (! ht->t[bucket + j].entry.is_entry) {
+                                       last_entry = j - 1;
+                               }
+                       }
+                       /*
+                        * Move the last key to the deleted key's position, and
+                        * delete the last key. last_entry and i may be the
+                        * same but it doesn't matter.
+                        */
+                       ht->t[bucket + i].whole_entry =
+                                       ht->t[bucket + last_entry].whole_entry;
+                       ht->t[bucket + last_entry].whole_entry = 0;
+
+                       ht->used_entries--;
+                       return 0;
+               }
+       }
+
+       return -ENOENT; /* Key didn't exist. */
+}
+
+/**
+ * Find a key in the hash table. This operation is multi-thread safe.
+ *
+ * @param ht
+ *   Hash table to look in.
+ * @param key
+ *   Key to find.
+ * @return
+ *   The value that was associated with the key, or negative value on error.
+ */
+static inline int
+rte_fbk_hash_lookup(const struct rte_fbk_hash_table *ht, uint32_t key)
+{
+       const uint32_t bucket = rte_fbk_hash_get_bucket(ht, key);
+       union rte_fbk_hash_entry current_entry;
+       uint32_t i;
+
+       for (i = 0; i < ht->entries_per_bucket; i++) {
+               /* Single read of entry, which should be atomic: copying the
+                * whole 64-bit word once means is_entry/key/value are tested
+                * against one consistent snapshot even if a writer races us.
+                * NOTE(review): assumes a naturally-aligned 64-bit load is
+                * atomic on the target — true on IA; confirm elsewhere. */
+               current_entry.whole_entry = ht->t[bucket + i].whole_entry;
+               if (! current_entry.entry.is_entry) {
+                       return -ENOENT; /* Error once we hit an empty field. */
+               }
+               if (current_entry.entry.key == key) {
+                       return current_entry.entry.value;
+               }
+       }
+       return -ENOENT; /* Key didn't exist. */
+}
+
+/**
+ * Delete all entries in a hash table. This operation is not multi-thread
+ * safe and should only be called from one thread.
+ *
+ * @param ht
+ *   Hash table to delete entries in.
+ */
+static inline void
+rte_fbk_hash_clear_all(struct rte_fbk_hash_table *ht)
+{
+       /* Zero every slot in the flat bucket table, then reset the count. */
+       const size_t table_bytes = sizeof(ht->t[0]) * ht->entries;
+
+       memset(ht->t, 0, table_bytes);
+       ht->used_entries = 0;
+}
+
+/**
+ * Find what fraction of entries are being used.
+ *
+ * @param ht
+ *   Hash table to find how many entries are being used in.
+ * @return
+ *   Load factor of the hash table, or negative value on error.
+ */
+static inline double
+rte_fbk_hash_get_load_factor(struct rte_fbk_hash_table *ht)
+{
+       /* Ratio of occupied slots to total capacity. */
+       const double in_use = (double)ht->used_entries;
+       const double capacity = (double)ht->entries;
+
+       return in_use / capacity;
+}
+
+/**
+ * Performs a lookup for an existing hash table, and returns a pointer to
+ * the table if found.
+ *
+ * @param name
+ *   Name of the hash table to find
+ *
+ * @return
+ *   pointer to hash table structure or NULL on error with rte_errno
+ *   set appropriately. Possible rte_errno values include:
+ *    - ENOENT - required entry not available to return.
+ */
+struct rte_fbk_hash_table *rte_fbk_hash_find_existing(const char *name);
+
+/**
+ * Create a new hash table for use with four byte keys.
+ *
+ * @param params
+ *   Parameters used in creation of hash table.
+ *
+ * @return
+ *   Pointer to hash table structure that is used in future hash table
+ *   operations, or NULL on error with rte_errno set appropriately.
+ *   Possible rte_errno error values include:
+ *    - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
+ *    - E_RTE_SECONDARY - function was called from a secondary process instance
+ *    - E_RTE_NO_TAILQ - no tailq list could be got for the fbk hash table list
+ *    - EINVAL - invalid parameter value passed to function
+ *    - ENOSPC - the maximum number of memzones has already been allocated
+ *    - EEXIST - a memzone with the same name already exists
+ *    - ENOMEM - no appropriate memory area found in which to create memzone
+ */
+struct rte_fbk_hash_table * \
+rte_fbk_hash_create(const struct rte_fbk_hash_params *params);
+
+/**
+ * Free all memory used by a hash table.
+ * Has no effect on hash tables allocated in memory zones
+ *
+ * @param ht
+ *   Hash table to deallocate.
+ */
+void rte_fbk_hash_free(struct rte_fbk_hash_table *ht);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_FBK_HASH_H_ */
diff --git a/lib/librte_hash/rte_hash.c b/lib/librte_hash/rte_hash.c
new file mode 100644 (file)
index 0000000..76cba41
--- /dev/null
@@ -0,0 +1,407 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <string.h>
+#include <stdint.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdarg.h>
+#include <sys/queue.h>
+
+#include <rte_common.h>
+#include <rte_memory.h>         /* for definition of CACHE_LINE_SIZE */
+#include <rte_log.h>
+#include <rte_memcpy.h>
+#include <rte_prefetch.h>
+#include <rte_branch_prediction.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_errno.h>
+#include <rte_string_fns.h>
+#include <rte_cpuflags.h>
+#include <rte_log.h>
+
+#include "rte_hash.h"
+#include "rte_jhash.h"
+#include "rte_hash_crc.h"
+
+
+TAILQ_HEAD(rte_hash_list, rte_hash);
+
+/* global list of hashes (used for debug/dump) */
+static struct rte_hash_list *hash_list;
+
+/* macro to prevent duplication of list creation check code */
+#define CHECK_HASH_LIST_CREATED() do { \
+       if (hash_list == NULL) \
+               if ((hash_list = RTE_TAILQ_RESERVE("RTE_HASH", rte_hash_list)) == NULL){ \
+                       rte_errno = E_RTE_NO_TAILQ; \
+                       return NULL; \
+               } \
+} while (0)
+
+/* Macro to enable/disable run-time checking of function parameters */
+#if defined(RTE_LIBRTE_HASH_DEBUG)
+#define RETURN_IF_TRUE(cond, retval) do { \
+       if (cond) return (retval); \
+} while (0)
+#else
+#define RETURN_IF_TRUE(cond, retval)
+#endif
+
+/* Hash function used if none is specified */
+#define DEFAULT_HASH_FUNC       rte_hash_crc
+
+/* Signature bucket size is a multiple of this value */
+#define SIG_BUCKET_ALIGNMENT    16
+
+/* Stored key size is a multiple of this value */
+#define KEY_ALIGNMENT           16
+
+/* All real signatures have the high bit set, so 0 can mark an empty slot */
+#define NULL_SIGNATURE          0
+
+/* Returns a pointer to the first signature in specified bucket. */
+static inline hash_sig_t *
+get_sig_tbl_bucket(const struct rte_hash *h, uint32_t bucket_index)
+{
+       return (hash_sig_t *)
+                       &(h->sig_tbl[bucket_index * h->sig_tbl_bucket_size]);
+}
+
+/* Returns a pointer to the first key in specified bucket. */
+static inline uint8_t *
+get_key_tbl_bucket(const struct rte_hash *h, uint32_t bucket_index)
+{
+       return (uint8_t *) &(h->key_tbl[bucket_index * h->bucket_entries *
+                                    h->key_tbl_key_size]);
+}
+
+/* Returns a pointer to a key at a specific position in a specified bucket. */
+static inline void *
+get_key_from_bucket(const struct rte_hash *h, uint8_t *bkt, uint32_t pos)
+{
+       return (void *) &bkt[pos * h->key_tbl_key_size];
+}
+
+/* Does integer division with rounding-up of result. */
+static inline uint32_t
+div_roundup(uint32_t numerator, uint32_t denominator)
+{
+       return (numerator + denominator - 1) / denominator;
+}
+
+/* Increases a size (if needed) to a multiple of alignment. */
+static inline uint32_t
+align_size(uint32_t val, uint32_t alignment)
+{
+       return alignment * div_roundup(val, alignment);
+}
+
+/* Returns index in bucket of first occurrence of sig, or -1 if not found. */
+static inline int
+find_first(uint32_t sig, const uint32_t *sig_bucket, uint32_t num_sigs)
+{
+       uint32_t i;
+       for (i = 0; i < num_sigs; i++) {
+               if (sig == sig_bucket[i])
+                       return i;
+       }
+       return -1;
+}
+
+struct rte_hash *
+rte_hash_find_existing(const char *name)
+{
+       struct rte_hash *h;
+
+       /* ensure global hash tailq exists (else returns NULL, rte_errno set) */
+       CHECK_HASH_LIST_CREATED();
+
+       TAILQ_FOREACH(h, hash_list, next) {
+               if (strncmp(name, h->name, RTE_HASH_NAMESIZE) == 0)
+                       break;
+       }
+       if (h == NULL)
+               rte_errno = ENOENT;
+       return h;
+}
+
+struct rte_hash *
+rte_hash_create(const struct rte_hash_parameters *params)
+{
+       struct rte_hash *h = NULL;
+       uint32_t num_buckets, sig_bucket_size, key_size,
+               hash_tbl_size, sig_tbl_size, key_tbl_size, mem_size;
+       char hash_name[RTE_HASH_NAMESIZE];
+
+       if (rte_eal_process_type() == RTE_PROC_SECONDARY){
+               rte_errno = E_RTE_SECONDARY;
+               return NULL;
+       }
+
+       /* check that we have an initialised tail queue */
+       CHECK_HASH_LIST_CREATED();
+
+       /* Check for valid parameters */
+       if ((params == NULL) ||
+                       (params->entries > RTE_HASH_ENTRIES_MAX) ||
+                       (params->bucket_entries > RTE_HASH_BUCKET_ENTRIES_MAX) ||
+                       (params->entries < params->bucket_entries) ||
+                       !rte_is_power_of_2(params->entries) ||
+                       !rte_is_power_of_2(params->bucket_entries) ||
+                       (params->key_len == 0) ||
+                       (params->key_len > RTE_HASH_KEY_LENGTH_MAX)) {
+               rte_errno = EINVAL;
+               RTE_LOG(ERR, HASH, "rte_hash_create has invalid parameters\n");
+               return NULL;
+       }
+
+       rte_snprintf(hash_name, sizeof(hash_name), "HT_%s", params->name);
+
+       /* Calculate hash dimensions */
+       num_buckets = params->entries / params->bucket_entries;
+       sig_bucket_size = align_size(params->bucket_entries *
+                                    sizeof(hash_sig_t), SIG_BUCKET_ALIGNMENT);
+       key_size =  align_size(params->key_len, KEY_ALIGNMENT);
+
+       hash_tbl_size = align_size(sizeof(struct rte_hash), CACHE_LINE_SIZE);
+       sig_tbl_size = align_size(num_buckets * sig_bucket_size,
+                                 CACHE_LINE_SIZE);
+       key_tbl_size = align_size(num_buckets * key_size *
+                                 params->bucket_entries, CACHE_LINE_SIZE);
+
+       /* Total memory required for hash context */
+       mem_size = hash_tbl_size + sig_tbl_size + key_tbl_size;
+
+       /* Allocate as a memzone, or in normal memory space */
+#if defined(RTE_LIBRTE_HASH_USE_MEMZONE)
+       const struct rte_memzone *mz;
+       mz = rte_memzone_reserve(hash_name, mem_size, params->socket_id, 0);
+       if (mz == NULL) {
+               RTE_LOG(ERR, HASH, "memzone reservation failed\n");
+               return NULL;
+       }
+       memset(mz->addr, 0, mem_size);
+       h = (struct rte_hash *)mz->addr;
+#else
+       h = (struct rte_hash *)rte_zmalloc(hash_name, mem_size,
+                                          CACHE_LINE_SIZE);
+       if (h == NULL) {
+               RTE_LOG(ERR, HASH, "memory allocation failed\n");
+               return NULL;
+       }
+#endif
+
+       /* Setup hash context */
+       rte_snprintf(h->name, sizeof(h->name), "%s", params->name);
+       h->entries = params->entries;
+       h->bucket_entries = params->bucket_entries;
+       h->key_len = params->key_len;
+       h->hash_func_init_val = params->hash_func_init_val;
+       h->num_buckets = num_buckets;
+       h->bucket_bitmask = h->num_buckets - 1;
+       h->sig_msb = (hash_sig_t)1 << (sizeof(hash_sig_t) * 8 - 1); /* no signed-shift UB */
+       h->sig_tbl = (uint8_t *)h + hash_tbl_size;
+       h->sig_tbl_bucket_size = sig_bucket_size;
+       h->key_tbl = h->sig_tbl + sig_tbl_size;
+       h->key_tbl_key_size = key_size;
+       h->hash_func = (params->hash_func == NULL) ?
+               DEFAULT_HASH_FUNC : params->hash_func;
+
+       if (h->hash_func == rte_hash_crc &&
+                       !rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_2)) {
+               RTE_LOG(WARNING, HASH, "CRC32 instruction requires SSE4.2, "
+                               "which is not supported on this system. "
+                               "Falling back to software hash.\n");
+               h->hash_func = rte_jhash;
+       }
+
+       TAILQ_INSERT_TAIL(hash_list, h, next);
+       return h;
+}
+
+void
+rte_hash_free(struct rte_hash *h)
+{
+       if (h == NULL)
+               return;
+#if !defined(RTE_LIBRTE_HASH_USE_MEMZONE)
+       TAILQ_REMOVE(hash_list, h, next);
+       rte_free(h);
+#endif
+       /* memzone-backed tables are not freed: memzones cannot be deallocated */
+       return;
+}
+
+int32_t
+rte_hash_add_key(const struct rte_hash *h, const void *key)
+{
+       hash_sig_t sig, *sig_bucket;
+       uint8_t *key_bucket;
+       uint32_t bucket_index, i;
+       int32_t pos;
+
+       RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
+
+       /* Compute sig (MSB forced on, so never NULL_SIGNATURE) and bucket */
+       sig = h->hash_func(key, h->key_len, h->hash_func_init_val) | h->sig_msb;
+       bucket_index = sig & h->bucket_bitmask;
+       sig_bucket = get_sig_tbl_bucket(h, bucket_index);
+       key_bucket = get_key_tbl_bucket(h, bucket_index);
+
+       /* If key is already present, return its existing position */
+       for (i = 0; i < h->bucket_entries; i++) {
+               if ((sig == sig_bucket[i]) &&
+                   likely(memcmp(key, get_key_from_bucket(h, key_bucket, i),
+                                 h->key_len) == 0)) {
+                       return bucket_index * h->bucket_entries + i;
+               }
+       }
+
+       /* Find a free slot (NULL_SIGNATURE) in the bucket for the new key */
+       pos = find_first(NULL_SIGNATURE, sig_bucket, h->bucket_entries);
+
+       if (unlikely(pos < 0))
+               return -ENOSPC;
+
+       /* Add the new key to the bucket */
+       sig_bucket[pos] = sig;
+       rte_memcpy(get_key_from_bucket(h, key_bucket, pos), key, h->key_len);
+       return bucket_index * h->bucket_entries + pos;
+}
+
+int32_t
+rte_hash_del_key(const struct rte_hash *h, const void *key)
+{
+       hash_sig_t sig, *sig_bucket;
+       uint8_t *key_bucket;
+       uint32_t bucket_index, i;
+
+       RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
+
+       /* Compute sig (MSB forced on, so never NULL_SIGNATURE) and bucket */
+       sig = h->hash_func(key, h->key_len, h->hash_func_init_val) | h->sig_msb;
+       bucket_index = sig & h->bucket_bitmask;
+       sig_bucket = get_sig_tbl_bucket(h, bucket_index);
+       key_bucket = get_key_tbl_bucket(h, bucket_index);
+
+       /* Search bucket for the key; clear its signature slot if found */
+       for (i = 0; i < h->bucket_entries; i++) {
+               if ((sig == sig_bucket[i]) &&
+                   likely(memcmp(key, get_key_from_bucket(h, key_bucket, i),
+                                 h->key_len) == 0)) {
+                       sig_bucket[i] = NULL_SIGNATURE;
+                       return bucket_index * h->bucket_entries + i;
+               }
+       }
+
+       return -ENOENT;
+}
+
+int32_t
+rte_hash_lookup(const struct rte_hash *h, const void *key)
+{
+       hash_sig_t sig, *sig_bucket;
+       uint8_t *key_bucket;
+       uint32_t bucket_index, i;
+
+       RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
+
+       /* Compute sig (MSB forced on, so never NULL_SIGNATURE) and bucket */
+       sig = h->hash_func(key, h->key_len, h->hash_func_init_val) | h->sig_msb;
+       bucket_index = sig & h->bucket_bitmask;
+       sig_bucket = get_sig_tbl_bucket(h, bucket_index);
+       key_bucket = get_key_tbl_bucket(h, bucket_index);
+
+       /* Search bucket for matching signature and key */
+       for (i = 0; i < h->bucket_entries; i++) {
+               if ((sig == sig_bucket[i]) &&
+                   likely(memcmp(key, get_key_from_bucket(h, key_bucket, i),
+                                 h->key_len) == 0)) {
+                       return bucket_index * h->bucket_entries + i;
+               }
+       }
+
+       return -ENOENT;
+}
+
+int
+rte_hash_lookup_multi(const struct rte_hash *h, const void **keys,
+                     uint32_t num_keys, int32_t *positions)
+{
+       uint32_t i, j, bucket_index;
+       hash_sig_t sigs[RTE_HASH_LOOKUP_MULTI_MAX];
+
+       RETURN_IF_TRUE(((h == NULL) || (keys == NULL) || (num_keys == 0) ||
+                       (num_keys > RTE_HASH_LOOKUP_MULTI_MAX) ||
+                       (positions == NULL)), -EINVAL);
+
+       /* Compute all signatures first, prefetching the buckets they map to */
+       for (i = 0; i < num_keys; i++) {
+               sigs[i] = h->hash_func(keys[i], h->key_len,
+                               h->hash_func_init_val) | h->sig_msb;
+               bucket_index = sigs[i] & h->bucket_bitmask;
+
+               /* Pre-fetch relevant buckets */
+               rte_prefetch1((void *) get_sig_tbl_bucket(h, bucket_index));
+               rte_prefetch1((void *) get_key_tbl_bucket(h, bucket_index));
+       }
+
+       /* Search each bucket for its key; positions[i] left -ENOENT on miss */
+       for (i = 0; i < num_keys; i++) {
+               bucket_index = sigs[i] & h->bucket_bitmask;
+               hash_sig_t *sig_bucket = get_sig_tbl_bucket(h, bucket_index);
+               uint8_t *key_bucket = get_key_tbl_bucket(h, bucket_index);
+
+               positions[i] = -ENOENT;
+
+               for (j = 0; j < h->bucket_entries; j++) {
+                       if ((sigs[i] == sig_bucket[j]) &&
+                           likely(memcmp(keys[i],
+                                         get_key_from_bucket(h, key_bucket, j),
+                                         h->key_len) == 0)) {
+                               positions[i] = bucket_index *
+                                       h->bucket_entries + j;
+                               break;
+                       }
+               }
+       }
+
+       return 0;
+}
diff --git a/lib/librte_hash/rte_hash.h b/lib/librte_hash/rte_hash.h
new file mode 100644 (file)
index 0000000..eb55deb
--- /dev/null
@@ -0,0 +1,236 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#ifndef _RTE_HASH_H_
+#define _RTE_HASH_H_
+
+/**
+ * @file
+ *
+ * RTE Hash Table
+ */
+
+#include <stdint.h>
+#include <sys/queue.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** Maximum size of hash table that can be created. */
+#define RTE_HASH_ENTRIES_MAX                   (1 << 26)
+
+/** Maximum bucket size that can be created. */
+#define RTE_HASH_BUCKET_ENTRIES_MAX            16
+
+/** Maximum length of key that can be used. */
+#define RTE_HASH_KEY_LENGTH_MAX                        64
+
+/** Max number of keys that can be searched for using rte_hash_lookup_multi. */
+#define RTE_HASH_LOOKUP_MULTI_MAX              16
+
+/** Max number of characters in hash name.*/
+#define RTE_HASH_NAMESIZE                      32
+
+/** Signature of key that is stored internally. */
+typedef uint32_t hash_sig_t;
+
+/** Type of function that can be used for calculating the hash value. */
+typedef uint32_t (*rte_hash_function)(const void *key, uint32_t key_len,
+                                     uint32_t init_val);
+
+/**
+ * Parameters used when creating the hash table. The total table entries and
+ * bucket entries must be a power of 2.
+ */
+struct rte_hash_parameters {
+       const char *name;               /**< Name of the hash. */
+       uint32_t entries;               /**< Total hash table entries. */
+       uint32_t bucket_entries;        /**< Bucket entries. */
+       uint32_t key_len;               /**< Length of hash key. */
+       rte_hash_function hash_func;    /**< Function used to calculate hash. */
+       uint32_t hash_func_init_val;    /**< Init value used by hash_func. */
+       int socket_id;                  /**< NUMA Socket ID for memory. */
+};
+
+/** A hash table structure. */
+struct rte_hash {
+       TAILQ_ENTRY(rte_hash) next;/**< Next in list. */
+
+       char name[RTE_HASH_NAMESIZE];   /**< Name of the hash. */
+       uint32_t entries;               /**< Total table entries. */
+       uint32_t bucket_entries;        /**< Bucket entries. */
+       uint32_t key_len;               /**< Length of hash key. */
+       rte_hash_function hash_func;    /**< Function used to calculate hash. */
+       uint32_t hash_func_init_val;    /**< Init value used by hash_func. */
+       uint32_t num_buckets;           /**< Number of buckets in table. */
+       uint32_t bucket_bitmask;        /**< Bitmask for getting bucket index
+                                                       from hash signature. */
+       hash_sig_t sig_msb;     /**< MSB is always set in valid signatures. */
+       uint8_t *sig_tbl;       /**< Flat array of hash signature buckets. */
+       uint32_t sig_tbl_bucket_size;   /**< Signature buckets may be padded for
+                                          alignment reasons, and this is the
+                                          bucket size used by sig_tbl. */
+       uint8_t *key_tbl;       /**< Flat array of key value buckets. */
+       uint32_t key_tbl_key_size;      /**< Keys may be padded for alignment
+                                          reasons, and this is the key size
+                                          used by key_tbl. */
+};
+
+/**
+ * Create a new hash table. If RTE_LIBRTE_HASH_USE_MEMZONE is defined, then
+ * the hash table is allocated in a memzone on a specific NUMA socket ID,
+ * otherwise it is allocated in the heap.
+ *
+ * @param params
+ *   Parameters used to create and initialise the hash table.
+ * @return
+ *   Pointer to hash table structure that is used in future hash table
+ *   operations, or NULL on error, with error code set in rte_errno.
+ *   Possible rte_errno errors include:
+ *    - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
+ *    - E_RTE_SECONDARY - function was called from a secondary process instance
+ *    - E_RTE_NO_TAILQ - no tailq list could be got for the hash table list
+ *    - ENOENT - missing entry
+ *    - EINVAL - invalid parameter passed to function
+ *    - ENOSPC - the maximum number of memzones has already been allocated
+ *    - EEXIST - a memzone with the same name already exists
+ *    - ENOMEM - no appropriate memory area found in which to create memzone
+ */
+struct rte_hash *
+rte_hash_create(const struct rte_hash_parameters *params);
+
+
+/**
+ * Find an existing hash table object and return a pointer to it.
+ *
+ * @param name
+ *   Name of the hash table as passed to rte_hash_create()
+ * @return
+ *   Pointer to hash table or NULL if object not found
+ *   with rte_errno set appropriately. Possible rte_errno values include:
+ *    - ENOENT - value not available for return
+ */
+struct rte_hash *
+rte_hash_find_existing(const char *name);
+
+/**
+ * De-allocate all memory used by hash table. If RTE_LIBRTE_HASH_USE_MEMZONE
+ * is defined, then this has no effect.
+ * @param h
+ *   Hash table to free
+ */
+void
+rte_hash_free(struct rte_hash *h);
+
+/**
+ * Add a key to an existing hash table. This operation is not multi-thread safe
+ * and should only be called from one thread.
+ *
+ * @param h
+ *   Hash table to add the key to.
+ * @param key
+ *   Key to add to the hash table.
+ * @return
+ *   - -EINVAL if the parameters are invalid.
+ *   - -ENOSPC if there is no space in the hash for this key.
+ *   - A non-negative value that can be used by the caller as an offset into an
+ *     array of user data. This value is unique for this key.
+ */
+int32_t
+rte_hash_add_key(const struct rte_hash *h, const void *key);
+
+/**
+ * Remove a key from an existing hash table. This operation is not multi-thread
+ * safe and should only be called from one thread.
+ *
+ * @param h
+ *   Hash table to remove the key from.
+ * @param key
+ *   Key to remove from the hash table.
+ * @return
+ *   - -EINVAL if the parameters are invalid.
+ *   - -ENOENT if the key is not found.
+ *   - A non-negative value that can be used by the caller as an offset into an
+ *     array of user data. This value is unique for this key, and is the same
+ *     value that was returned when the key was added.
+ */
+int32_t
+rte_hash_del_key(const struct rte_hash *h, const void *key);
+
+/**
+ * Find a key in the hash table. This operation is multi-thread safe.
+ *
+ * @param h
+ *   Hash table to look in.
+ * @param key
+ *   Key to find.
+ * @return
+ *   - -EINVAL if the parameters are invalid.
+ *   - -ENOENT if the key is not found.
+ *   - A non-negative value that can be used by the caller as an offset into an
+ *     array of user data. This value is unique for this key, and is the same
+ *     value that was returned when the key was added.
+ */
+int32_t
+rte_hash_lookup(const struct rte_hash *h, const void *key);
+
+/**
+ * Find multiple keys in the hash table. This operation is multi-thread safe.
+ *
+ * @param h
+ *   Hash table to look in.
+ * @param keys
+ *   A pointer to a list of keys to look for.
+ * @param num_keys
+ *   How many keys are in the keys list (at most RTE_HASH_LOOKUP_MULTI_MAX).
+ * @param positions
+ *   Output containing a list of values, corresponding to the list of keys that
+ *   can be used by the caller as an offset into an array of user data. These
+ *   values are unique for each key, and are the same values that were returned
+ *   when each key was added. If a key in the list was not found, then -ENOENT
+ *   will be the value.
+ * @return
+ *   -EINVAL if there's an error, otherwise 0.
+ */
+int
+rte_hash_lookup_multi(const struct rte_hash *h, const void **keys,
+                     uint32_t num_keys, int32_t *positions);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_HASH_H_ */
diff --git a/lib/librte_hash/rte_hash_crc.h b/lib/librte_hash/rte_hash_crc.h
new file mode 100644 (file)
index 0000000..c5cee9c
--- /dev/null
@@ -0,0 +1,114 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#ifndef _RTE_HASH_CRC_H_
+#define _RTE_HASH_CRC_H_
+
+/**
+ * @file
+ *
+ * RTE CRC Hash
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+/**
+ * Use a single crc32 instruction (requires SSE4.2) to hash a 4 byte value.
+ *
+ * @param data
+ *   Data to perform hash on.
+ * @param init_val
+ *   Value to initialise hash generator.
+ * @return
+ *   32bit calculated hash value.
+ */
+static inline uint32_t
+rte_hash_crc_4byte(uint32_t data, uint32_t init_val)
+{
+       asm volatile("crc32 %[data], %[init_val]"
+                    : [init_val]"=r" (init_val)
+                    : [data]"r" (data), "[init_val]" (init_val));
+       return init_val;
+}
+
+/**
+ * Use crc32 instruction to perform a hash.
+ *
+ * @param data
+ *   Data to perform hash on.
+ * @param data_len
+ *   How many bytes to use to calculate hash value.
+ * @param init_val
+ *   Value to initialise hash generator.
+ * @return
+ *   32bit calculated hash value.
+ */
+static inline uint32_t
+rte_hash_crc(const void *data, uint32_t data_len, uint32_t init_val)
+{
+       unsigned i;
+       uint32_t temp = 0;
+       const uint32_t *p32 = (const uint32_t *)data;
+
+       for (i = 0; i < data_len / 4; i++) {
+               init_val = rte_hash_crc_4byte(*p32++, init_val);
+       }
+
+       switch (3 - (data_len & 0x03)) {
+       case 0:
+               temp |= *((const uint8_t *)p32 + 2) << 16;
+               /* Fallthrough */
+       case 1:
+               temp |= *((const uint8_t *)p32 + 1) << 8;
+               /* Fallthrough */
+       case 2:
+               temp |= *((const uint8_t *)p32);
+               init_val = rte_hash_crc_4byte(temp, init_val);
+       default:
+               break;
+       }
+
+       return init_val;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_HASH_CRC_H_ */
diff --git a/lib/librte_hash/rte_jhash.h b/lib/librte_hash/rte_jhash.h
new file mode 100644 (file)
index 0000000..12f794c
--- /dev/null
@@ -0,0 +1,263 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#ifndef _RTE_JHASH_H
+#define _RTE_JHASH_H
+
+/**
+ * @file
+ *
+ * jhash functions.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+/* jhash.h: Jenkins hash support.
+ *
+ * Copyright (C) 1996 Bob Jenkins (bob_jenkins@burtleburtle.net)
+ *
+ * http://burtleburtle.net/bob/hash/
+ *
+ * These are the credits from Bob's sources:
+ *
+ * lookup2.c, by Bob Jenkins, December 1996, Public Domain.
+ * hash(), hash2(), hash3, and mix() are externally useful functions.
+ * Routines to test the hash are included if SELF_TEST is defined.
+ * You can use this free for any purpose.  It has no warranty.
+ *
+ * $FreeBSD$
+ */
+
+/** @internal Internal function. NOTE: Arguments are modified in place.
+ * Three rounds of Bob Jenkins' reversible mixing step: each line is a
+ * subtract plus XOR-shift, chosen so that every bit of a, b and c
+ * affects every bit of the result. */
+#define __rte_jhash_mix(a, b, c) do { \
+       a -= b; a -= c; a ^= (c>>13); \
+       b -= c; b -= a; b ^= (a<<8); \
+       c -= a; c -= b; c ^= (b>>13); \
+       a -= b; a -= c; a ^= (c>>12); \
+       b -= c; b -= a; b ^= (a<<16); \
+       c -= a; c -= b; c ^= (b>>5); \
+       a -= b; a -= c; a ^= (c>>3); \
+       b -= c; b -= a; b ^= (a<<10); \
+       c -= a; c -= b; c ^= (b>>15); \
+} while (0)
+
+/** The golden ratio: an arbitrary value. */
+#define RTE_JHASH_GOLDEN_RATIO      0x9e3779b9
+
+/**
+ * The most generic version, hashes an arbitrary sequence
+ * of bytes.  No alignment or length assumptions are made about
+ * the input key.  Bytes are loaded individually, so the result is
+ * independent of host endianness.
+ *
+ * @param key
+ *   Key to calculate hash of.
+ * @param length
+ *   Length of key in bytes.
+ * @param initval
+ *   Initialising value of hash.
+ * @return
+ *   Calculated hash value.
+ */
+static inline uint32_t
+rte_jhash(const void *key, uint32_t length, uint32_t initval)
+{
+       uint32_t a, b, c, len;
+       const uint8_t *k = (const uint8_t *) key;
+
+       len = length;
+       a = b = RTE_JHASH_GOLDEN_RATIO;
+       c = initval;
+
+       /* Main loop: consume the key 12 bytes (three words) at a time. */
+       while (len >= 12) {
+               a += (k[0] + ((uint32_t)k[1] << 8) + ((uint32_t)k[2] << 16) +
+                     ((uint32_t)k[3] << 24));
+               b += (k[4] + ((uint32_t)k[5] << 8) + ((uint32_t)k[6] << 16) +
+                     ((uint32_t)k[7] << 24));
+               c += (k[8] + ((uint32_t)k[9] << 8) + ((uint32_t)k[10] << 16) +
+                     ((uint32_t)k[11] << 24));
+
+               __rte_jhash_mix(a,b,c);
+
+               k += 12;
+               len -= 12;
+       }
+
+       /* Fold in the total length, then the 0-11 remaining tail bytes.
+        * Every case deliberately falls through to the next (classic
+        * Jenkins tail handling) — there are intentionally no breaks. */
+       c += length;
+       switch (len) {
+               case 11: c += ((uint32_t)k[10] << 24);
+               case 10: c += ((uint32_t)k[9] << 16);
+               case 9 : c += ((uint32_t)k[8] << 8);
+               case 8 : b += ((uint32_t)k[7] << 24);
+               case 7 : b += ((uint32_t)k[6] << 16);
+               case 6 : b += ((uint32_t)k[5] << 8);
+               case 5 : b += k[4];
+               case 4 : a += ((uint32_t)k[3] << 24);
+               case 3 : a += ((uint32_t)k[2] << 16);
+               case 2 : a += ((uint32_t)k[1] << 8);
+               case 1 : a += k[0];
+               default: break;
+       };
+
+       __rte_jhash_mix(a,b,c);
+
+       return c;
+}
+
+/**
+ * A special optimized version that handles 1 or more of uint32_ts.
+ * The length parameter here is the number of uint32_ts in the key.
+ *
+ * @param k
+ *   Key to calculate hash of.
+ * @param length
+ *   Length of key in units of 4 bytes.
+ * @param initval
+ *   Initialising value of hash.
+ * @return
+ *   Calculated hash value.
+ */
+static inline uint32_t
+rte_jhash2(uint32_t *k, uint32_t length, uint32_t initval)
+{
+       uint32_t a, b, c, len;
+
+       a = b = RTE_JHASH_GOLDEN_RATIO;
+       c = initval;
+       len = length;
+
+       /* Main loop: consume three words per iteration. */
+       while (len >= 3) {
+               a += k[0];
+               b += k[1];
+               c += k[2];
+               __rte_jhash_mix(a, b, c);
+               k += 3; len -= 3;
+       }
+
+       /* Fold in the key length in BYTES (length * 4), then the 0-2
+        * remaining words.  The fallthrough from case 2 to case 1 is
+        * deliberate. */
+       c += length * 4;
+
+       switch (len) {
+               case 2 : b += k[1];
+               case 1 : a += k[0];
+               default: break;
+       };
+
+       __rte_jhash_mix(a,b,c);
+
+       return c;
+}
+
+
+/**
+ * A special ultra-optimized version that knows it is hashing exactly
+ * 3 words.
+ *
+ * @param a
+ *   First word to calculate hash of.
+ * @param b
+ *   Second word to calculate hash of.
+ * @param c
+ *   Third word to calculate hash of.
+ * @param initval
+ *   Initialising value of hash.
+ * @return
+ *   Calculated hash value.
+ */
+static inline uint32_t
+rte_jhash_3words(uint32_t a, uint32_t b, uint32_t c, uint32_t initval)
+{
+       /* Seed the three lanes, then run one mixing pass. */
+       c += initval;
+       b += RTE_JHASH_GOLDEN_RATIO;
+       a += RTE_JHASH_GOLDEN_RATIO;
+
+       __rte_jhash_mix(a, b, c);
+
+       /*
+        * NOTE: the final "c += length; __rte_jhash_mix(a, b, c);" round
+        * that rte_jhash() performs is deliberately skipped here.
+        */
+       return c;
+}
+
+/**
+ * A special ultra-optimized version that knows it is hashing exactly
+ * 2 words.
+ *
+ * NOTE: In particular the "c += length; __rte_jhash_mix(a,b,c);" normally
+ *       done at the end is not done here.
+ *
+ * @param a
+ *   First word to calculate hash of.
+ * @param b
+ *   Second word to calculate hash of.
+ * @param initval
+ *   Initialising value of hash.
+ * @return
+ *   Calculated hash value.
+ */
+static inline uint32_t
+rte_jhash_2words(uint32_t a, uint32_t b, uint32_t initval)
+{
+       /* Delegate to the 3-word variant with the third word zeroed. */
+       return rte_jhash_3words(a, b, 0, initval);
+}
+
+/**
+ * A special ultra-optimized version that knows it is hashing exactly
+ * 1 word.
+ *
+ * NOTE: In particular the "c += length; __rte_jhash_mix(a,b,c);" normally
+ *       done at the end is not done here.
+ *
+ * @param a
+ *   Word to calculate hash of.
+ * @param initval
+ *   Initialising value of hash.
+ * @return
+ *   Calculated hash value.
+ */
+static inline uint32_t
+rte_jhash_1word(uint32_t a, uint32_t initval)
+{
+       /* Delegate to the 3-word variant with the other words zeroed. */
+       return rte_jhash_3words(a, 0, 0, initval);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_JHASH_H */
diff --git a/lib/librte_lpm/Makefile b/lib/librte_lpm/Makefile
new file mode 100644 (file)
index 0000000..1cb8d27
--- /dev/null
@@ -0,0 +1,51 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_lpm.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR)
+
+# all source are stored in SRCS-y
+SRCS-$(CONFIG_RTE_LIBRTE_LPM) := rte_lpm.c
+
+# install this header file
+SYMLINK-$(CONFIG_RTE_LIBRTE_LPM)-include := rte_lpm.h
+
+# this lib needs eal and malloc (rte_lpm.c uses rte_zmalloc/rte_free)
+DEPDIRS-$(CONFIG_RTE_LIBRTE_LPM) += lib/librte_eal lib/librte_malloc
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_lpm/rte_lpm.c b/lib/librte_lpm/rte_lpm.c
new file mode 100644 (file)
index 0000000..4269b3c
--- /dev/null
@@ -0,0 +1,971 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <string.h>
+#include <stdint.h>
+#include <errno.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <errno.h>
+#include <sys/queue.h>
+
+#include <rte_log.h>
+#include <rte_branch_prediction.h>
+#include <rte_common.h>
+#include <rte_memory.h>        /* for definition of CACHE_LINE_SIZE */
+#include <rte_malloc.h>
+#include <rte_memzone.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_string_fns.h>
+#include <rte_errno.h>
+
+#include "rte_lpm.h"
+
+TAILQ_HEAD(rte_lpm_list, rte_lpm);
+
+/* Global list of LPM tables, used by rte_lpm_find_existing() and for
+ * debug/dump.  (Original comment said "ring" — copy/paste artefact.) */
+static struct rte_lpm_list *lpm_list;
+
+/* Lazily reserve the LPM tail queue, setting rte_errno and returning
+ * NULL from the CALLER on failure.  NOTE(review): because it expands to
+ * "return NULL", this macro may only be used in functions returning a
+ * pointer.  The indentation below is misleading, but the braces do
+ * balance: both statements are inside the inner if's block. */
+#define CHECK_LPM_LIST_CREATED() do { \
+       if (lpm_list == NULL) \
+               if ((lpm_list = RTE_TAILQ_RESERVE("RTE_LPM", rte_lpm_list)) == NULL){ \
+                       rte_errno = E_RTE_NO_TAILQ; \
+               return NULL; \
+       } \
+} while (0)
+
+/* Prefixes of depth <= 24 are resolved entirely within the tbl24 table;
+ * deeper prefixes extend into tbl8 groups. */
+#define MAX_DEPTH_TBL24 24
+
+/* Values for the per-entry 'valid' / 'valid_group' flags. */
+enum valid_flag {
+       INVALID = 0,
+       VALID
+};
+
+/* Macro to enable/disable run-time checks. */
+#if defined(RTE_LIBRTE_LPM_DEBUG)
+#include <rte_debug.h>
+#define VERIFY_DEPTH(depth) do {                                                  \
+       if ((depth == 0) || (depth > RTE_LPM_MAX_DEPTH))                          \
+               rte_panic("LPM: Invalid depth (%u) at line %d", depth, __LINE__); \
+} while (0)
+#else
+#define VERIFY_DEPTH(depth)
+#endif
+
+/*
+ * Convert a prefix depth (1..32) to a 32-bit netmask with the top
+ * 'depth' bits set, e.g. depth 8 -> 0xFF000000.
+ */
+static uint32_t __attribute__((pure))
+depth_to_mask(uint8_t depth)
+{
+       VERIFY_DEPTH(depth);
+
+       /* Shift an all-ones value left so only the top 'depth' bits
+        * remain.  This replaces "(int)0x80000000 >> (depth - 1)", whose
+        * right shift of a negative signed value is implementation-
+        * defined in C (C99 6.5.7); the unsigned left shift is fully
+        * portable for depth in 1..32. */
+       return (uint32_t)0xFFFFFFFF << (32 - depth);
+}
+
+/*
+ * Convert a prefix depth to the number of table entries it spans:
+ * tbl24 entries for depth <= 24, tbl8 entries for deeper prefixes.
+ */
+static inline uint32_t __attribute__((pure))
+depth_to_range(uint8_t depth)
+{
+       VERIFY_DEPTH(depth);
+
+       /* Deep prefix: 2^(32 - depth) tbl8 entries. */
+       if (depth > MAX_DEPTH_TBL24)
+               return 1 << (RTE_LPM_MAX_DEPTH - depth);
+
+       /* Shallow prefix: 2^(24 - depth) tbl24 entries. */
+       return 1 << (MAX_DEPTH_TBL24 - depth);
+}
+
+/*
+ * Find an existing lpm table by name and return a pointer to it.
+ * On failure returns NULL with rte_errno set to ENOENT (not found) or
+ * E_RTE_NO_TAILQ (tail queue could not be reserved).
+ */
+struct rte_lpm *
+rte_lpm_find_existing(const char *name)
+{
+       struct rte_lpm *l;
+
+       /* check that we have an initialised tail queue */
+       CHECK_LPM_LIST_CREATED();
+
+       /* Linear scan of the global list; names compared up to
+        * RTE_LPM_NAMESIZE characters. */
+       TAILQ_FOREACH(l, lpm_list, next) {
+               if (strncmp(name, l->name, RTE_LPM_NAMESIZE) == 0)
+                       break;
+       }
+
+       if (l == NULL)
+               rte_errno = ENOENT;
+
+       return l;
+}
+
+/*
+ * Allocate and initialise an LPM table.
+ *
+ * name         : table name (used for the memzone / malloc tag)
+ * socket_id    : NUMA socket for the memzone, or -1 for any
+ * max_rules    : rule capacity; rounded up so each of the 32 depths
+ *                gets an equal share
+ * mem_location : RTE_LPM_MEMZONE or RTE_LPM_HEAP
+ * Returns the new table, or NULL with rte_errno set on argument /
+ * process-type / tailq errors.  NOTE(review): rte_errno is NOT set on
+ * the two allocation-failure paths below — only a log message is
+ * emitted; confirm callers do not rely on rte_errno there.
+ */
+struct rte_lpm *
+rte_lpm_create(const char *name, int socket_id, int max_rules,
+               int mem_location)
+{
+       char mem_name[RTE_LPM_NAMESIZE];
+       struct rte_lpm *lpm = NULL;
+       uint32_t mem_size;
+
+       /* check that we have access to create things in shared memory. */
+       if (rte_eal_process_type() == RTE_PROC_SECONDARY){
+               rte_errno = E_RTE_SECONDARY;
+               return NULL;
+       }
+
+       /* check that we have an initialised tail queue */
+       CHECK_LPM_LIST_CREATED();
+
+       /* The lookup tables rely on 16-bit entries; fail the build if
+        * the structs ever grow. */
+       RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl24_entry) != 2);
+       RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl8_entry) != 2);
+
+       /* Check user arguments. */
+       if ((name == NULL) || (socket_id < -1) || (max_rules == 0) ||
+                       (mem_location != RTE_LPM_HEAP &&
+                                       mem_location != RTE_LPM_MEMZONE)){
+               rte_errno = EINVAL;
+               return NULL;
+       }
+
+       rte_snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);
+
+       /*
+        * Pad out max_rules so that each depth is given the same number of
+        * rules.
+        */
+       if (max_rules % RTE_LPM_MAX_DEPTH) {
+               max_rules += RTE_LPM_MAX_DEPTH -
+                               (max_rules % RTE_LPM_MAX_DEPTH);
+       }
+
+       /* Determine the amount of memory to allocate: the struct itself
+        * plus the trailing flexible rules table. */
+       mem_size = sizeof(*lpm) + (sizeof(lpm->rules_tbl[0]) * max_rules);
+
+       /* Allocate memory to store the LPM data structures. */
+       if (mem_location == RTE_LPM_MEMZONE) {
+               const struct rte_memzone *mz;
+               uint32_t mz_flags = 0;
+
+               mz = rte_memzone_reserve(mem_name, mem_size, socket_id,
+                               mz_flags);
+               if (mz == NULL) {
+                       RTE_LOG(ERR, LPM, "LPM memzone creation failed\n");
+                       return NULL;
+               }
+
+               memset(mz->addr, 0, mem_size);
+               lpm = (struct rte_lpm *) mz->addr;
+
+       }
+       else {
+               /* NOTE(review): heap path ignores socket_id — rte_zmalloc
+                * allocates on the caller's socket; confirm intended. */
+               lpm = (struct rte_lpm *)rte_zmalloc(mem_name, mem_size,
+                       CACHE_LINE_SIZE);
+               if (lpm == NULL) {
+                       RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
+                       return NULL;
+               }
+       }
+
+       /* Save user arguments. */
+       lpm->max_rules_per_depth = max_rules / RTE_LPM_MAX_DEPTH;
+       rte_snprintf(lpm->name, sizeof(lpm->name), "%s", name);
+       lpm->mem_location = mem_location;
+
+       TAILQ_INSERT_TAIL(lpm_list, lpm, next);
+
+       return lpm;
+}
+
+/*
+ * Deallocate memory for the given LPM table.
+ *
+ * NOTE(review): memzone-backed tables cannot be freed and are also left
+ * on the global list, so their name remains reserved for the lifetime
+ * of the process.
+ */
+void
+rte_lpm_free(struct rte_lpm *lpm)
+{
+       /* Check user arguments. */
+       if (lpm == NULL)
+               return;
+
+       /* Note: it is currently not possible to free a memzone. */
+       if (lpm->mem_location == RTE_LPM_HEAP){
+               TAILQ_REMOVE(lpm_list, lpm, next);
+               rte_free(lpm);
+       }
+}
+
+/*
+ * Add a rule (ip_masked, depth, next_hop) to the rule table, or update
+ * next_hop in place if the rule already exists.  Returns the rule's
+ * index, or -ENOSPC when the depth's rule group is full.
+ *
+ * NOTE: The rule table is split into 32 groups. Each group contains rules that
+ * apply to a specific prefix depth (i.e. group 1 contains rules that apply to
+ * prefixes with a depth of 1 etc.). In the following code (depth - 1) is used
+ * to refer to depth 1 because even though the depth range is 1 - 32, depths
+ * are stored in the rule table from 0 - 31.
+ * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
+ */
+static inline int32_t
+rule_add(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
+       uint8_t next_hop)
+{
+       uint32_t rule_gindex, rule_index, last_rule;
+
+       VERIFY_DEPTH(depth);
+
+       /* rule_gindex stands for rule group index. */
+       rule_gindex = ((depth - 1) * lpm->max_rules_per_depth);
+       /* Initialise rule_index to point to start of rule group. */
+       rule_index = rule_gindex;
+       /* Last rule = Last used rule in this rule group. */
+       last_rule = rule_gindex + lpm->used_rules_at_depth[depth - 1];
+
+       /* Scan through rule group to see if rule already exists. */
+       for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) {
+
+               /* If rule already exists update its next_hop and return. */
+               if (lpm->rules_tbl[rule_index].ip == ip_masked) {
+                       lpm->rules_tbl[rule_index].next_hop = next_hop;
+
+                       return rule_index;
+               }
+       }
+
+       /*
+        * If rule does not exist check if there is space to add a new rule to
+        * this rule group. If there is no space return error. */
+       if (lpm->used_rules_at_depth[depth - 1] == lpm->max_rules_per_depth) {
+               return -ENOSPC;
+       }
+
+       /* If there is space for the new rule add it.  After the loop
+        * above, rule_index == last_rule (first unused slot). */
+       lpm->rules_tbl[rule_index].ip = ip_masked;
+       lpm->rules_tbl[rule_index].next_hop = next_hop;
+
+       /* Increment the used rules counter for this rule group. */
+       lpm->used_rules_at_depth[depth - 1]++;
+
+       return rule_index;
+}
+
+/*
+ * Delete the rule at rule_index from the depth's rule group by moving
+ * the group's last used rule into its slot (order within a group is
+ * not preserved).
+ * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
+ */
+static inline void
+rule_delete(struct rte_lpm *lpm, int32_t rule_index, uint8_t depth)
+{
+       uint32_t rule_gindex, last_rule_index;
+
+       VERIFY_DEPTH(depth);
+
+       rule_gindex = ((depth - 1) * lpm->max_rules_per_depth);
+       last_rule_index = rule_gindex +
+                       (lpm->used_rules_at_depth[depth - 1]) - 1;
+       /*
+        * Overwrite redundant rule with last rule in group and decrement rule
+        * counter.
+        */
+       lpm->rules_tbl[rule_index] = lpm->rules_tbl[last_rule_index];
+       lpm->used_rules_at_depth[depth - 1]--;
+}
+
+
+/*
+ * Look up the rule (ip_masked, depth) in the rule table.
+ *
+ * Valid range for the depth parameter is 1 .. 32 inclusive.  Returns
+ * the matching rule's index, or -E_RTE_NO_TAILQ when no rule matches.
+ */
+static inline int32_t
+rule_find(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth)
+{
+       uint32_t idx, first, last;
+
+       VERIFY_DEPTH(depth);
+
+       /* All rules of a given depth live in one contiguous group. */
+       first = (depth - 1) * lpm->max_rules_per_depth;
+       last = first + lpm->used_rules_at_depth[depth - 1];
+
+       /* Linear scan of the used portion of the group. */
+       for (idx = first; idx < last; idx++)
+               if (lpm->rules_tbl[idx].ip == ip_masked)
+                       return idx;
+
+       return -E_RTE_NO_TAILQ;
+}
+
+/*
+ * Find a free tbl8 group, zero it, mark it VALID and return its group
+ * index; -ENOSPC when all groups are in use.  A group is considered
+ * free when its first entry's valid_group flag is INVALID.
+ */
+static inline int32_t
+tbl8_alloc(struct rte_lpm_tbl8_entry *tbl8)
+{
+       uint32_t tbl8_gindex; /* tbl8 group index. */
+       struct rte_lpm_tbl8_entry *tbl8_entry;
+
+       /* Scan through tbl8 to find a free (i.e. INVALID) tbl8 group. */
+       for (tbl8_gindex = 0; tbl8_gindex < RTE_LPM_TBL8_NUM_GROUPS;
+                       tbl8_gindex++) {
+               tbl8_entry = &tbl8[tbl8_gindex *
+                                  RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
+               /* If a free tbl8 group is found clean it and set as VALID. */
+               if (!tbl8_entry->valid_group) {
+                       memset(&tbl8_entry[0], 0,
+                                       RTE_LPM_TBL8_GROUP_NUM_ENTRIES *
+                                       sizeof(tbl8_entry[0]));
+
+                       tbl8_entry->valid_group = VALID;
+
+                       /* Return group index for allocated tbl8 group. */
+                       return tbl8_gindex;
+               }
+       }
+
+       /* If there are no tbl8 groups free then return error. */
+       return -ENOSPC;
+}
+
+/* Release a tbl8 group by clearing its first entry's valid_group flag;
+ * tbl8_alloc() will then treat the whole group as free. */
+static inline void
+tbl8_free(struct rte_lpm_tbl8_entry *tbl8, uint32_t tbl8_group_start)
+{
+       /* Set tbl8 group invalid*/
+       tbl8[tbl8_group_start].valid_group = INVALID;
+}
+
+/*
+ * Add a rule with depth <= 24.  Every tbl24 entry covered by the prefix
+ * is written; where an entry has already been extended into a tbl8
+ * group, that group's entries are updated instead so deeper existing
+ * routes are preserved.
+ */
+static inline int32_t
+add_depth_small(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
+               uint8_t next_hop)
+{
+       uint32_t tbl24_index, tbl24_range, tbl8_index, tbl8_group_end, i, j;
+
+       /* Calculate the index into Table24. */
+       tbl24_index = ip >> 8;
+       tbl24_range = depth_to_range(depth);
+
+       for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
+               /*
+                * For invalid OR valid and non-extended tbl 24 entries set
+                * entry.
+                */
+               if (!lpm->tbl24[i].valid || (lpm->tbl24[i].ext_entry == 0 &&
+                               lpm->tbl24[i].depth <= depth)) {
+
+                       struct rte_lpm_tbl24_entry new_tbl24_entry = {
+                               .valid = VALID,
+                               .ext_entry = 0,
+                               .depth = depth,
+                               { .next_hop = next_hop, }
+                       };
+
+                       /* Setting tbl24 entry in one go to avoid race
+                        * conditions */
+                       lpm->tbl24[i] = new_tbl24_entry;
+
+                       continue;
+               }
+
+               /*
+                * If tbl24 entry is valid and extended calculate the index
+                * into tbl8.  BUGFIX: read the group index from entry 'i',
+                * not from 'tbl24_index' (the first entry of the range) —
+                * the old code updated the first entry's tbl8 group for
+                * every extended entry in the range.
+                */
+               tbl8_index = lpm->tbl24[i].tbl8_gindex *
+                               RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+               tbl8_group_end = tbl8_index + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+
+               /* Overwrite only tbl8 entries with an equal or shallower
+                * depth; deeper routes win. */
+               for (j = tbl8_index; j < tbl8_group_end; j++) {
+                       if (!lpm->tbl8[j].valid ||
+                                       lpm->tbl8[j].depth <= depth) {
+                               struct rte_lpm_tbl8_entry new_tbl8_entry = {
+                                       .valid = VALID,
+                                       .valid_group = VALID,
+                                       .depth = depth,
+                                       .next_hop = next_hop,
+                               };
+
+                               /*
+                                * Setting tbl8 entry in one go to avoid race
+                                * conditions
+                                */
+                               lpm->tbl8[j] = new_tbl8_entry;
+                       }
+               }
+       }
+
+       return 0;
+}
+
+/*
+ * Add a rule with depth > 24.  The covered tbl24 entry is extended into
+ * a tbl8 group (allocating one, or converting / reusing an existing
+ * one) and the relevant tbl8 entries are populated.
+ * Returns 0, or -ENOSPC when no free tbl8 group is available.
+ */
+static inline int32_t
+add_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
+               uint8_t next_hop)
+{
+       uint32_t tbl24_index;
+       int32_t tbl8_group_index, tbl8_group_start, tbl8_group_end, tbl8_index,
+               tbl8_range, i;
+
+       tbl24_index = (ip_masked >> 8);
+       tbl8_range = depth_to_range(depth);
+
+       if (!lpm->tbl24[tbl24_index].valid) {
+               /* Search for a free tbl8 group. */
+               tbl8_group_index = tbl8_alloc(lpm->tbl8);
+
+               /* Check tbl8 allocation was successful. */
+               if (tbl8_group_index < 0) {
+                       return tbl8_group_index;
+               }
+
+               /* Find index into tbl8 and range. */
+               tbl8_index = (tbl8_group_index *
+                               RTE_LPM_TBL8_GROUP_NUM_ENTRIES) +
+                               (ip_masked & 0xFF);
+
+               /* Set tbl8 entry.  (Fields set individually here is safe:
+                * the group is not reachable until tbl24 is updated.) */
+               for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
+                       lpm->tbl8[i].depth = depth;
+                       lpm->tbl8[i].next_hop = next_hop;
+                       lpm->tbl8[i].valid = VALID;
+               }
+
+               /*
+                * Update tbl24 entry to point to new tbl8 entry. Note: The
+                * ext_flag and tbl8_index need to be updated simultaneously,
+                * so assign whole structure in one go
+                */
+
+               struct rte_lpm_tbl24_entry new_tbl24_entry = {
+                       .valid = VALID,
+                       .ext_entry = 1,
+                       .depth = 0,
+                       { .tbl8_gindex = (uint8_t)tbl8_group_index, }
+               };
+
+               lpm->tbl24[tbl24_index] = new_tbl24_entry;
+
+       }/* If valid entry but not extended calculate the index into Table8. */
+       else if (lpm->tbl24[tbl24_index].ext_entry == 0) {
+               /* Search for free tbl8 group. */
+               tbl8_group_index = tbl8_alloc(lpm->tbl8);
+
+               if (tbl8_group_index < 0) {
+                       return tbl8_group_index;
+               }
+
+               tbl8_group_start = tbl8_group_index *
+                               RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+               tbl8_group_end = tbl8_group_start +
+                               RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+
+               /* Populate new tbl8 with tbl24 value. */
+               for (i = tbl8_group_start; i < tbl8_group_end; i++) {
+                       lpm->tbl8[i].valid = VALID;
+                       lpm->tbl8[i].depth = lpm->tbl24[tbl24_index].depth;
+                       lpm->tbl8[i].next_hop =
+                                       lpm->tbl24[tbl24_index].next_hop;
+               }
+
+               tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
+
+               /* Insert new rule into the tbl8 entry. */
+               for (i = tbl8_index; i < tbl8_index + tbl8_range; i++) {
+                       if (!lpm->tbl8[i].valid ||
+                                       lpm->tbl8[i].depth <= depth) {
+                               lpm->tbl8[i].valid = VALID;
+                               lpm->tbl8[i].depth = depth;
+                               lpm->tbl8[i].next_hop = next_hop;
+                       }
+               }
+
+               /*
+                * Update tbl24 entry to point to new tbl8 entry. Note: The
+                * ext_flag and tbl8_index need to be updated simultaneously,
+                * so assign whole structure in one go.
+                */
+
+               struct rte_lpm_tbl24_entry new_tbl24_entry = {
+                               .valid = VALID,
+                               .ext_entry = 1,
+                               .depth = 0,
+                               { .tbl8_gindex = (uint8_t)tbl8_group_index, }
+               };
+
+               lpm->tbl24[tbl24_index] = new_tbl24_entry;
+
+       }
+       else { /*
+               * If it is valid, extended entry calculate the index into tbl8.
+               */
+               tbl8_group_index = lpm->tbl24[tbl24_index].tbl8_gindex;
+               tbl8_group_start = tbl8_group_index *
+                               RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+               tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
+
+               for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
+
+                       if (!lpm->tbl8[i].valid ||
+                                       lpm->tbl8[i].depth <= depth) {
+                               struct rte_lpm_tbl8_entry new_tbl8_entry = {
+                                       .valid = VALID,
+                                       /* BUGFIX: keep the group marked
+                                        * valid.  The old initializer left
+                                        * valid_group zeroed (INVALID), so
+                                        * assigning the whole struct
+                                        * released this in-use tbl8 group
+                                        * for reallocation by tbl8_alloc. */
+                                       .valid_group = VALID,
+                                       .depth = depth,
+                                       .next_hop = next_hop,
+                               };
+
+                               /*
+                                * Setting tbl8 entry in one go to avoid race
+                                * condition
+                                */
+                               lpm->tbl8[i] = new_tbl8_entry;
+                       }
+               }
+       }
+
+       return 0;
+}
+
+/*
+ * Add a route to the LPM table.
+ *
+ * lpm      : table handle
+ * ip       : network address (masked to 'depth' bits internally)
+ * depth    : prefix length, 1 .. 32
+ * next_hop : next hop to associate with the prefix
+ * Returns 0 on success, -EINVAL on bad arguments, or a negative errno
+ * from the rule table / tbl8 allocation on resource exhaustion.
+ */
+int
+rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
+               uint8_t next_hop)
+{
+       int32_t rule_index, status = 0;
+       uint32_t ip_masked = (ip & depth_to_mask(depth));
+
+       /* Check user arguments. */
+       if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
+               return -EINVAL;
+
+       /* Add the rule to the rule table. */
+       rule_index = rule_add(lpm, ip_masked, depth, next_hop);
+
+       /* If there is no space available for the new rule return error. */
+       if (rule_index < 0) {
+               return rule_index;
+       }
+
+       if (depth <= MAX_DEPTH_TBL24) {
+               status = add_depth_small(lpm, ip_masked, depth, next_hop);
+       }
+       else { /* If depth > RTE_LPM_MAX_DEPTH_TBL24 */
+               status = add_depth_big(lpm, ip_masked, depth, next_hop);
+
+               /*
+                * If add fails due to exhaustion of tbl8 extensions delete
+                * rule that was added to rule table.
+                */
+               if (status < 0) {
+                       rule_delete(lpm, rule_index, depth);
+
+                       return status;
+               }
+       }
+
+       return 0;
+}
+
+/*
+ * Find the most specific rule that is shorter than 'depth' and still covers
+ * 'ip'. Returns the rule's index, or -1 if no such rule exists.
+ */
+static inline int32_t
+find_previous_rule(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)
+{
+       int32_t rule_index;
+       uint8_t d = depth;
+
+       /* Walk down through every shorter prefix length, longest first. */
+       while (--d > 0) {
+               rule_index = rule_find(lpm, ip & depth_to_mask(d), d);
+
+               if (rule_index >= 0)
+                       return rule_index;
+       }
+
+       /* No less-specific rule covers this address. */
+       return -1;
+}
+
+/*
+ * Remove or replace the tbl24/tbl8 entries covered by a rule with
+ * depth <= MAX_DEPTH_TBL24. If sub_rule_index is -1 the covered entries
+ * are invalidated, otherwise they are overwritten with the data of the
+ * less-specific replacement rule.
+ */
+static inline int32_t
+delete_depth_small(struct rte_lpm *lpm, uint32_t ip_masked,
+       uint8_t depth, int32_t sub_rule_index)
+{
+       uint32_t tbl24_range, tbl24_index, tbl8_group_index, tbl8_index, i, j;
+       uint8_t new_depth;
+
+       /* Calculate the range and index into Table24. */
+       tbl24_range = depth_to_range(depth);
+       tbl24_index = (ip_masked >> 8);
+
+       /*
+        * Firstly check the sub_rule_index. A -1 indicates no replacement rule
+        * and a positive number indicates a sub_rule_index.
+        */
+       if (sub_rule_index < 0) {
+               /*
+                * If no replacement rule exists then invalidate entries
+                * associated with this rule. Entries with a depth no greater
+                * than the deleted rule's were set by it; deeper (more
+                * specific) entries are left untouched.
+                */
+               for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
+                       if (lpm->tbl24[i].ext_entry == 0 &&
+                                       lpm->tbl24[i].depth <= depth ) {
+                               lpm->tbl24[i].valid = INVALID;
+                       }
+                       else {
+                               /*
+                                * If TBL24 entry is extended, then there has
+                                * to be a rule with depth >= 25 in the
+                                * associated TBL8 group.
+                                */
+                               tbl8_group_index = lpm->tbl24[i].tbl8_gindex;
+                               tbl8_index = tbl8_group_index *
+                                               RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+
+                               for (j = tbl8_index; j < (tbl8_index +
+                                       RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
+
+                                       if (lpm->tbl8[j].depth <= depth)
+                                               lpm->tbl8[j].valid = INVALID;
+                               }
+                       }
+               }
+       }
+       else {
+               /*
+                * If a replacement rule exists then modify entries
+                * associated with this rule.
+                */
+
+               /*
+                * Calculate depth of sub_rule. NOTE(review): this assumes
+                * rules for depth d occupy rules_tbl indices
+                * [d * max_rules_per_depth, ...) — confirm against rule_add's
+                * index layout.
+                */
+               new_depth = (uint8_t) (sub_rule_index /
+                               lpm->max_rules_per_depth);
+
+               struct rte_lpm_tbl24_entry new_tbl24_entry = {
+                       .valid = VALID,
+                       .ext_entry = 0,
+                       .depth = new_depth,
+                       {.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,}
+               };
+
+               struct rte_lpm_tbl8_entry new_tbl8_entry = {
+                       .valid = VALID,
+                       .depth = new_depth,
+                       .next_hop = lpm->rules_tbl
+                       [sub_rule_index].next_hop,
+               };
+
+               for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
+
+                       if (lpm->tbl24[i].ext_entry == 0 &&
+                                       lpm->tbl24[i].depth <= depth ) {
+                               /* Entry written whole to avoid torn reads. */
+                               lpm->tbl24[i] = new_tbl24_entry;
+                       }
+                       else {
+                               /*
+                                * If TBL24 entry is extended, then there has
+                                * to be a rule with depth >= 25 in the
+                                * associated TBL8 group.
+                                */
+
+                               tbl8_group_index = lpm->tbl24[i].tbl8_gindex;
+                               tbl8_index = tbl8_group_index *
+                                               RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+
+                               for (j = tbl8_index; j < (tbl8_index +
+                                       RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
+
+                                       if (lpm->tbl8[j].depth <= depth)
+                                               lpm->tbl8[j] = new_tbl8_entry;
+                               }
+                       }
+               }
+       }
+
+       return 0;
+}
+
+/*
+ * Function Name: tbl8_recycle_check
+ * Usage       : Checks if table 8 group can be recycled.
+ *
+ * Return of -EEXIST means tbl8 is in use and thus can not be recycled.
+ * Return of -EINVAL means tbl8 is empty and thus can be recycled
+ * Return of value > -1 means tbl8 is in use but has all the same values and
+ * thus can be recycled
+ */
+static inline int32_t
+tbl8_recycle_check(struct rte_lpm_tbl8_entry *tbl8, uint32_t tbl8_group_start)
+{
+       uint32_t tbl8_group_end, i;
+       tbl8_group_end = tbl8_group_start + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+
+       /*
+        * Check the first entry of the given tbl8. If it is invalid we know
+        * this tbl8 does not contain any rule with a depth < RTE_LPM_MAX_DEPTH
+        *  (As they would affect all entries in a tbl8) and thus this table
+        *  can not be recycled.
+        */
+       if (tbl8[tbl8_group_start].valid) {
+               /*
+                * If first entry is valid check if the depth is less than 24
+                * and if so check the rest of the entries to verify that they
+                * are all of this depth.
+                *
+                * NOTE(review): the strict '<' means a group whose entries
+                * all share depth == MAX_DEPTH_TBL24 is reported as in-use
+                * rather than recyclable — confirm this is intended.
+                */
+               if (tbl8[tbl8_group_start].depth < MAX_DEPTH_TBL24) {
+                       for (i = (tbl8_group_start + 1); i < tbl8_group_end;
+                                       i++) {
+
+                               if (tbl8[i].depth !=
+                                               tbl8[tbl8_group_start].depth) {
+
+                                       return -EEXIST;
+                               }
+                       }
+                       /* If all entries are the same return the tbl8 index */
+                       return tbl8_group_start;
+               }
+
+               return -EEXIST;
+       }
+       /*
+        * If the first entry is invalid check if the rest of the entries in
+        * the tbl8 are invalid.
+        */
+       for (i = (tbl8_group_start + 1); i < tbl8_group_end; i++) {
+               if (tbl8[i].valid)
+                       return -EEXIST;
+       }
+       /* If no valid entries are found then return -EINVAL. */
+       return -EINVAL;
+}
+
+/*
+ * Remove or replace the tbl8 entries covered by a rule with
+ * depth > MAX_DEPTH_TBL24, then recycle the tbl8 group (and fix up the
+ * owning tbl24 entry) if the group ends up empty or uniform.
+ */
+static inline int32_t
+delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
+       uint8_t depth, int32_t sub_rule_index)
+{
+       uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index,
+                       tbl8_range, i;
+       uint8_t new_depth;
+       int32_t tbl8_recycle_index;
+
+       /*
+        * Calculate the index into tbl24 and range. Note: All depths larger
+        * than MAX_DEPTH_TBL24 are associated with only one tbl24 entry.
+        */
+       tbl24_index = ip_masked >> 8;
+
+       /* Calculate the index into tbl8 and range. */
+       tbl8_group_index = lpm->tbl24[tbl24_index].tbl8_gindex;
+       tbl8_group_start = tbl8_group_index * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+       tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
+       tbl8_range = depth_to_range(depth);
+
+       if (sub_rule_index < 0) {
+               /*
+                * Loop through the range of entries on tbl8 for which the
+                * rule_to_delete must be removed or modified. Deeper (more
+                * specific) entries are left untouched.
+                */
+               for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
+                       if (lpm->tbl8[i].depth <= depth)
+                               lpm->tbl8[i].valid = INVALID;
+               }
+       }
+       else {
+               /*
+                * NOTE(review): assumes rules for depth d occupy rules_tbl
+                * indices [d * max_rules_per_depth, ...) — confirm against
+                * rule_add's index layout.
+                */
+               new_depth = (uint8_t)(sub_rule_index /
+                               lpm->max_rules_per_depth);
+
+               /* Set new tbl8 entry. */
+               struct rte_lpm_tbl8_entry new_tbl8_entry = {
+                       .valid = VALID,
+                       .depth = new_depth,
+                       .next_hop = lpm->rules_tbl[sub_rule_index].next_hop,
+               };
+
+               /*
+                * Loop through the range of entries on tbl8 for which the
+                * rule_to_delete must be modified.
+                */
+               for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
+                       if (lpm->tbl8[i].depth <= depth)
+                               lpm->tbl8[i] = new_tbl8_entry;
+               }
+       }
+
+       /*
+        * Check if there are any valid entries in this tbl8 group. If all
+        * tbl8 entries are invalid we can free the tbl8 and invalidate the
+        * associated tbl24 entry.
+        */
+
+       tbl8_recycle_index = tbl8_recycle_check(lpm->tbl8, tbl8_group_start);
+
+       if (tbl8_recycle_index == -EINVAL){
+               /* Set tbl24 before freeing tbl8 to avoid race condition. */
+               lpm->tbl24[tbl24_index].valid = 0;
+               tbl8_free(lpm->tbl8, tbl8_group_start);
+       }
+       else if (tbl8_recycle_index > -1) {
+               /* Update tbl24 entry. */
+               struct rte_lpm_tbl24_entry new_tbl24_entry = {
+                       .valid = VALID,
+                       .ext_entry = 0,
+                       .depth = lpm->tbl8[tbl8_recycle_index].depth,
+                       { .next_hop = lpm->tbl8[tbl8_recycle_index].next_hop, }
+               };
+
+               /* Set tbl24 before freeing tbl8 to avoid race condition. */
+               lpm->tbl24[tbl24_index] = new_tbl24_entry;
+               tbl8_free(lpm->tbl8, tbl8_group_start);
+       }
+
+       return 0;
+}
+
+/*
+ * Function Name: rte_lpm_delete
+ * Usage       : Deletes a rule
+ *
+ *(IN) lpm_handle,
+ *(IN) ip
+ *(IN) depth
+ *
+ * Returns 0 on success, -EINVAL on invalid arguments, -E_RTE_NO_TAILQ
+ * if no rule with the given (ip, depth) exists.
+ */
+int
+rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)
+{
+       int32_t rule_to_delete_index, sub_rule_index;
+       uint32_t ip_masked;
+       /*
+        * Check input arguments. Note: IP must be a positive integer of 32
+        * bits in length therefore it need not be checked.
+        */
+       if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH)) {
+               return -EINVAL;
+       }
+
+       ip_masked = ip & depth_to_mask(depth);
+
+       /*
+        * Find the index of the input rule, that needs to be deleted, in the
+        * rule table.
+        */
+       rule_to_delete_index = rule_find(lpm, ip_masked, depth);
+
+       /*
+        * Check if rule_to_delete_index was found. If no rule was found the
+        * function rule_find returns -E_RTE_NO_TAILQ.
+        */
+       if (rule_to_delete_index < 0)
+               return -E_RTE_NO_TAILQ;
+
+       /* Delete the rule from the rule table. */
+       rule_delete(lpm, rule_to_delete_index, depth);
+
+       /*
+        * Find rule to replace the rule_to_delete. If there is no rule to
+        * replace the rule_to_delete we return -1 and invalidate the table
+        * entries associated with this rule.
+        */
+       sub_rule_index = find_previous_rule(lpm, ip, depth);
+
+       /*
+        * If the input depth value is less than 25 use function
+        * delete_depth_small otherwise use delete_depth_big.
+        */
+       if (depth <= MAX_DEPTH_TBL24) {
+               return delete_depth_small(lpm, ip_masked, depth,
+                               sub_rule_index);
+       }
+       else { /* If depth > MAX_DEPTH_TBL24 */
+               return delete_depth_big(lpm, ip_masked, depth, sub_rule_index);
+       }
+}
+
+/*
+ * Function Name: rte_lpm_delete_all
+ * Usage       : Delete all rules from the LPM table.
+ *
+ *(IN) lpm_handle
+ */
+void
+rte_lpm_delete_all(struct rte_lpm *lpm)
+{
+       /* Zero used rules counter. */
+       memset(lpm->used_rules_at_depth, 0, sizeof(lpm->used_rules_at_depth));
+
+       /* Zero tbl24. */
+       memset(lpm->tbl24, 0, sizeof(lpm->tbl24));
+
+       /* Zero tbl8. */
+       memset(lpm->tbl8, 0, sizeof(lpm->tbl8));
+
+       /* Delete all rules from the rules table. rules_tbl is a flexible
+        * array member, so its size is computed from the rule capacity
+        * rather than taken with sizeof. */
+       memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) *
+                       (lpm->max_rules_per_depth * RTE_LPM_MAX_DEPTH));
+}
+
diff --git a/lib/librte_lpm/rte_lpm.h b/lib/librte_lpm/rte_lpm.h
new file mode 100644 (file)
index 0000000..e74d70e
--- /dev/null
@@ -0,0 +1,288 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#ifndef _RTE_LPM_H_
+#define _RTE_LPM_H_
+
+/**
+ * @file
+ * RTE Longest Prefix Match (LPM)
+ */
+
+#include <errno.h>
+#include <sys/queue.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <rte_branch_prediction.h>
+#include <rte_memory.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** Max number of characters in LPM name. */
+#define RTE_LPM_NAMESIZE       32
+
+/** Possible location to allocate memory. */
+#define RTE_LPM_HEAP   0
+
+/** Possible location to allocate memory. */
+#define RTE_LPM_MEMZONE        1
+
+/** Maximum depth value possible for IPv4 LPM. */
+#define RTE_LPM_MAX_DEPTH 32
+
+/** Total number of tbl24 entries (one per possible 24-bit prefix). */
+#define RTE_LPM_TBL24_NUM_ENTRIES (1 << 24)
+
+/** Number of entries in a tbl8 group (one per value of the last octet). */
+#define RTE_LPM_TBL8_GROUP_NUM_ENTRIES 256
+
+/** Total number of tbl8 groups in the tbl8. */
+#define RTE_LPM_TBL8_NUM_GROUPS 256
+
+/** Total number of tbl8 entries. */
+#define RTE_LPM_TBL8_NUM_ENTRIES (RTE_LPM_TBL8_NUM_GROUPS *                   \
+                                       RTE_LPM_TBL8_GROUP_NUM_ENTRIES)
+
+/**
+ * Macro to enable/disable run-time checks. Expands to nothing unless
+ * RTE_LIBRTE_LPM_DEBUG is defined at build time.
+ */
+#if defined(RTE_LIBRTE_LPM_DEBUG)
+#define RTE_LPM_RETURN_IF_TRUE(cond, retval) do {                             \
+       if (cond) return (retval);                                            \
+} while (0)
+#else
+#define RTE_LPM_RETURN_IF_TRUE(cond, retval)
+#endif
+
+/** Tbl24 entry structure. */
+struct rte_lpm_tbl24_entry {
+       /* Using single uint8_t to store 3 values. */
+       uint8_t valid     :1; /**< Validation flag. */
+       uint8_t ext_entry :1; /**< 0: next_hop stored; 1: tbl8_gindex stored. */
+       uint8_t depth     :6; /**< Rule depth. */
+       /* Stores next hop or group index (i.e. gindex) into tbl8. */
+       union {
+               uint8_t next_hop;
+               uint8_t tbl8_gindex;
+       };
+};
+
+/** Tbl8 entry structure. */
+struct rte_lpm_tbl8_entry {
+       /* Using single uint8_t to store 3 values. */
+       uint8_t valid       :1; /**< Validation flag. */
+       uint8_t valid_group :1; /**< Group validation flag. */
+       uint8_t depth       :6; /**< Rule depth. */
+       uint8_t next_hop; /**< next hop. */
+};
+
+/** Rule structure. */
+struct rte_lpm_rule {
+       uint32_t ip; /**< Rule IP address. */
+       uint8_t  next_hop; /**< Rule next hop. */
+};
+
+/** LPM structure. */
+struct rte_lpm {
+       TAILQ_ENTRY(rte_lpm) next;      /**< Next in list. */
+
+       /* LPM metadata. */
+       char name[RTE_LPM_NAMESIZE];        /**< Name of the lpm. */
+       int mem_location; /**< Location of memory to be allocated. */
+       uint32_t max_rules_per_depth; /**< Max. balanced rules per lpm. */
+       uint32_t used_rules_at_depth[RTE_LPM_MAX_DEPTH]; /**< Rules / depth. */
+
+       /* LPM Tables. */
+       struct rte_lpm_tbl24_entry tbl24[RTE_LPM_TBL24_NUM_ENTRIES] \
+                       __rte_cache_aligned; /**< LPM tbl24 table. */
+       struct rte_lpm_tbl8_entry tbl8[RTE_LPM_TBL8_NUM_ENTRIES] \
+                       __rte_cache_aligned; /**< LPM tbl8 table. */
+       struct rte_lpm_rule rules_tbl[0] \
+                       __rte_cache_aligned; /**< LPM rules (flexible array;
+                       sized max_rules_per_depth * RTE_LPM_MAX_DEPTH). */
+};
+
+/**
+ * Create an LPM object.
+ *
+ * @param name
+ *   LPM object name
+ * @param socket_id
+ *   NUMA socket ID for LPM table memory allocation
+ * @param max_rules
+ *   Maximum number of LPM rules that can be added
+ * @param mem_location
+ *   Location of memory to be allocated. Can only be RTE_LPM_HEAP or
+ *   RTE_LPM_MEMZONE
+ * @return
+ *   Handle to LPM object on success, NULL otherwise with rte_errno set
+ *   to an appropriate value. Possible rte_errno values include:
+ *    - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
+ *    - E_RTE_SECONDARY - function was called from a secondary process instance
+ *    - E_RTE_NO_TAILQ - no tailq list could be got for the lpm object list
+ *    - EINVAL - invalid parameter passed to function
+ *    - ENOSPC - the maximum number of memzones has already been allocated
+ *    - EEXIST - a memzone with the same name already exists
+ *    - ENOMEM - no appropriate memory area found in which to create memzone
+ */
+struct rte_lpm *
+rte_lpm_create(const char *name, int socket_id, int max_rules,
+               int mem_location);
+
+/**
+ * Find an existing LPM object and return a pointer to it.
+ *
+ * @param name
+ *   Name of the lpm object as passed to rte_lpm_create()
+ * @return
+ *   Pointer to lpm object or NULL if object not found with rte_errno
+ *   set appropriately. Possible rte_errno values include:
+ *    - ENOENT - required entry not available to return.
+ */
+struct rte_lpm *
+rte_lpm_find_existing(const char *name);
+
+/**
+ * Free an LPM object.
+ *
+ * @param lpm
+ *   LPM object handle
+ * @return
+ *   None
+ */
+void
+rte_lpm_free(struct rte_lpm *lpm);
+
+/**
+ * Add a rule to the LPM table.
+ *
+ * @param lpm
+ *   LPM object handle
+ * @param ip
+ *   IP of the rule to be added to the LPM table
+ * @param depth
+ *   Depth of the rule to be added to the LPM table
+ * @param next_hop
+ *   Next hop of the rule to be added to the LPM table
+ * @return
+ *   0 on success, negative value otherwise
+ */
+int
+rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth, uint8_t next_hop);
+
+/**
+ * Delete a rule from the LPM table.
+ *
+ * @param lpm
+ *   LPM object handle
+ * @param ip
+ *   IP of the rule to be deleted from the LPM table
+ * @param depth
+ *   Depth of the rule to be deleted from the LPM table
+ * @return
+ *   0 on success, negative value otherwise
+ */
+int
+rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth);
+
+/**
+ * Delete all rules from the LPM table.
+ *
+ * @param lpm
+ *   LPM object handle
+ */
+void
+rte_lpm_delete_all(struct rte_lpm *lpm);
+
+/**
+ * Lookup an IP into the LPM table.
+ *
+ * @param lpm
+ *   LPM object handle
+ * @param ip
+ *   IP to be looked up in the LPM table
+ * @param next_hop
+ *   Next hop of the most specific rule found for IP (valid on lookup hit only)
+ * @return
+ *   -EINVAL for incorrect arguments, -ENOENT on lookup miss, 0 on lookup hit
+ */
+static inline int
+rte_lpm_lookup(struct rte_lpm *lpm, uint32_t ip, uint8_t *next_hop)
+{
+       uint32_t tbl24_index, tbl8_group_index, tbl8_index;
+
+       /* DEBUG: Check user input arguments. */
+       RTE_LPM_RETURN_IF_TRUE(((lpm == NULL) || (next_hop == NULL)), -EINVAL);
+
+       /* Calculate index into tbl24 (top 24 bits of the address). */
+       tbl24_index = (ip >> 8);
+
+       /*
+        * Use the tbl24_index to access the required tbl24 entry then check if
+        * the tbl24 entry is INVALID, if so return -ENOENT.
+        */
+       if (!lpm->tbl24[tbl24_index].valid){
+               return -ENOENT; /* Lookup miss. */
+       }
+       /*
+        * If tbl24 entry is valid check if it is NOT extended (i.e. it does
+        * not use a tbl8 extension) if so return the next hop.
+        */
+       if (likely(lpm->tbl24[tbl24_index].ext_entry == 0)) {
+               *next_hop = lpm->tbl24[tbl24_index].next_hop;
+               return 0; /* Lookup hit. */
+       }
+
+       /*
+        * If tbl24 entry is valid and extended calculate the index into the
+        * tbl8 entry.
+        */
+       tbl8_group_index = lpm->tbl24[tbl24_index].tbl8_gindex;
+       tbl8_index = (tbl8_group_index * RTE_LPM_TBL8_GROUP_NUM_ENTRIES) +
+                       (ip & 0xFF);
+
+       /* Check if the tbl8 entry is invalid and if so return -ENOENT. */
+       if (!lpm->tbl8[tbl8_index].valid)
+               return -ENOENT;/* Lookup miss. */
+
+       /* If the tbl8 entry is valid return the next_hop. */
+       *next_hop = lpm->tbl8[tbl8_index].next_hop;
+       return 0; /* Lookup hit. */
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_LPM_H_ */
diff --git a/lib/librte_malloc/Makefile b/lib/librte_malloc/Makefile
new file mode 100644 (file)
index 0000000..8518c3b
--- /dev/null
@@ -0,0 +1,50 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_malloc.a
+
+CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3
+
+# all source are stored in SRCS-y
+SRCS-$(CONFIG_RTE_LIBRTE_MALLOC) := rte_malloc.c malloc_elem.c malloc_heap.c
+
+# install includes
+SYMLINK-$(CONFIG_RTE_LIBRTE_MALLOC)-include := rte_malloc.h
+
+# this lib needs eal
+DEPDIRS-$(CONFIG_RTE_LIBRTE_MALLOC) += lib/librte_eal
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_malloc/malloc_elem.c b/lib/librte_malloc/malloc_elem.c
new file mode 100644 (file)
index 0000000..1c90908
--- /dev/null
@@ -0,0 +1,280 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+#include <stdint.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <sys/queue.h>
+
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_launch.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_debug.h>
+#include <rte_common.h>
+#include <rte_spinlock.h>
+
+#include "malloc_elem.h"
+#include "malloc_heap.h"
+
+#define MIN_DATA_SIZE (CACHE_LINE_SIZE * 2)
+
+/*
+ * Initialise a general malloc_elem header for a free element of the given
+ * total size, then write its protective header/trailer cookies.
+ */
+void
+malloc_elem_init(struct malloc_elem *elem,
+               struct malloc_heap *heap, size_t size)
+{
+       elem->heap = heap;
+       elem->prev = NULL;
+       elem->next_free = NULL;
+       elem->state = ELEM_FREE;
+       elem->size = size;
+       elem->pad = 0;
+       set_header(elem);
+       set_trailer(elem);
+}
+
+/*
+ * initialise a dummy malloc_elem header for the end-of-memzone marker.
+ * The marker is permanently BUSY so the real last element can never be
+ * merged past the end of the memzone.
+ */
+void
+malloc_elem_mkend(struct malloc_elem *elem, struct malloc_elem *prev)
+{
+       malloc_elem_init(elem, prev->heap, 0);
+       elem->prev = prev;
+       elem->state = ELEM_BUSY; /* mark busy so its never merged */
+}
+
+/*
+ * calculate the starting point of where data of the requested size
+ * and alignment would fit in the current element. If the data doesn't
+ * fit, return NULL.
+ *
+ * Data is placed as high as possible in the element: the end point is
+ * pinned at the element's trailer and the data start is aligned downward
+ * from there.
+ */
+static void *
+elem_start_pt(struct malloc_elem *elem, size_t size, unsigned align)
+{
+       const uintptr_t end_pt = (uintptr_t)elem +
+                       elem->size - MALLOC_ELEM_TRAILER_LEN;
+       /* NOTE(review): (end_pt - size) can wrap for size > end_pt; the
+        * range check below appears to reject the wrapped result, but
+        * confirm behaviour for very large 'size' values. */
+       const uintptr_t new_data_start = rte_align_floor_int((end_pt - size),align);
+       const uintptr_t new_elem_start = new_data_start - MALLOC_ELEM_HEADER_LEN;
+
+       /* if the new start point is before the exist start, it won't fit */
+       return (new_elem_start < (uintptr_t)elem) ? NULL : (void *)new_elem_start;
+}
+
+/*
+ * Report whether a request of the given size and alignment could be
+ * satisfied from this element (placement rule as per elem_start_pt).
+ */
+int
+malloc_elem_can_hold(struct malloc_elem *elem, size_t size, unsigned align)
+{
+       void *start = elem_start_pt(elem, size, align);
+
+       return start != NULL;
+}
+
+/*
+ * split an existing element into two smaller elements at the given
+ * split_pt parameter. The tail (from split_pt onward) becomes a new FREE
+ * element; the head keeps the original element's state.
+ */
+static void
+split_elem(struct malloc_elem *elem, struct malloc_elem *split_pt)
+{
+       struct malloc_elem *next_elem = RTE_PTR_ADD(elem, elem->size);
+       const unsigned old_elem_size = (uintptr_t)split_pt - (uintptr_t)elem;
+       const unsigned new_elem_size = elem->size - old_elem_size;
+
+       malloc_elem_init(split_pt, elem->heap, new_elem_size);
+       split_pt->prev = elem;
+       /* the element after us must now back-point to the new tail element */
+       next_elem->prev = split_pt;
+       elem->size = old_elem_size;
+       /* shrinking moved our trailer, so rewrite its cookie */
+       set_trailer(elem);
+}
+
+/*
+ * reserve a block of data in an existing malloc_elem. If the malloc_elem
+ * is much larger than the data block requested, we split the element in two.
+ * This function is only called from malloc_heap_alloc so parameter checking
+ * is not done here, as it's done there previously.
+ *
+ * NOTE(review): new_elem is assumed non-NULL, i.e. the caller must have
+ * already verified fit (e.g. via malloc_elem_can_hold) — confirm.
+ */
+struct malloc_elem *
+malloc_elem_alloc(struct malloc_elem *elem, size_t size,
+               unsigned align, struct malloc_elem *prev_free)
+{
+       struct malloc_elem *new_elem = elem_start_pt(elem, size, align);
+       const unsigned old_elem_size = (uintptr_t)new_elem - (uintptr_t)elem;
+
+       if (old_elem_size <= MALLOC_ELEM_OVERHEAD + MIN_DATA_SIZE){
+               /* don't split it, pad the element instead */
+               elem->state = ELEM_BUSY;
+               elem->pad = old_elem_size;
+
+               /* put a dummy header in padding, to point to real element header */
+               if (elem->pad > 0){ /* pad will be at least 64-bytes, as everything
+                                    * is cache-line aligned */
+                       new_elem->pad = elem->pad;
+                       new_elem->state = ELEM_PAD;
+                       new_elem->size = elem->size - elem->pad;
+                       set_header(new_elem);
+               }
+               /* remove element from free list */
+               if (prev_free == NULL)
+                       elem->heap->free_head = elem->next_free;
+               else
+                       prev_free->next_free = elem->next_free;
+
+               return new_elem;
+       }
+
+       /* we are going to split the element in two. The original element
+        * remains free, and the new element is the one allocated, so no free list
+        * changes need to be made.
+        */
+       split_elem(elem, new_elem);
+       new_elem->state = ELEM_BUSY;
+
+       return new_elem;
+}
+
+/*
+ * join two struct malloc_elem together. elem1 and elem2 must
+ * be contiguous in memory, with elem2 immediately after elem1.
+ */
+static inline void
+join_elem(struct malloc_elem *elem1, struct malloc_elem *elem2)
+{
+       struct malloc_elem *next = RTE_PTR_ADD(elem2, elem2->size);
+       elem1->size += elem2->size;
+       /* element after elem2 must back-point to the merged element */
+       next->prev = elem1;
+}
+
+/*
+ * scan the free list, and remove the request element from that
+ * free list. (Free list to scan is got from heap pointer in element)
+ */
+static inline void
+remove_from_free_list(struct malloc_elem *elem)
+{
+       struct malloc_elem *scan;
+
+       /* head removal is a simple pointer swap */
+       if (elem->heap->free_head == elem) {
+               elem->heap->free_head = elem->next_free;
+               return;
+       }
+
+       /* otherwise find the element whose next_free link points at us */
+       for (scan = elem->heap->free_head; scan != NULL; scan = scan->next_free) {
+               if (scan->next_free == elem) {
+                       scan->next_free = elem->next_free;
+                       return;
+               }
+       }
+
+       /* element was not on the list at all: the free list is corrupt */
+       rte_panic("Corrupted free list\n");
+}
+
+/*
+ * free a malloc_elem block by adding it to the free list. If the
+ * blocks either immediately before or immediately after newly freed block
+ * are also free, the blocks are merged together.
+ *
+ * Returns 0 on success, -1 if the element's cookies are corrupt or it is
+ * not currently BUSY (e.g. a double free).
+ */
+int
+malloc_elem_free(struct malloc_elem *elem)
+{
+       /* validate before taking the lock; a bad element is rejected early */
+       if (!malloc_elem_cookies_ok(elem) || elem->state != ELEM_BUSY)
+               return -1;
+
+       rte_spinlock_lock(&(elem->heap->lock));
+       struct malloc_elem *next = RTE_PTR_ADD(elem, elem->size);
+       if (next->state == ELEM_FREE){
+               /* join to this one, and remove from free list */
+               join_elem(elem, next);
+               remove_from_free_list(next);
+       }
+
+       /* check if previous element is free, if so join with it and return,
+        * no need to update free list, as that element is already there.
+        * (elem's own header then becomes interior bytes of the merged
+        * previous element, so its state is not updated.)
+        */
+       if (elem->prev != NULL && elem->prev->state == ELEM_FREE)
+               join_elem(elem->prev, elem);
+       /* otherwise add ourselves to the free list */
+       else {
+               elem->next_free = elem->heap->free_head;
+               elem->heap->free_head = elem;
+               elem->state = ELEM_FREE;
+               elem->pad = 0;
+       }
+       rte_spinlock_unlock(&(elem->heap->lock));
+       return 0;
+}
+
+/*
+ * attempt to resize a malloc_elem by expanding into any free space
+ * immediately after it in memory. Returns 0 if the element now holds at
+ * least 'size' usable bytes, -1 if it cannot be grown in place.
+ */
+int
+malloc_elem_resize(struct malloc_elem *elem, size_t size)
+{
+       const size_t new_size = size + MALLOC_ELEM_OVERHEAD;
+       /* if we request a smaller size, then always return ok */
+       const size_t current_size = elem->size - elem->pad;
+       if (current_size >= new_size)
+               return 0;
+
+       struct malloc_elem *next = RTE_PTR_ADD(elem, elem->size);
+       rte_spinlock_lock(&elem->heap->lock);
+       /* can only grow if the immediately following element is free */
+       if (next ->state != ELEM_FREE)
+               goto err_return;
+       if (current_size + next->size < new_size)
+               goto err_return;
+
+       /* we now know the element fits, so join the two, then remove from free
+        * list
+        */
+       join_elem(elem, next);
+       remove_from_free_list(next);
+
+       if (elem->size - new_size > MIN_DATA_SIZE + MALLOC_ELEM_OVERHEAD){
+               /* now we have a big block together. Lets cut it down a bit, by splitting */
+               struct malloc_elem *split_pt = RTE_PTR_ADD(elem, new_size);
+               split_pt = RTE_ALIGN_CEIL(split_pt, CACHE_LINE_SIZE);
+               split_elem(elem, split_pt);
+               /* push the freed tail onto the free list */
+               split_pt->state = ELEM_FREE;
+               split_pt->next_free = elem->heap->free_head;
+               elem->heap->free_head = split_pt;
+       }
+       rte_spinlock_unlock(&elem->heap->lock);
+       return 0;
+
+err_return:
+       rte_spinlock_unlock(&elem->heap->lock);
+       return -1;
+}
diff --git a/lib/librte_malloc/malloc_elem.h b/lib/librte_malloc/malloc_elem.h
new file mode 100644 (file)
index 0000000..4328c37
--- /dev/null
@@ -0,0 +1,177 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#ifndef MALLOC_ELEM_H_
+#define MALLOC_ELEM_H_
+
+/* dummy definition of struct so we can use pointers to it in malloc_elem struct */
+struct malloc_heap;
+
+enum elem_state {
+       ELEM_FREE = 0, /* element is on the heap's free list */
+       ELEM_BUSY, /* element is allocated to a caller */
+       ELEM_PAD  /* element is a padding-only header */
+};
+
+struct malloc_elem {
+       struct malloc_heap *heap; /* heap this element belongs to */
+       struct malloc_elem *volatile prev;      /* points to prev elem in memzone */
+       struct malloc_elem *volatile next_free; /* to make list of free elements */
+       volatile enum elem_state state;
+       uint32_t pad; /* for ELEM_PAD: distance back to the real element (see malloc_elem_from_data) */
+       volatile size_t size; /* total element size incl. header, pad and trailer */
+#ifdef RTE_LIBRTE_MALLOC_DEBUG
+       uint64_t header_cookie;         /* Cookie marking start of data */
+                                       /* trailer cookie at start + size */
+#endif
+} __rte_cache_aligned;
+
+#ifndef RTE_LIBRTE_MALLOC_DEBUG
+static const unsigned MALLOC_ELEM_TRAILER_LEN = 0; /* no trailer in non-debug builds */
+
+/* dummy function - just check if pointer is non-null */
+static inline int
+malloc_elem_cookies_ok(struct malloc_elem *elem){ return elem != NULL; }
+
+/* dummy function - no header if malloc_debug is not enabled */
+static inline void
+set_header(struct malloc_elem *elem __rte_unused){ }
+
+/* dummy function - no trailer if malloc_debug is not enabled */
+static inline void
+set_trailer(struct malloc_elem *elem __rte_unused){ }
+
+
+#else
+static const unsigned MALLOC_ELEM_TRAILER_LEN = CACHE_LINE_SIZE; /* one cache line reserved for the trailer cookie */
+
+#define MALLOC_HEADER_COOKIE   0xbadbadbadadd2e55ULL /**< Header cookie. */
+#define MALLOC_TRAILER_COOKIE  0xadd2e55badbadbadULL /**< Trailer cookie.*/
+
+/* define macros to make referencing the header and trailer cookies easier */
+#define MALLOC_ELEM_TRAILER(elem) (*((uint64_t*)RTE_PTR_ADD(elem, \
+               elem->size - MALLOC_ELEM_TRAILER_LEN)))
+#define MALLOC_ELEM_HEADER(elem) (elem->header_cookie)
+
+static inline void
+set_header(struct malloc_elem *elem) /* stamp the header cookie (NULL-safe) */
+{
+       if (elem != NULL)
+               MALLOC_ELEM_HEADER(elem) = MALLOC_HEADER_COOKIE;
+}
+
+static inline void
+set_trailer(struct malloc_elem *elem) /* stamp the trailer cookie (NULL-safe) */
+{
+       if (elem != NULL)
+               MALLOC_ELEM_TRAILER(elem) = MALLOC_TRAILER_COOKIE;
+}
+
+/* check that the header and trailer cookies are set correctly */
+static inline int
+malloc_elem_cookies_ok(struct malloc_elem *elem)
+{
+       return (elem != NULL &&
+                       MALLOC_ELEM_HEADER(elem) == MALLOC_HEADER_COOKIE &&
+                       MALLOC_ELEM_TRAILER(elem) == MALLOC_TRAILER_COOKIE);
+}
+
+#endif
+
+static const unsigned MALLOC_ELEM_HEADER_LEN = sizeof(struct malloc_elem);
+#define MALLOC_ELEM_OVERHEAD (MALLOC_ELEM_HEADER_LEN + MALLOC_ELEM_TRAILER_LEN)
+
+/*
+ * Given a pointer to the start of a memory block returned by malloc, get
+ * the actual malloc_elem header for that block.
+ */
+static inline struct malloc_elem *
+malloc_elem_from_data(void *data)
+{
+       if (data == NULL)
+               return NULL;
+
+       struct malloc_elem *elem = RTE_PTR_SUB(data, MALLOC_ELEM_HEADER_LEN); /* header sits immediately before the data */
+       if (!malloc_elem_cookies_ok(elem)) /* rejects NULL and, in debug builds, corrupted cookies */
+               return NULL;
+       return elem->state != ELEM_PAD ? elem:  RTE_PTR_SUB(elem, elem->pad); /* skip back over a padding-only header */
+}
+
+/*
+ * initialise a malloc_elem header
+ */
+void
+malloc_elem_init(struct malloc_elem *elem,
+               struct malloc_heap *heap,
+               size_t size);
+
+/*
+ * initialise a dummy malloc_elem header for the end-of-memzone marker
+ */
+void
+malloc_elem_mkend(struct malloc_elem *elem,
+               struct malloc_elem *prev_free);
+
+/*
+ * return true if the current malloc_elem can hold a block of data
+ * of the requested size and with the requested alignment
+ */
+int
+malloc_elem_can_hold(struct malloc_elem *elem, size_t size, unsigned align);
+
+/*
+ * reserve a block of data in an existing malloc_elem. If the malloc_elem
+ * is much larger than the data block requested, we split the element in two.
+ */
+struct malloc_elem *
+malloc_elem_alloc(struct malloc_elem *elem, size_t size,
+               unsigned align, struct malloc_elem *prev_free);
+
+/*
+ * free a malloc_elem block by adding it to the free list. If the
+ * blocks either immediately before or immediately after newly freed block
+ * are also free, the blocks are merged together.
+ */
+int
+malloc_elem_free(struct malloc_elem *elem);
+
+/*
+ * attempt to resize a malloc_elem by expanding into any free space
+ * immediately after it in memory.
+ */
+int
+malloc_elem_resize(struct malloc_elem *elem, size_t size);
+
+#endif /* MALLOC_ELEM_H_ */
diff --git a/lib/librte_malloc/malloc_heap.c b/lib/librte_malloc/malloc_heap.c
new file mode 100644 (file)
index 0000000..3f621ab
--- /dev/null
@@ -0,0 +1,181 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+#include <stdint.h>
+#include <stddef.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <sys/queue.h>
+
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_launch.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_common.h>
+#include <rte_string_fns.h>
+#include <rte_spinlock.h>
+
+#include "malloc_elem.h"
+#include "malloc_heap.h"
+
+#define QUOTE_(x) #x
+#define QUOTE(x) QUOTE_(x)
+/* since the memzone size starts with a digit, it will appear unquoted in
+ * rte_config.h, so quote it so it can be passed to rte_str_to_size */
+#define MALLOC_MEMZONE_SIZE QUOTE(RTE_MALLOC_MEMZONE_SIZE)
+
+/*
+ * returns the configuration setting for the memzone size as a size_t value
+ */
+static inline size_t
+get_malloc_memzone_size(void)
+{
+       return rte_str_to_size(MALLOC_MEMZONE_SIZE); /* convert the quoted config string into a byte count */
+}
+
+/*
+ * reserve an extra memory zone and make it available for use by a particular
+ * heap. This reserves the zone and sets a dummy malloc_elem header at the end
+ * to prevent overflow. The rest of the zone is added to free list as a single
+ * large free block
+ */
+static int
+malloc_heap_add_memzone(struct malloc_heap *heap, size_t size, unsigned align)
+{
+       const unsigned mz_flags = 0;
+       const size_t min_size = get_malloc_memzone_size(); /* configured minimum zone size */
+       /* ensure the data we want to allocate will fit in the memzone */
+       size_t mz_size = size + align + MALLOC_ELEM_OVERHEAD * 2; /* room for start+end headers and worst-case alignment */
+       if (mz_size < min_size)
+               mz_size = min_size;
+
+       char mz_name[RTE_MEMZONE_NAMESIZE];
+       rte_snprintf(mz_name, sizeof(mz_name), "MALLOC_S%u_HEAP_%u",
+                       heap->numa_socket, heap->mz_count++); /* unique per socket and zone count */
+       const struct rte_memzone *mz = rte_memzone_reserve(mz_name, mz_size,
+                       heap->numa_socket, mz_flags);
+       if (mz == NULL)
+               return -1; /* no memzone slot or hugepage memory left */
+
+       /* allocate the memory block headers, one at end, one at start */
+       struct malloc_elem *start_elem = (struct malloc_elem *)mz->addr;
+       struct malloc_elem *end_elem = RTE_PTR_ADD(mz->addr,
+                       mz_size - MALLOC_ELEM_OVERHEAD);
+       end_elem = RTE_ALIGN_FLOOR(end_elem, CACHE_LINE_SIZE); /* keep the end marker cache-line aligned */
+
+       const unsigned elem_size = (uintptr_t)end_elem - (uintptr_t)start_elem;
+       malloc_elem_init(start_elem, heap, elem_size);
+       malloc_elem_mkend(end_elem, start_elem); /* dummy header prevents running off the zone end */
+
+       start_elem->next_free = heap->free_head; /* whole zone enters the free list as one element */
+       heap->free_head = start_elem;
+       return 0;
+}
+
+/*
+ * initialise a malloc heap object. The heap is locked with a private
+ * lock while being initialised. This function should only be called the
+ * first time a thread calls malloc - if even then, as heaps are per-socket
+ * not per-thread.
+ */
+static void
+malloc_heap_init(struct malloc_heap *heap)
+{
+       static rte_spinlock_t init_lock = RTE_SPINLOCK_INITIALIZER; /* serialises first-time init across lcores */
+       rte_spinlock_lock(&init_lock);
+       if (!heap->initialised) { /* re-checked under the lock: another lcore may have initialised first */
+               heap->free_head = NULL;
+               heap->mz_count = 0;
+               heap->numa_socket = malloc_get_numa_socket();
+               rte_spinlock_init(&heap->lock);
+               heap->initialised = INITIALISED; /* set last, once the heap is usable */
+       }
+       rte_spinlock_unlock(&init_lock);
+}
+
+/*
+ * Iterates through the freelist for a heap to find a free element
+ * which can store data of the required size and with the requested alignment.
+ * Returns null on failure, or pointer to element on success, with the pointer
+ * to the previous element in the list, if any, being returned in a parameter
+ * (to make removing the element from the free list faster).
+ */
+static struct malloc_elem *
+find_suitable_element(struct malloc_heap *heap, size_t size,
+               unsigned align, struct malloc_elem **prev)
+{
+       struct malloc_elem *elem = heap->free_head; /* first-fit scan of the singly-linked free list */
+       *prev = NULL;
+       while(elem){
+               if (malloc_elem_can_hold(elem, size, align))
+                       break;
+               *prev = elem;
+               elem = elem->next_free;
+       }
+       return elem; /* NULL if no free element is large enough */
+}
+
+/*
+ * Main function called by malloc to allocate a block of memory from the
+ * heap. It locks the free list, scans it, and adds a new memzone if the
+ * scan fails. Once the new memzone is added, it re-scans and should return
+ * the new element after releasing the lock.
+ */
+void *
+malloc_heap_alloc(struct malloc_heap *heap,
+               const char *type __attribute__((unused)), size_t size, unsigned align)
+{
+       if (!heap->initialised) /* lazy init; malloc_heap_init re-checks under its own lock */
+               malloc_heap_init(heap);
+
+       size = CACHE_LINE_ROUNDUP(size);
+       align = CACHE_LINE_ROUNDUP(align); /* minimum granularity is one cache line */
+       rte_spinlock_lock(&heap->lock);
+
+       struct malloc_elem *prev, *elem = find_suitable_element(heap,
+                       size, align, &prev);
+       if (elem == NULL){ /* no fit: grow the heap by one memzone, then retry once */
+               malloc_heap_add_memzone(heap, size, align);
+               elem = find_suitable_element(heap, size, align, &prev);
+       }
+       if (elem != NULL)
+               elem = malloc_elem_alloc(elem, size, align, prev);
+       rte_spinlock_unlock(&heap->lock);
+       return elem == NULL ? NULL : (void *)(&elem[1]); /* data starts right after the element header */
+}
diff --git a/lib/librte_malloc/malloc_heap.h b/lib/librte_malloc/malloc_heap.h
new file mode 100644 (file)
index 0000000..cf599d9
--- /dev/null
@@ -0,0 +1,68 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#ifndef MALLOC_HEAP_H_
+#define MALLOC_HEAP_H_
+
+enum heap_state {
+       NOT_INITIALISED = 0, /* zero so statically-allocated heaps start uninitialised */
+       INITIALISED
+};
+
+struct malloc_heap {
+       enum heap_state initialised; /* set by malloc_heap_init on first use */
+       unsigned numa_socket; /* socket whose memzones back this heap */
+       volatile unsigned mz_count; /* memzones reserved so far (used for unique naming) */
+       rte_spinlock_t lock; /* protects the free list */
+       struct malloc_elem * volatile free_head; /* singly-linked list of free elements */
+} __rte_cache_aligned;
+
+#define RTE_MALLOC_SOCKET_DEFAULT      0
+
+static inline unsigned
+malloc_get_numa_socket(void)
+{
+       unsigned malloc_socket = RTE_MALLOC_SOCKET_DEFAULT;
+       #ifdef RTE_MALLOC_PER_NUMA_NODE
+               malloc_socket = rte_socket_id(); /* allocate from the calling lcore's own socket */
+       #endif
+       return malloc_socket;
+}
+
+void *
+malloc_heap_alloc(struct malloc_heap *heap, const char *type,
+               size_t size, unsigned align);
+
+#endif /* MALLOC_HEAP_H_ */
diff --git a/lib/librte_malloc/rte_malloc.c b/lib/librte_malloc/rte_malloc.c
new file mode 100644 (file)
index 0000000..4549884
--- /dev/null
@@ -0,0 +1,166 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <stdint.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/queue.h>
+
+#include <rte_memcpy.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_branch_prediction.h>
+#include <rte_debug.h>
+#include <rte_launch.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_common.h>
+#include <rte_spinlock.h>
+
+#include <rte_malloc.h>
+#include "malloc_elem.h"
+#include "malloc_heap.h"
+
+static struct malloc_heap malloc_heap[RTE_MAX_NUMA_NODES] = { /* one heap per NUMA socket */
+               { .initialised = NOT_INITIALISED } /* remaining entries zero-init to NOT_INITIALISED too */
+};
+
+/* Free the memory space back to heap */
+void rte_free(void *addr)
+{
+       if (addr == NULL) return; /* free(NULL) is a no-op, as in libc */
+       if (malloc_elem_free(malloc_elem_from_data(addr)) < 0)
+               rte_panic("Fatal error: Invalid memory\n"); /* pointer not from rte_malloc or element corrupted */
+}
+
+/*
+ * Allocate memory on default heap.
+ */
+void *
+rte_malloc(const char *type, size_t size, unsigned align)
+{
+       unsigned malloc_socket = malloc_get_numa_socket();
+       /* return NULL if size is 0 or alignment is not power-of-2 */
+       if (size == 0 || !rte_is_power_of_2(align)) /* NOTE(review): relies on rte_is_power_of_2(0) being true so align==0 passes - confirm */
+               return NULL;
+       return malloc_heap_alloc(&malloc_heap[malloc_socket], type,
+                       size, align == 0 ? 1 : align); /* align 0 means "no constraint": use 1 */
+}
+
+/*
+ * Allocate zero'd memory on default heap.
+ */
+void *
+rte_zmalloc(const char *type, size_t size, unsigned align)
+{
+       void *ptr = rte_malloc(type, size, align);
+
+       if (ptr != NULL)
+               memset(ptr, 0, size); /* zero only the requested bytes, not the rounded-up block */
+       return ptr;
+}
+
+/*
+ * Allocate zero'd memory on default heap.
+ */
+void *
+rte_calloc(const char *type, size_t num, size_t size, unsigned align)
+{
+       /* guard num*size against size_t overflow, which would silently under-allocate */
+       return (size != 0 && num > SIZE_MAX / size) ? NULL : rte_zmalloc(type, num * size, align);
+}
+
+/*
+ * Resize allocated memory.
+ */
+void *
+rte_realloc(void *ptr, size_t size, unsigned align)
+{
+       if (ptr == NULL)
+               return rte_malloc(NULL, size, align); /* realloc(NULL, ...) behaves like malloc */
+
+       struct malloc_elem *elem = malloc_elem_from_data(ptr);
+       if (elem == NULL)
+               rte_panic("Fatal error: memory corruption detected\n");
+
+       size = CACHE_LINE_ROUNDUP(size), align = CACHE_LINE_ROUNDUP(align);
+       /* check alignment matches first, and if ok, see if we can resize block */
+       if (RTE_ALIGN(ptr,align) == ptr &&
+                       malloc_elem_resize(elem, size) == 0)
+               return ptr; /* resized in place, data untouched */
+
+       /* either alignment is off, or we have no room to expand,
+        * so move data. */
+       void *new_ptr = rte_malloc(NULL, size, align);
+       if (new_ptr == NULL)
+               return NULL; /* old block left intact on failure, per realloc contract */
+       const unsigned old_size = elem->size - elem->pad - MALLOC_ELEM_OVERHEAD; /* usable data size; subtract pad as rte_malloc_validate does */
+       rte_memcpy(new_ptr, ptr, old_size < size ? old_size : size);
+       rte_free(ptr);
+
+       return new_ptr;
+}
+
+int
+rte_malloc_validate(void *ptr, size_t *size)
+{
+       struct malloc_elem *elem = malloc_elem_from_data(ptr);
+       if (!malloc_elem_cookies_ok(elem)) /* NULL elem (bad pointer) or, in debug builds, bad cookies */
+               return -1;
+       if (size != NULL)
+               *size = elem->size - elem->pad - MALLOC_ELEM_OVERHEAD; /* usable data bytes, excluding pad and overhead */
+       return 0;
+}
+/*
+ * TODO: Print stats on memory type. If type is NULL, info on all types is printed
+ */
+void
+rte_malloc_dump_stats(__rte_unused const char *type)
+{
+       return; /* stub: statistics dumping not implemented yet */
+}
+
+/*
+ * TODO: Set limit to memory that can be allocated to memory type
+ */
+int
+rte_malloc_set_limit(__rte_unused const char *type,
+               __rte_unused size_t max)
+{
+       return 0; /* stub: always reports success until limits are implemented */
+}
+
diff --git a/lib/librte_malloc/rte_malloc.h b/lib/librte_malloc/rte_malloc.h
new file mode 100644 (file)
index 0000000..29cff55
--- /dev/null
@@ -0,0 +1,212 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#ifndef _RTE_MALLOC_H_
+#define _RTE_MALLOC_H_
+
+/**
+ * @file
+ * RTE Malloc. This library provides methods for dynamically allocating memory
+ * from hugepages.
+ */
+
+#include <stddef.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * This function allocates memory from the huge-page area of memory. The memory
+ * is not cleared.
+ *
+ * @param type
+ *   A string identifying the type of allocated objects (useful for debug
+ *   purposes, such as identifying the cause of a memory leak). Can be NULL.
+ * @param size
+ *   Size (in bytes) to be allocated.
+ * @param align
+ *   If 0, the return is a pointer that is suitably aligned for any kind of
+ *   variable (in the same manner as malloc()).
+ *   Otherwise, the return is a pointer that is a multiple of *align*. In
+ *   this case, it must be a power of two. (Minimum alignment is the
+ *   cacheline size, i.e. 64-bytes)
+ * @return
+ *   - NULL on error. Not enough memory, or invalid arguments (size is 0,
+ *     align is not a power of two).
+ *   - Otherwise, the pointer to the allocated object.
+ */
+void *
+rte_malloc(const char *type, size_t size, unsigned align);
+
+/**
+ * Allocate zero'ed memory from the heap.
+ *
+ * Equivalent to rte_malloc() except that the memory zone is
+ * initialised with zeros.
+ *
+ * @param type
+ *   A string identifying the type of allocated objects (useful for debug
+ *   purposes, such as identifying the cause of a memory leak). Can be NULL.
+ * @param size
+ *   Size (in bytes) to be allocated.
+ * @param align
+ *   If 0, the return is a pointer that is suitably aligned for any kind of
+ *   variable (in the same manner as malloc()).
+ *   Otherwise, the return is a pointer that is a multiple of *align*. In
+ *   this case, it must obviously be a power of two. (Minimum alignment is the
+ *   cacheline size, i.e. 64-bytes)
+ * @return
+ *   - NULL on error. Not enough memory, or invalid arguments (size is 0,
+ *     align is not a power of two).
+ *   - Otherwise, the pointer to the allocated object.
+ */
+void *
+rte_zmalloc(const char *type, size_t size, unsigned align);
+
+/**
+ * Replacement function for calloc(), using huge-page memory. Memory area is
+ * initialised with zeros.
+ *
+ * @param type
+ *   A string identifying the type of allocated objects (useful for debug
+ *   purposes, such as identifying the cause of a memory leak). Can be NULL.
+ * @param num
+ *   Number of elements to be allocated.
+ * @param size
+ *   Size (in bytes) of a single element.
+ * @param align
+ *   If 0, the return is a pointer that is suitably aligned for any kind of
+ *   variable (in the same manner as malloc()).
+ *   Otherwise, the return is a pointer that is a multiple of *align*. In
+ *   this case, it must obviously be a power of two. (Minimum alignment is the
+ *   cacheline size, i.e. 64-bytes)
+ * @return
+ *   - NULL on error. Not enough memory, or invalid arguments (size is 0,
+ *     align is not a power of two).
+ *   - Otherwise, the pointer to the allocated object.
+ */
+void *
+rte_calloc(const char *type, size_t num, size_t size, unsigned align);
+
+/**
+ * Replacement function for realloc(), using huge-page memory. Reserved area
+ * memory is resized, preserving contents.
+ *
+ * @param ptr
+ *   Pointer to already allocated memory
+ * @param size
+ *   Size (in bytes) of new area. (NOTE(review): doc claimed "0 frees the memory", but the implementation does not free on size 0 - reconcile doc and code.)
+ * @param align
+ *   If 0, the return is a pointer that is suitably aligned for any kind of
+ *   variable (in the same manner as malloc()).
+ *   Otherwise, the return is a pointer that is a multiple of *align*. In
+ *   this case, it must obviously be a power of two. (Minimum alignment is the
+ *   cacheline size, i.e. 64-bytes)
+ * @return
+ *   - NULL on error. Not enough memory, or invalid arguments (size is 0,
+ *     align is not a power of two).
+ *   - Otherwise, the pointer to the reallocated memory.
+ */
+void *
+rte_realloc(void *ptr, size_t size, unsigned align);
+
+/**
+ * Frees the memory space pointed to by the provided pointer.
+ *
+ * This pointer must have been returned by a previous call to
+ * rte_malloc(), rte_zmalloc(), rte_calloc() or rte_realloc(). The behaviour of
+ * rte_free() is undefined if the pointer does not match this requirement.
+ *
+ * If the pointer is NULL, the function does nothing.
+ *
+ * @param ptr
+ *   The pointer to memory to be freed.
+ */
+void
+rte_free(void *ptr);
+
+/**
+ * If malloc debug is enabled, check a memory block for header
+ * and trailer markers to indicate that all is well with the block.
+ * If size is non-null, also return the size of the block.
+ *
+ * @param ptr
+ *   pointer to the start of a data block, must have been returned
+ *   by a previous call to rte_malloc(), rte_zmalloc(), rte_calloc()
+ *   or rte_realloc()
+ * @param size
+ *   if non-null, and memory block pointer is valid, returns the size
+ *   of the memory block
+ * @return
+ *   -1 on error, invalid pointer passed or header and trailer markers
+ *   are missing or corrupted
+ *   0 on success
+ */
+int
+rte_malloc_validate(void *ptr, size_t *size);
+
+/**
+ * Dump statistics.
+ *
+ * Dump for the specified type to the console. If the type argument is
+ * NULL, all memory types will be dumped.
+ *
+ * @param type
+ *   A string identifying the type of objects to dump, or NULL
+ *   to dump all objects.
+ */
+void
+rte_malloc_dump_stats(const char *type);
+
+/**
+ * Set the maximum amount of allocated memory for this type.
+ *
+ * @param type
+ *   A string identifying the type of allocated objects.
+ * @param max
+ *   The maximum amount of allocated bytes for this type.
+ * @return
+ *   - 0: Success.
+ *   - (-1): Error.
+ */
+int
+rte_malloc_set_limit(const char *type, size_t max);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_MALLOC_H_ */
diff --git a/lib/librte_mbuf/Makefile b/lib/librte_mbuf/Makefile
new file mode 100644 (file)
index 0000000..db9dc1f
--- /dev/null
@@ -0,0 +1,50 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+include $(RTE_SDK)/mk/rte.vars.mk # pull in the SDK build-environment variables
+
+# library name
+LIB = librte_mbuf.a
+
+CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3
+
+# all source are stored in SRCS-y
+SRCS-$(CONFIG_RTE_LIBRTE_MBUF) := rte_mbuf.c
+
+# install includes
+SYMLINK-$(CONFIG_RTE_LIBRTE_MBUF)-include := rte_mbuf.h
+
+# this lib needs eal
+DEPDIRS-$(CONFIG_RTE_LIBRTE_MBUF) += lib/librte_eal lib/librte_mempool
+
+include $(RTE_SDK)/mk/rte.lib.mk # standard rules for building a static library
diff --git a/lib/librte_mbuf/rte_mbuf.c b/lib/librte_mbuf/rte_mbuf.c
new file mode 100644 (file)
index 0000000..d011fda
--- /dev/null
@@ -0,0 +1,252 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <string.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <stdarg.h>
+#include <inttypes.h>
+#include <errno.h>
+#include <ctype.h>
+#include <sys/queue.h>
+
+#include <rte_debug.h>
+#include <rte_common.h>
+#include <rte_log.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_launch.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_ring.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_string_fns.h>
+
+/*
+ * ctrlmbuf constructor, given as a callback function to
+ * rte_mempool_create().
+ *
+ * Called once per pool element at pool creation: zeroes the whole
+ * element, then lays the data buffer out immediately after the
+ * struct rte_mbuf header inside the same mempool element.
+ */
+void
+rte_ctrlmbuf_init(struct rte_mempool *mp,
+                 __attribute__((unused)) void *opaque_arg,
+                 void *_m,
+                 __attribute__((unused)) unsigned i)
+{
+       struct rte_mbuf *m = _m;
+
+       memset(m, 0, mp->elt_size);
+
+       /* start of buffer is just after mbuf structure */
+       m->buf_addr = (char *)m + sizeof(struct rte_mbuf);
+       /* physical address mirrors the virtual layout: element base + header */
+       m->buf_physaddr = rte_mempool_virt2phy(mp, m) +
+                       sizeof(struct rte_mbuf);
+       m->buf_len = (uint16_t) (mp->elt_size - sizeof(struct rte_mbuf));
+
+       /* init some constant fields */
+       m->type = RTE_MBUF_CTRL;
+       m->ctrl.data = (char *)m->buf_addr;
+       m->pool = (struct rte_mempool *)mp;
+}
+
+/*
+ * pktmbuf pool constructor, given as a callback function to
+ * rte_mempool_create().
+ *
+ * Stores the per-mbuf data room size in the mempool private area.
+ * opaque_arg carries the requested room size as an integer smuggled
+ * through the pointer argument; 0 selects the default room of
+ * 2048 bytes plus RTE_PKTMBUF_HEADROOM.
+ */
+void
+rte_pktmbuf_pool_init(struct rte_mempool *mp, void *opaque_arg)
+{
+       struct rte_pktmbuf_pool_private *mbp_priv;
+       uint16_t roomsz;
+
+       mbp_priv = rte_mempool_get_priv(mp);
+       roomsz = (uint16_t)(uintptr_t)opaque_arg;
+
+       /* Use default data room size. */
+       if (0 == roomsz)
+               roomsz = 2048 + RTE_PKTMBUF_HEADROOM;
+
+       mbp_priv->mbuf_data_room_size = roomsz;
+}
+
+/*
+ * pktmbuf constructor, given as a callback function to
+ * rte_mempool_create().
+ * Set the fields of a packet mbuf to their default values.
+ *
+ * Like rte_ctrlmbuf_init(), the data buffer lives inside the mempool
+ * element, right after the struct rte_mbuf header.
+ */
+void
+rte_pktmbuf_init(struct rte_mempool *mp,
+                __attribute__((unused)) void *opaque_arg,
+                void *_m,
+                __attribute__((unused)) unsigned i)
+{
+       struct rte_mbuf *m = _m;
+       uint32_t buf_len = mp->elt_size - sizeof(struct rte_mbuf);
+
+       RTE_MBUF_ASSERT(mp->elt_size >= sizeof(struct rte_mbuf));
+
+       memset(m, 0, mp->elt_size);
+
+       /* start of buffer is just after mbuf structure */
+       m->buf_addr = (char *)m + sizeof(struct rte_mbuf);
+       m->buf_physaddr = rte_mempool_virt2phy(mp, m) +
+                       sizeof(struct rte_mbuf);
+       m->buf_len = (uint16_t)buf_len;
+
+       /* keep some headroom between start of buffer and data;
+        * clamped so a tiny buffer cannot push data past its end */
+       m->pkt.data = (char*) m->buf_addr + RTE_MIN(RTE_PKTMBUF_HEADROOM, m->buf_len);
+
+       /* init some constant fields */
+       m->type = RTE_MBUF_PKT;
+       m->pool = mp;
+       m->pkt.nb_segs = 1;
+       m->pkt.in_port = 0xff;
+}
+
+/*
+ * Hex-dump 'len' bytes of 'buf' to stdout, 16 bytes per line:
+ * offset, hex byte values, then a printable-ASCII column where
+ * non-printable bytes are shown as '.'.
+ */
+static void
+rte_pktmbuf_hexdump(const void *buf, unsigned int len)
+{
+       unsigned int i, out, ofs;
+       const unsigned char *data = buf;
+#define LINE_LEN 80
+       char line[LINE_LEN];
+
+       printf("  dump data at 0x%p, len=%u\n", data, len);
+       ofs = 0;
+       while (ofs < len) {
+               /* 'out' tracks the write position inside 'line' */
+               out = rte_snprintf(line, LINE_LEN, "  %08X", ofs);
+               for (i = 0; ofs+i < len && i < 16; i++)
+                       out += rte_snprintf(line+out, LINE_LEN - out, " %02X",
+                                       data[ofs+i]&0xff);
+               /* pad short final lines so the ASCII column stays aligned */
+               for (; i <= 16; i++)
+                       out += rte_snprintf(line+out, LINE_LEN - out, "   ");
+               /* note: this loop, not the hex loop, advances 'ofs' */
+               for (i = 0; ofs < len && i < 16; i++, ofs++) {
+                       unsigned char c = data[ofs];
+                       if (!isascii(c) || !isprint(c))
+                               c = '.';
+                       out += rte_snprintf(line+out, LINE_LEN - out, "%c", c);
+               }
+               printf("%s\n", line);
+       }
+}
+
+/* do some sanity checks on a mbuf: panic if it fails
+ *
+ * Checks common fields (type, pool, addresses), then for packet mbufs
+ * given with is_header != 0, verifies that the segment chain length
+ * matches pkt.nb_segs. Control mbufs have nothing extra to check.
+ */
+void
+rte_mbuf_sanity_check(const struct rte_mbuf *m, enum rte_mbuf_type t,
+                     int is_header)
+{
+       const struct rte_mbuf *m_seg;
+       unsigned nb_segs;
+
+       if (m == NULL)
+               rte_panic("mbuf is NULL\n");
+       if (m->type != (uint8_t)t)
+               rte_panic("bad mbuf type\n");
+
+       /* generic checks */
+       if (m->pool == NULL)
+               rte_panic("bad mbuf pool\n");
+       if (m->buf_physaddr == 0)
+               rte_panic("bad phys addr\n");
+       if (m->buf_addr == NULL)
+               rte_panic("bad virt addr\n");
+
+#ifdef RTE_MBUF_SCATTER_GATHER
+       /* a live mbuf must have 0 < refcnt < UINT16_MAX */
+       uint16_t cnt = rte_mbuf_refcnt_read(m);
+       if ((cnt == 0) || (cnt == UINT16_MAX))
+               rte_panic("bad ref cnt\n");
+#endif
+
+       /* nothing to check for ctrl messages */
+       if (m->type == RTE_MBUF_CTRL)
+               return;
+
+       /* check pkt consistency */
+       else if (m->type == RTE_MBUF_PKT) {
+
+               /* nothing to check for sub-segments */
+               if (is_header == 0)
+                       return;
+
+               /* walk the chain: it must end exactly after nb_segs links */
+               nb_segs = m->pkt.nb_segs;
+               m_seg = m;
+               while (m_seg && nb_segs != 0) {
+                       m_seg = m_seg->pkt.next;
+                       nb_segs --;
+               }
+               if (nb_segs != 0)
+                       rte_panic("bad nb_segs\n");
+               return;
+       }
+
+       rte_panic("unknown mbuf type\n");
+}
+
+/* dump a mbuf on console
+ *
+ * Prints header fields, then walks the segment chain printing each
+ * segment and hex-dumping its data. dump_len is the total number of
+ * data bytes to dump across all segments.
+ */
+void
+rte_pktmbuf_dump(const struct rte_mbuf *m, unsigned dump_len)
+{
+       unsigned int len;
+       unsigned nb_segs;
+
+       __rte_mbuf_sanity_check(m, RTE_MBUF_PKT, 1);
+
+       printf("dump mbuf at 0x%p, phys=%"PRIx64", buf_len=%u\n",
+              m, (uint64_t)m->buf_physaddr, (unsigned)m->buf_len);
+       printf("  pkt_len=%"PRIx32", ol_flags=%"PRIx16", nb_segs=%u, "
+              "in_port=%u\n", m->pkt.pkt_len, m->ol_flags,
+              (unsigned)m->pkt.nb_segs, (unsigned)m->pkt.in_port);
+       nb_segs = m->pkt.nb_segs;
+
+       /* bounded both by the chain itself and by nb_segs */
+       while (m && nb_segs != 0) {
+               __rte_mbuf_sanity_check(m, RTE_MBUF_PKT, 0);
+
+               printf("  segment at 0x%p, data=0x%p, data_len=%u\n",
+                      m, m->pkt.data, (unsigned)m->pkt.data_len);
+               /* dump at most the remaining budget, at most this segment */
+               len = dump_len;
+               if (len > m->pkt.data_len)
+                       len = m->pkt.data_len;
+               if (len != 0)
+                       rte_pktmbuf_hexdump(m->pkt.data, len);
+               dump_len -= len;
+               m = m->pkt.next;
+               nb_segs --;
+       }
+}
diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h
new file mode 100644 (file)
index 0000000..5acb6a8
--- /dev/null
@@ -0,0 +1,1019 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#ifndef _RTE_MBUF_H_
+#define _RTE_MBUF_H_
+
+/**
+ * @file
+ * RTE Mbuf
+ *
+ * The mbuf library provides the ability to create and destroy buffers
+ * that may be used by the RTE application to store message
+ * buffers. The message buffers are stored in a mempool, using the
+ * RTE mempool library.
+ *
+ * This library provide an API to allocate/free mbufs, manipulate
+ * control message buffer (ctrlmbuf), which are generic message
+ * buffers, and packet buffers (pktmbuf), which are used to carry
+ * network packets.
+ *
+ * To understand the concepts of packet buffers or mbufs, you
+ * should read "TCP/IP Illustrated, Volume 2: The Implementation,
+ * Addison-Wesley, 1995, ISBN 0-201-63354-X from Richard Stevens"
+ * http://www.kohala.com/start/tcpipiv2.html
+ *
+ * The main modification of this implementation is the use of mbuf for
+ * transports other than packets. mbufs can have other types.
+ */
+
+#include <stdint.h>
+
+#include <rte_mempool.h>
+#include <rte_atomic.h>
+#include <rte_prefetch.h>
+#include <rte_branch_prediction.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * A control message buffer.
+ *
+ * Accessed through the 'ctrl' arm of the union in struct rte_mbuf
+ * when the mbuf type is RTE_MBUF_CTRL.
+ */
+struct rte_ctrlmbuf {
+       void *data;        /**< Pointer to data. */
+       uint32_t data_len; /**< Length of data. */
+};
+
+
+/*
+ * Packet Offload Features Flags. It also carry packet type information.
+ * Critical resources. Both rx/tx shared these bits. Be cautious on any change
+ */
+#define PKT_RX_VLAN_PKT      0x0001 /**< RX packet is a 802.1q VLAN packet. */
+#define PKT_RX_RSS_HASH      0x0002 /**< RX packet with RSS hash result. */
+#define PKT_RX_FDIR          0x0004 /**< RX packet with FDIR infos. */
+#define PKT_RX_L4_CKSUM_BAD  0x0008 /**< L4 cksum of RX pkt. is not OK. */
+#define PKT_RX_IP_CKSUM_BAD  0x0010 /**< IP cksum of RX pkt. is not OK. */
+#define PKT_RX_IPV4_HDR      0x0020 /**< RX packet with IPv4 header. */
+#define PKT_RX_IPV4_HDR_EXT  0x0040 /**< RX packet with extended IPv4 header. */
+#define PKT_RX_IPV6_HDR      0x0080 /**< RX packet with IPv6 header. */
+#define PKT_RX_IPV6_HDR_EXT  0x0100 /**< RX packet with extended IPv6 header. */
+#define PKT_RX_IEEE1588_PTP  0x0200 /**< RX IEEE1588 L2 Ethernet PT Packet. */
+#define PKT_RX_IEEE1588_TMST 0x0400 /**< RX IEEE1588 L2/L4 timestamped packet.*/
+
+#define PKT_TX_VLAN_PKT      0x0800 /**< TX packet is a 802.1q VLAN packet. */
+#define PKT_TX_IP_CKSUM      0x1000 /**< IP cksum of TX pkt. computed by NIC. */
+/*
+ * Bit 14~13 used for L4 packet type with checksum enabled.
+ *     00: Reserved
+ *     01: TCP checksum
+ *     10: SCTP checksum
+ *     11: UDP checksum
+ */
+#define PKT_TX_L4_MASK       0x6000 /**< Mask bits for L4 checksum offload request. */
+#define PKT_TX_L4_NO_CKSUM   0x0000 /**< Disable L4 cksum of TX pkt. */
+#define PKT_TX_TCP_CKSUM     0x2000 /**< TCP cksum of TX pkt. computed by NIC. */
+#define PKT_TX_SCTP_CKSUM    0x4000 /**< SCTP cksum of TX pkt. computed by NIC. */
+#define PKT_TX_UDP_CKSUM     0x6000 /**< UDP cksum of TX pkt. computed by NIC. */
+/* Bit 15 */
+#define PKT_TX_IEEE1588_TMST 0x8000 /**< TX IEEE1588 packet to timestamp. */
+
+/**
+ * Bit Mask to indicate what bits required for building TX context
+ */
+#define PKT_TX_OFFLOAD_MASK (PKT_TX_VLAN_PKT | PKT_TX_IP_CKSUM | PKT_TX_L4_MASK)
+
+/* Compare mask for vlan_macip_lens, used for context build up */
+#define TX_VLAN_CMP_MASK        0xFFFF0000  /**< VLAN length - 16-bits. */
+#define TX_MAC_LEN_CMP_MASK     0x0000FE00  /**< MAC length - 7-bits. */
+#define TX_IP_LEN_CMP_MASK      0x000001FF  /**< IP  length - 9-bits. */
+/** MAC+IP length. */ 
+#define TX_MACIP_LEN_CMP_MASK   (TX_MAC_LEN_CMP_MASK | TX_IP_LEN_CMP_MASK) 
+
+/**
+ * A packet message buffer.
+ *
+ * Accessed through the 'pkt' arm of the union in struct rte_mbuf when
+ * the mbuf type is RTE_MBUF_PKT. Which arm of the 'hash' union is
+ * meaningful depends on the PKT_RX_RSS_HASH / PKT_RX_FDIR ol_flags.
+ */
+struct rte_pktmbuf {
+       /* valid for any segment */
+       struct rte_mbuf *next;  /**< Next segment of scattered packet. */
+       void* data;             /**< Start address of data in segment buffer. */
+       uint16_t data_len;      /**< Amount of data in segment buffer. */
+
+       /* these fields are valid for first segment only */
+       uint8_t nb_segs;        /**< Number of segments. */
+       uint8_t in_port;        /**< Input port. */
+       uint32_t pkt_len;       /**< Total pkt len: sum of all segment data_len. */
+
+       /* offload features */
+       uint16_t vlan_tci;      /**< VLAN Tag Control Identifier (CPU order). */
+       uint16_t l2_len:7;      /**< L2 (MAC) Header Length. */
+       uint16_t l3_len:9;      /**< L3 (IP) Header Length. */
+       union {
+               uint32_t rss;       /**< RSS hash result if RSS enabled */
+               struct {
+                       uint16_t hash;
+                       uint16_t id;
+               } fdir;             /**< Filter identifier if FDIR enabled */
+       } hash;                 /**< hash information */
+};
+
+/**
+ * This enum indicates the mbuf type.
+ *
+ * Stored in the uint8_t 'type' field of struct rte_mbuf; it selects
+ * which arm of the ctrl/pkt union is valid.
+ */
+enum rte_mbuf_type {
+       RTE_MBUF_CTRL,  /**< Control mbuf. */
+       RTE_MBUF_PKT,   /**< Packet mbuf. */
+};
+
+/**
+ * The generic rte_mbuf, containing a packet mbuf or a control mbuf.
+ *
+ * The trailing anonymous union overlays the control and packet views;
+ * the 'type' field says which one is valid.
+ */
+struct rte_mbuf {
+       struct rte_mempool *pool; /**< Pool from which mbuf was allocated. */
+       void *buf_addr;           /**< Virtual address of segment buffer. */
+       phys_addr_t buf_physaddr; /**< Physical address of segment buffer. */
+       uint16_t buf_len;         /**< Length of segment buffer. */
+#ifdef RTE_MBUF_SCATTER_GATHER
+       /**
+        * 16-bit Reference counter.
+        * It should only be accessed using the following functions:
+        * rte_mbuf_refcnt_update(), rte_mbuf_refcnt_read(), and
+        * rte_mbuf_refcnt_set(). The functionality of these functions (atomic,
+        * or non-atomic) is controlled by the CONFIG_RTE_MBUF_REFCNT_ATOMIC
+        * config option.
+        */
+       union {
+               rte_atomic16_t refcnt_atomic;   /**< Atomically accessed refcnt */
+               uint16_t refcnt;                /**< Non-atomically accessed refcnt */
+       };
+#else
+       uint16_t refcnt_reserved;     /**< Do not use this field */
+#endif
+       uint8_t type;                 /**< Type of mbuf. */
+       uint8_t reserved;             /**< Unused field. Required for padding. */
+       uint16_t ol_flags;            /**< Offload features. */
+
+       union {
+               struct rte_ctrlmbuf ctrl;
+               struct rte_pktmbuf pkt;
+       };
+} __rte_cache_aligned;
+
+/**
+ * Given the buf_addr returns the pointer to corresponding mbuf.
+ *
+ * These macros rely on the constructors placing the data buffer of a
+ * direct mbuf immediately after its struct rte_mbuf header; an
+ * indirect mbuf points into another element's buffer, so the
+ * round-trip through RTE_MBUF_FROM_BADDR no longer yields itself.
+ */
+#define RTE_MBUF_FROM_BADDR(ba)     (((struct rte_mbuf *)(ba)) - 1)
+
+/**
+ * Given the pointer to mbuf returns an address where it's  buf_addr
+ * should point to.
+ */
+#define RTE_MBUF_TO_BADDR(mb)       (((struct rte_mbuf *)(mb)) + 1)
+
+/**
+ * Returns TRUE if given mbuf is indirect, or FALSE otherwise.
+ */
+#define RTE_MBUF_INDIRECT(mb)   (RTE_MBUF_FROM_BADDR((mb)->buf_addr) != (mb))
+
+/**
+ * Returns TRUE if given mbuf is direct, or FALSE otherwise.
+ */
+#define RTE_MBUF_DIRECT(mb)     (RTE_MBUF_FROM_BADDR((mb)->buf_addr) == (mb))
+
+
+/**
+ * Private data in case of pktmbuf pool.
+ *
+ * A structure that contains some pktmbuf_pool-specific data that are
+ * appended after the mempool structure (in private data).
+ * Filled in by rte_pktmbuf_pool_init().
+ */
+struct rte_pktmbuf_pool_private {
+       uint16_t mbuf_data_room_size; /**< Size of data space in each mbuf.*/
+};
+
+#ifdef RTE_LIBRTE_MBUF_DEBUG
+
+/**  check mbuf type in debug mode */
+#define __rte_mbuf_sanity_check(m, t, is_h) rte_mbuf_sanity_check(m, t, is_h)
+
+/**  check mbuf type in debug mode if mbuf pointer is not null */
+#define __rte_mbuf_sanity_check_raw(m, t, is_h)        do {       \
+       if ((m) != NULL)                                   \
+               rte_mbuf_sanity_check(m, t, is_h);          \
+} while (0)
+
+/**  MBUF asserts in debug mode */
+#define RTE_MBUF_ASSERT(exp)                                         \
+if (!(exp)) {                                                        \
+       rte_panic("line%d\tassert \"" #exp "\" failed\n", __LINE__); \
+}
+
+#else /*  RTE_LIBRTE_MBUF_DEBUG */
+
+/* When debug is disabled the checks compile to empty statements,
+ * so release builds pay no cost for them. */
+
+/**  check mbuf type in debug mode */
+#define __rte_mbuf_sanity_check(m, t, is_h) do { } while(0)
+
+/**  check mbuf type in debug mode if mbuf pointer is not null */
+#define __rte_mbuf_sanity_check_raw(m, t, is_h) do { } while(0)
+
+/**  MBUF asserts in debug mode */
+#define RTE_MBUF_ASSERT(exp)                do { } while(0)
+
+#endif /*  RTE_LIBRTE_MBUF_DEBUG */
+
+#ifdef RTE_MBUF_SCATTER_GATHER
+#ifdef RTE_MBUF_REFCNT_ATOMIC
+
+/* Atomic refcnt implementation: selected by RTE_MBUF_REFCNT_ATOMIC.
+ * Both implementations below share the same three-function interface. */
+
+/**
+ * Adds given value to an mbuf's refcnt and returns its new value.
+ * @param m
+ *   Mbuf to update
+ * @param value
+ *   Value to add/subtract
+ * @return
+ *   Updated value
+ */
+static inline uint16_t
+rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
+{
+       return (uint16_t)(rte_atomic16_add_return(&m->refcnt_atomic, value));
+}
+
+/**
+ * Reads the value of an mbuf's refcnt.
+ * @param m
+ *   Mbuf to read
+ * @return
+ *   Reference count number.
+ */
+static inline uint16_t
+rte_mbuf_refcnt_read(const struct rte_mbuf *m)
+{
+       return (uint16_t)(rte_atomic16_read(&m->refcnt_atomic));
+}
+
+/**
+ * Sets an mbuf's refcnt to a defined value.
+ * @param m
+ *   Mbuf to update
+ * @param new_value
+ *   Value set
+ */
+static inline void
+rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value)
+{
+       rte_atomic16_set(&m->refcnt_atomic, new_value);
+}
+
+#else /* ! RTE_MBUF_REFCNT_ATOMIC */
+
+/* Plain (non-atomic) refcnt implementation. */
+
+/**
+ * Adds given value to an mbuf's refcnt and returns its new value.
+ */
+static inline uint16_t
+rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
+{
+       m->refcnt = (uint16_t)(m->refcnt + value);
+       return m->refcnt;
+}
+
+/**
+ * Reads the value of an mbuf's refcnt.
+ */
+static inline uint16_t
+rte_mbuf_refcnt_read(const struct rte_mbuf *m)
+{
+       return m->refcnt;
+}
+
+/**
+ * Sets an mbuf's refcnt to the defined value.
+ */
+static inline void
+rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value)
+{
+       m->refcnt = new_value;
+}
+
+#endif /* RTE_MBUF_REFCNT_ATOMIC */
+
+/** Mbuf prefetch */
+#define RTE_MBUF_PREFETCH_TO_FREE(m) do {       \
+       if ((m) != NULL)                        \
+               rte_prefetch0(m);               \
+} while (0)
+
+#else /* ! RTE_MBUF_SCATTER_GATHER */
+
+/* Without scatter-gather there is no refcounting; only the prefetch
+ * helper is provided, as a no-op. */
+
+/** Mbuf prefetch */
+#define RTE_MBUF_PREFETCH_TO_FREE(m) do { } while(0)
+
+#endif /* RTE_MBUF_SCATTER_GATHER */
+
+
+/**
+ * Sanity checks on an mbuf.
+ *
+ * Check the consistency of the given mbuf. The function will cause a
+ * panic if corruption is detected.
+ *
+ * @param m
+ *   The mbuf to be checked.
+ * @param t
+ *   The expected type of the mbuf.
+ * @param is_header
+ *   True if the mbuf is a packet header, false if it is a sub-segment
+ *   of a packet (in this case, some fields like nb_segs are not checked)
+ */
+void
+rte_mbuf_sanity_check(const struct rte_mbuf *m, enum rte_mbuf_type t,
+                     int is_header);
+
+/**
+ * @internal Allocate a new mbuf from mempool *mp*.
+ * The use of that function is reserved for RTE internal needs.
+ * Please use either rte_ctrlmbuf_alloc() or rte_pktmbuf_alloc().
+ *
+ * @param mp
+ *   The mempool from which mbuf is allocated.
+ * @return
+ *   - The pointer to the new mbuf on success.
+ *   - NULL if allocation failed.
+ */
+static inline struct rte_mbuf *__rte_mbuf_raw_alloc(struct rte_mempool *mp)
+{
+       struct rte_mbuf *m;
+       void *mb = NULL;
+       if (rte_mempool_get(mp, &mb) < 0)
+               return NULL;
+       m = (struct rte_mbuf *)mb;
+#ifdef RTE_MBUF_SCATTER_GATHER
+       /* an mbuf sitting in the pool must have refcnt 0; take the
+        * first reference on behalf of the caller */
+       RTE_MBUF_ASSERT(rte_mbuf_refcnt_read(m) == 0);
+       rte_mbuf_refcnt_set(m, 1);
+#endif /* RTE_MBUF_SCATTER_GATHER */
+       return (m);
+}
+
+/**
+ * @internal Put mbuf back into its original mempool.
+ * The use of that function is reserved for RTE internal needs.
+ * Please use either rte_ctrlmbuf_free() or rte_pktmbuf_free().
+ *
+ * The caller must have already dropped the last reference (refcnt 0).
+ *
+ * @param m
+ *   The mbuf to be freed.
+ */
+static inline void __rte_mbuf_raw_free(struct rte_mbuf *m)
+{
+#ifdef RTE_MBUF_SCATTER_GATHER
+       RTE_MBUF_ASSERT(rte_mbuf_refcnt_read(m) == 0);
+#endif /* RTE_MBUF_SCATTER_GATHER */
+       rte_mempool_put(m->pool, m);
+}
+
+/* Operations on ctrl mbuf */
+
+/**
+ * The control mbuf constructor.
+ *
+ * This function initializes some fields in an mbuf structure that are
+ * not modified by the user once created (mbuf type, origin pool, buffer
+ * start address, and so on). This function is given as a callback function
+ * to rte_mempool_create() at pool creation time.
+ *
+ * @param mp
+ *   The mempool from which the mbuf is allocated.
+ * @param opaque_arg
+ *   A pointer that can be used by the user to retrieve useful information
+ *   for mbuf initialization. This pointer comes from the ``init_arg``
+ *   parameter of rte_mempool_create().
+ * @param m
+ *   The mbuf to initialize.
+ * @param i
+ *   The index of the mbuf in the pool table.
+ */
+void rte_ctrlmbuf_init(struct rte_mempool *mp, void *opaque_arg,
+                      void *m, unsigned i);
+
+/**
+ * Allocate a new mbuf (type is ctrl) from mempool *mp*.
+ *
+ * This new mbuf is initialized with data pointing to the beginning of
+ * buffer, and with a length of zero.
+ *
+ * @param mp
+ *   The mempool from which the mbuf is allocated.
+ * @return
+ *   - The pointer to the new mbuf on success.
+ *   - NULL if allocation failed.
+ */
+static inline struct rte_mbuf *rte_ctrlmbuf_alloc(struct rte_mempool *mp)
+{
+       struct rte_mbuf *m;
+       if ((m = __rte_mbuf_raw_alloc(mp)) != NULL) {
+               /* reset the per-use fields; type/pool/buf_addr were set
+                * once by rte_ctrlmbuf_init() at pool creation */
+               m->ctrl.data = m->buf_addr;
+               m->ctrl.data_len = 0;
+               __rte_mbuf_sanity_check(m, RTE_MBUF_CTRL, 0);
+       }
+       return (m);
+}
+
+/**
+ * Free a control mbuf back into its original mempool.
+ *
+ * With refcounting enabled the mbuf is only returned to the pool when
+ * the last reference is dropped.
+ *
+ * @param m
+ *   The control mbuf to be freed.
+ */
+static inline void rte_ctrlmbuf_free(struct rte_mbuf *m)
+{
+       __rte_mbuf_sanity_check(m, RTE_MBUF_CTRL, 0);
+#ifdef RTE_MBUF_SCATTER_GATHER
+       if (rte_mbuf_refcnt_update(m, -1) == 0)
+#endif /* RTE_MBUF_SCATTER_GATHER */
+               __rte_mbuf_raw_free(m);
+}
+
+/**
+ * A macro that returns the pointer to the carried data.
+ *
+ * The value that can be read or assigned.
+ *
+ * @param m
+ *   The control mbuf.
+ */
+#define rte_ctrlmbuf_data(m) ((m)->ctrl.data)
+
+/**
+ * A macro that returns the length of the carried data.
+ *
+ * The value that can be read or assigned.
+ *
+ * @param m
+ *   The control mbuf.
+ */
+#define rte_ctrlmbuf_len(m) ((m)->ctrl.data_len)
+
+/* Operations on pkt mbuf */
+
+/**
+ * The packet mbuf constructor.
+ *
+ * This function initializes some fields in the mbuf structure that are not
+ * modified by the user once created (mbuf type, origin pool, buffer start
+ * address, and so on). This function is given as a callback function to
+ * rte_mempool_create() at pool creation time.
+ *
+ * @param mp
+ *   The mempool from which mbufs originate.
+ * @param opaque_arg
+ *   A pointer that can be used by the user to retrieve useful information
+ *   for mbuf initialization. This pointer comes from the ``init_arg``
+ *   parameter of rte_mempool_create().
+ * @param m
+ *   The mbuf to initialize.
+ * @param i
+ *   The index of the mbuf in the pool table.
+ */
+void rte_pktmbuf_init(struct rte_mempool *mp, void *opaque_arg,
+                     void *m, unsigned i);
+
+
+/**
+ * A  packet mbuf pool constructor.
+ *
+ * This function initializes the mempool private data in the case of a
+ * pktmbuf pool. This private data is needed by the driver. The
+ * function is given as a callback function to rte_mempool_create() at
+ * pool creation. It can be extended by the user, for example, to
+ * provide another packet size.
+ *
+ * @param mp
+ *   The mempool from which mbufs originate.
+ * @param opaque_arg
+ *   A pointer that can be used by the user to retrieve useful information
+ *   for mbuf initialization. This pointer comes from the ``init_arg``
+ *   parameter of rte_mempool_create().
+ */
+void rte_pktmbuf_pool_init(struct rte_mempool *mp, void *opaque_arg);
+
+/**
+ * Reset the fields of a packet mbuf to their default values.
+ *
+ * The given mbuf must have only one segment.
+ *
+ * @param m
+ *   The packet mbuf to be reset.
+ */
+static inline void rte_pktmbuf_reset(struct rte_mbuf *m)
+{
+       uint32_t buf_ofs;
+
+       m->pkt.next = NULL;
+       m->pkt.pkt_len = 0;
+       m->pkt.l2_len = 0;
+       m->pkt.l3_len = 0;
+       m->pkt.vlan_tci = 0;
+       m->pkt.nb_segs = 1;
+       m->pkt.in_port = 0xff;
+
+       m->ol_flags = 0;
+       /* restore default headroom, clamped to the buffer size */
+       buf_ofs = (RTE_PKTMBUF_HEADROOM <= m->buf_len) ?
+                       RTE_PKTMBUF_HEADROOM : m->buf_len;
+       m->pkt.data = (char*) m->buf_addr + buf_ofs;
+
+       m->pkt.data_len = 0;
+       __rte_mbuf_sanity_check(m, RTE_MBUF_PKT, 1);
+}
+
+/**
+ * Allocate a new mbuf (type is pkt) from a mempool.
+ *
+ * This new mbuf contains one segment, which has a length of 0. The pointer
+ * to data is initialized to have some bytes of headroom in the buffer
+ * (if buffer size allows).
+ *
+ * @param mp
+ *   The mempool from which the mbuf is allocated.
+ * @return
+ *   - The pointer to the new mbuf on success.
+ *   - NULL if allocation failed.
+ */
+static inline struct rte_mbuf *rte_pktmbuf_alloc(struct rte_mempool *mp)
+{
+       struct rte_mbuf *m;
+       if ((m = __rte_mbuf_raw_alloc(mp)) != NULL)
+               rte_pktmbuf_reset(m);
+       return (m);
+}
+
+#ifdef RTE_MBUF_SCATTER_GATHER
+
+/**
+ * Attach packet mbuf to another packet mbuf.
+ * After attachment we refer to the mbuf we attached as 'indirect',
+ * while the mbuf we attached to as 'direct'.
+ * Right now, not supported:
+ *  - attachment to an indirect mbuf (e.g. - md has to be direct).
+ *  - attachment of an already indirect mbuf (e.g. - mi has to be direct).
+ *  - the mbuf we are trying to attach (mi) is used by someone else,
+ *    e.g. its reference counter is greater than 1.
+ *
+ * @param mi
+ *   The indirect packet mbuf.
+ * @param md
+ *   The direct packet mbuf.
+ */
+
+static inline void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *md)
+{
+       RTE_MBUF_ASSERT(RTE_MBUF_DIRECT(md) &&
+           RTE_MBUF_DIRECT(mi) &&
+           rte_mbuf_refcnt_read(mi) == 1);
+
+       /* the direct mbuf now has one more owner */
+       rte_mbuf_refcnt_update(md, 1);
+       /* mi shares md's buffer; this is what makes mi 'indirect' */
+       mi->buf_physaddr = md->buf_physaddr;
+       mi->buf_addr = md->buf_addr;
+       mi->buf_len = md->buf_len;
+
+       mi->pkt = md->pkt;
+
+       /* mi is a standalone single-segment view of md's data */
+       mi->pkt.next = NULL;
+       mi->pkt.pkt_len = mi->pkt.data_len;
+       mi->pkt.nb_segs = 1;
+
+       __rte_mbuf_sanity_check(mi, RTE_MBUF_PKT, 1);
+       __rte_mbuf_sanity_check(md, RTE_MBUF_PKT, 0);
+}
+
+/**
+ * Detach an indirect packet mbuf -
+ *  - restore original mbuf address and length values.
+ *  - reset pktmbuf data and data_len to their default values.
+ *  All other fields of the given packet mbuf will be left intact.
+ *
+ *  Note: this does NOT drop the reference taken on the direct mbuf at
+ *  attach time; the caller is responsible for that (see
+ *  rte_pktmbuf_free_seg()).
+ *
+ * @param m
+ *   The indirect attached packet mbuf.
+ */
+
+static inline void rte_pktmbuf_detach(struct rte_mbuf *m)
+{
+       const struct rte_mempool *mp = m->pool;
+       /* point back at m's own embedded buffer, as at pool init */
+       void *buf = RTE_MBUF_TO_BADDR(m);
+       uint32_t buf_ofs;
+       uint32_t buf_len = mp->elt_size - sizeof(*m);
+       m->buf_physaddr = rte_mempool_virt2phy(mp, m) + sizeof (*m);
+
+       m->buf_addr = buf;
+       m->buf_len = (uint16_t)buf_len;
+
+       /* restore default headroom, clamped to the buffer size */
+       buf_ofs = (RTE_PKTMBUF_HEADROOM <= m->buf_len) ?
+                       RTE_PKTMBUF_HEADROOM : m->buf_len;
+       m->pkt.data = (char*) m->buf_addr + buf_ofs;
+
+       m->pkt.data_len = 0;
+}
+
+#endif /* RTE_MBUF_SCATTER_GATHER */
+
+/**
+ * Free a segment of a packet mbuf into its original mempool.
+ *
+ * Free an mbuf, without parsing other segments in case of chained
+ * buffers.
+ *
+ * @param m
+ *   The packet mbuf segment to be freed.
+ */
+static inline void rte_pktmbuf_free_seg(struct rte_mbuf *m)
+{
+       __rte_mbuf_sanity_check(m, RTE_MBUF_PKT, 0);
+
+       /* Note the #ifdef structure: with scatter-gather enabled the
+        * final __rte_mbuf_raw_free(m) sits INSIDE the refcnt 'if';
+        * without it, the function body is just that single free. */
+#ifdef RTE_MBUF_SCATTER_GATHER
+       /* fast path: sole owner; otherwise drop one reference and only
+        * free when it was the last */
+       if (likely (rte_mbuf_refcnt_read(m) == 1) ||
+                       likely (rte_mbuf_refcnt_update(m, -1) == 0)) {
+               struct rte_mbuf *md = RTE_MBUF_FROM_BADDR(m->buf_addr);
+
+               rte_mbuf_refcnt_set(m, 0);
+
+               /* if this is an indirect mbuf, then
+                *  - detach mbuf
+                *  - free attached mbuf segment
+                */
+               if (unlikely (md != m)) {
+                       rte_pktmbuf_detach(m);
+                       if (rte_mbuf_refcnt_update(md, -1) == 0)
+                               __rte_mbuf_raw_free(md);
+               }
+#endif
+               __rte_mbuf_raw_free(m);
+#ifdef RTE_MBUF_SCATTER_GATHER
+       }
+#endif
+}
+
+/**
+ * Free a packet mbuf back into its original mempool.
+ *
+ * Free an mbuf, and all its segments in case of chained buffers. Each
+ * segment is added back into its original mempool.
+ *
+ * @param m
+ *   The packet mbuf to be freed.
+ */
+static inline void rte_pktmbuf_free(struct rte_mbuf *m)
+{
+       struct rte_mbuf *m_next;
+
+       __rte_mbuf_sanity_check(m, RTE_MBUF_PKT, 1);
+
+       /* save 'next' before freeing: the segment must not be read
+        * after it goes back to the pool */
+       while (m != NULL) {
+               m_next = m->pkt.next;
+               rte_pktmbuf_free_seg(m);
+               m = m_next;
+       }
+}
+
+#ifdef RTE_MBUF_SCATTER_GATHER
+
+/**
+ * Creates a "clone" of the given packet mbuf.
+ *
+ * Walks through all segments of the given packet mbuf, and for each of them:
+ *  - Creates a new packet mbuf from the given pool.
+ *  - Attaches newly created mbuf to the segment.
+ * Then updates pkt_len and nb_segs of the "clone" packet mbuf to match values
+ * from the original packet mbuf.
+ *
+ * @param md
+ *   The packet mbuf to be cloned.
+ * @param mp
+ *   The mempool from which the "clone" mbufs are allocated.
+ * @return
+ *   - The pointer to the new "clone" mbuf on success.
+ *   - NULL if allocation fails.
+ */
+static inline struct rte_mbuf *rte_pktmbuf_clone(struct rte_mbuf *md,
+               struct rte_mempool *mp)
+{
+       struct rte_mbuf *mc, *mi, **prev;
+       uint32_t pktlen;
+       uint8_t nseg;
+
+       if (unlikely ((mc = rte_pktmbuf_alloc(mp)) == NULL))
+               return (NULL);
+
+       /* save header values before attach overwrites mc->pkt */
+       mi = mc;
+       prev = &mi->pkt.next;
+       pktlen = md->pkt.pkt_len;
+       nseg = 0;
+
+       /* one indirect clone mbuf per source segment, linked via 'prev' */
+       do {
+               nseg++;
+               rte_pktmbuf_attach(mi, md);
+               *prev = mi;
+               prev = &mi->pkt.next;
+       } while ((md = md->pkt.next) != NULL &&
+           (mi = rte_pktmbuf_alloc(mp)) != NULL);
+
+       *prev = NULL;
+       mc->pkt.nb_segs = nseg;
+       mc->pkt.pkt_len = pktlen;
+
+       /* Allocation of new indirect segment failed: release the
+        * partially built chain (valid, since nb_segs/pkt_len are set) */
+       if (unlikely (mi == NULL)) {
+               rte_pktmbuf_free(mc);
+               return (NULL);
+       }
+
+       __rte_mbuf_sanity_check(mc, RTE_MBUF_PKT, 1);
+       return (mc);
+}
+
+/**
+ * Adds given value to the refcnt of all packet mbuf segments.
+ *
+ * Walks through all segments of given packet mbuf and for each of them
+ * invokes rte_mbuf_refcnt_update().
+ *
+ * @param m
+ *   The packet mbuf whose refcnt to be updated.
+ * @param v
+ *   The value to add to the mbuf's segments refcnt.
+ */
+static inline void rte_pktmbuf_refcnt_update(struct rte_mbuf *m, int16_t v)
+{
+       __rte_mbuf_sanity_check(m, RTE_MBUF_PKT, 1);
+
+       /* the same signed delta v is applied to every segment in turn */
+       do {
+               rte_mbuf_refcnt_update(m, v);
+       } while ((m = m->pkt.next) != NULL);
+}
+
+#endif /* RTE_MBUF_SCATTER_GATHER */
+
+/**
+ * Get the headroom in a packet mbuf.
+ *
+ * @param m
+ *   The packet mbuf.
+ * @return
+ *   The length of the headroom.
+ */
+static inline uint16_t rte_pktmbuf_headroom(const struct rte_mbuf *m)
+{
+       __rte_mbuf_sanity_check(m, RTE_MBUF_PKT, 1);
+       /* headroom = distance from the start of the data buffer to the
+        * current start of data in this segment */
+       return (uint16_t) ((char*) m->pkt.data - (char*) m->buf_addr);
+}
+
+/**
+ * Get the tailroom of a packet mbuf.
+ *
+ * @param m
+ *   The packet mbuf.
+ * @return
+ *   The length of the tailroom.
+ */
+static inline uint16_t rte_pktmbuf_tailroom(const struct rte_mbuf *m)
+{
+       __rte_mbuf_sanity_check(m, RTE_MBUF_PKT, 1);
+       /* tailroom = bytes left in the buffer after headroom and the
+        * segment's current data */
+       return (uint16_t)(m->buf_len - rte_pktmbuf_headroom(m) -
+                         m->pkt.data_len);
+}
+
+/**
+ * Get the last segment of the packet.
+ *
+ * @param m
+ *   The packet mbuf.
+ * @return
+ *   The last segment of the given mbuf.
+ */
+static inline struct rte_mbuf *rte_pktmbuf_lastseg(struct rte_mbuf *m)
+{
+       /* NOTE(review): the cast is a no-op (m is already a non-const
+        * struct rte_mbuf *) and could be dropped */
+       struct rte_mbuf *m2 = (struct rte_mbuf *)m;
+
+       __rte_mbuf_sanity_check(m, RTE_MBUF_PKT, 1);
+       /* follow the chain until the segment with no successor */
+       while (m2->pkt.next != NULL)
+               m2 = m2->pkt.next;
+       return m2;
+}
+
+/**
+ * A macro that points to the start of the data in the mbuf.
+ *
+ * The returned pointer is cast to type t. Before using this
+ * function, the user must ensure that m_headlen(m) is large enough to
+ * read its data.
+ *
+ * @param m
+ *   The packet mbuf.
+ * @param t
+ *   The type to cast the result into.
+ */
+#define rte_pktmbuf_mtod(m, t) ((t)((m)->pkt.data))
+
+/**
+ * A macro that returns the length of the packet.
+ *
+ * The value can be read or assigned.
+ *
+ * @param m
+ *   The packet mbuf.
+ */
+#define rte_pktmbuf_pkt_len(m) ((m)->pkt.pkt_len)
+
+/**
+ * A macro that returns the length of the segment.
+ *
+ * The value can be read or assigned.
+ *
+ * @param m
+ *   The packet mbuf.
+ */
+#define rte_pktmbuf_data_len(m) ((m)->pkt.data_len)
+
+/**
+ * Prepend len bytes to an mbuf data area.
+ *
+ * Returns a pointer to the new
+ * data start address. If there is not enough headroom in the first
+ * segment, the function will return NULL, without modifying the mbuf.
+ *
+ * @param m
+ *   The pkt mbuf.
+ * @param len
+ *   The amount of data to prepend (in bytes).
+ * @return
+ *   A pointer to the start of the newly prepended data, or
+ *   NULL if there is not enough headroom space in the first segment
+ */
+static inline char *rte_pktmbuf_prepend(struct rte_mbuf *m,
+                                       uint16_t len)
+{
+       __rte_mbuf_sanity_check(m, RTE_MBUF_PKT, 1);
+
+       /* only the first segment's headroom is usable for prepending */
+       if (unlikely(len > rte_pktmbuf_headroom(m)))
+               return NULL;
+
+       /* grow the data area backwards and account for the extra bytes
+        * in both the segment length and the total packet length */
+       m->pkt.data = (char*) m->pkt.data - len;
+       m->pkt.data_len = (uint16_t)(m->pkt.data_len + len);
+       m->pkt.pkt_len  = (m->pkt.pkt_len + len);
+
+       return (char*) m->pkt.data;
+}
+
+/**
+ * Append len bytes to an mbuf.
+ *
+ * Append len bytes to an mbuf and return a pointer to the start address
+ * of the added data. If there is not enough tailroom in the last
+ * segment, the function will return NULL, without modifying the mbuf.
+ *
+ * @param m
+ *   The packet mbuf.
+ * @param len
+ *   The amount of data to append (in bytes).
+ * @return
+ *   A pointer to the start of the newly appended data, or
+ *   NULL if there is not enough tailroom space in the last segment
+ */
+static inline char *rte_pktmbuf_append(struct rte_mbuf *m, uint16_t len)
+{
+       void *tail;
+       struct rte_mbuf *m_last;
+
+       __rte_mbuf_sanity_check(m, RTE_MBUF_PKT, 1);
+
+       /* data can only be appended in the last segment's tailroom */
+       m_last = rte_pktmbuf_lastseg(m);
+       if (unlikely(len > rte_pktmbuf_tailroom(m_last)))
+               return NULL;
+
+       /* the segment length grows on the last segment, but the total
+        * packet length is tracked on the head segment */
+       tail = (char*) m_last->pkt.data + m_last->pkt.data_len;
+       m_last->pkt.data_len = (uint16_t)(m_last->pkt.data_len + len);
+       m->pkt.pkt_len  = (m->pkt.pkt_len + len);
+       return (char*) tail;
+}
+
+/**
+ * Remove len bytes at the beginning of an mbuf.
+ *
+ * Returns a pointer to the start address of the new data area. If the
+ * length is greater than the length of the first segment, then the
+ * function will fail and return NULL, without modifying the mbuf.
+ *
+ * @param m
+ *   The packet mbuf.
+ * @param len
+ *   The amount of data to remove (in bytes).
+ * @return
+ *   A pointer to the new start of the data.
+ */
+static inline char *rte_pktmbuf_adj(struct rte_mbuf *m, uint16_t len)
+{
+       __rte_mbuf_sanity_check(m, RTE_MBUF_PKT, 1);
+
+       /* removal is limited to the first segment: len may not cross a
+        * segment boundary */
+       if (unlikely(len > m->pkt.data_len))
+               return NULL;
+
+       /* advance the data pointer and shrink both segment and total
+        * packet lengths */
+       m->pkt.data_len = (uint16_t)(m->pkt.data_len - len);
+       m->pkt.data = ((char*) m->pkt.data + len);
+       m->pkt.pkt_len  = (m->pkt.pkt_len - len);
+       return (char*) m->pkt.data;
+}
+
+/**
+ * Remove len bytes of data at the end of the mbuf.
+ *
+ * If the length is greater than the length of the last segment, the
+ * function will fail and return -1 without modifying the mbuf.
+ *
+ * @param m
+ *   The packet mbuf.
+ * @param len
+ *   The amount of data to remove (in bytes).
+ * @return
+ *   - 0: On success.
+ *   - -1: On error.
+ */
+static inline int rte_pktmbuf_trim(struct rte_mbuf *m, uint16_t len)
+{
+       struct rte_mbuf *m_last;
+
+       __rte_mbuf_sanity_check(m, RTE_MBUF_PKT, 1);
+
+       /* trimming is limited to the last segment: len may not cross a
+        * segment boundary */
+       m_last = rte_pktmbuf_lastseg(m);
+       if (unlikely(len > m_last->pkt.data_len))
+               return -1;
+
+       /* shrink the last segment; total packet length is kept on the
+        * head segment */
+       m_last->pkt.data_len = (uint16_t)(m_last->pkt.data_len - len);
+       m->pkt.pkt_len  = (m->pkt.pkt_len - len);
+       return 0;
+}
+
+/**
+ * Test if mbuf data is contiguous.
+ *
+ * @param m
+ *   The packet mbuf.
+ * @return
+ *   - 1, if all data is contiguous (one segment).
+ *   - 0, if there is several segments.
+ */
+static inline int rte_pktmbuf_is_contiguous(const struct rte_mbuf *m)
+{
+       __rte_mbuf_sanity_check(m, RTE_MBUF_PKT, 1);
+       /* NOTE(review): the '!!' is redundant since '==' already yields
+        * 0 or 1; kept for clarity of intent */
+       return !!(m->pkt.nb_segs == 1);
+}
+
+/**
+ * Dump an mbuf structure to the console.
+ *
+ * Dump all fields for the given packet mbuf and all its associated
+ * segments (in the case of a chained buffer).
+ *
+ * @param m
+ *   The packet mbuf.
+ * @param dump_len
+ *   If dump_len != 0, also dump the "dump_len" first data bytes of
+ *   the packet.
+ */
+void rte_pktmbuf_dump(const struct rte_mbuf *m, unsigned dump_len);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_MBUF_H_ */
diff --git a/lib/librte_mempool/Makefile b/lib/librte_mempool/Makefile
new file mode 100644 (file)
index 0000000..5b3cac0
--- /dev/null
@@ -0,0 +1,50 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_mempool.a
+
+CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3
+
+# all source are stored in SRCS-y
+SRCS-$(CONFIG_RTE_LIBRTE_MEMPOOL) := rte_mempool.c
+
+# install includes
+SYMLINK-$(CONFIG_RTE_LIBRTE_MEMPOOL)-include := rte_mempool.h
+
+# this lib needs eal
+DEPDIRS-$(CONFIG_RTE_LIBRTE_MEMPOOL) += lib/librte_eal lib/librte_ring
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
new file mode 100644 (file)
index 0000000..b0a3c99
--- /dev/null
@@ -0,0 +1,491 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <stdint.h>
+#include <stdarg.h>
+#include <inttypes.h>
+#include <errno.h>
+#include <sys/queue.h>
+
+#include <rte_common.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_atomic.h>
+#include <rte_launch.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_branch_prediction.h>
+#include <rte_ring.h>
+#include <rte_errno.h>
+#include <rte_string_fns.h>
+
+#include "rte_mempool.h"
+
+TAILQ_HEAD(rte_mempool_list, rte_mempool);
+
+/* global list of mempool (used for debug/dump) */
+static struct rte_mempool_list *mempool_list;
+
+/*
+ * Returns the greatest common divisor of a and b, using the iterative
+ * remainder form of Euclid's algorithm. By convention get_gcd(0, b) == b
+ * and get_gcd(a, 0) == a.
+ */
+static unsigned get_gcd(unsigned a, unsigned b)
+{
+       unsigned c;
+
+       if (0 == a)
+               return b;
+       if (0 == b)
+               return a;
+
+       /* ensure a >= b; not strictly required for correctness, it just
+        * saves one iteration of the loop below when a < b */
+       if (a < b) {
+               c = a;
+               a = b;
+               b = c;
+       }
+
+       /* classic Euclid: replace (a, b) with (b, a mod b) until b == 0 */
+       while (b != 0) {
+               c = a % b;
+               a = b;
+               b = c;
+       }
+
+       return a;
+}
+
+/*
+ * Depending on memory configuration, objects addresses are spread
+ * between channels and ranks in RAM: the pool allocator will add
+ * padding between objects. This function returns the new (padded)
+ * size of the object.
+ */
+static unsigned optimize_object_size(unsigned obj_size)
+{
+       unsigned nrank, nchan;
+       unsigned new_obj_size;
+
+       /* get number of channels; fall back to 1 when unknown (0) */
+       nchan = rte_memory_get_nchannel();
+       if (nchan == 0)
+               nchan = 1;
+
+       /* same fallback for the number of ranks */
+       nrank = rte_memory_get_nrank();
+       if (nrank == 0)
+               nrank = 1;
+
+       /* process new object size: work in cache-line units, then grow
+        * the size until it is coprime with both nrank*nchan and nchan,
+        * so consecutive objects land on different channels/ranks */
+       new_obj_size = (obj_size + CACHE_LINE_MASK) / CACHE_LINE_SIZE;
+       while (get_gcd(new_obj_size, nrank * nchan) != 1 ||
+                       get_gcd(nchan, new_obj_size) != 1)
+               new_obj_size++;
+       return new_obj_size * CACHE_LINE_SIZE;
+}
+
+/*
+ * Create the mempool: allocate a ring to hold the free objects, reserve
+ * a memzone sized for n objects (each with header/trailer padding) plus
+ * the pool structure and private area, then carve the zone into objects
+ * and enqueue them all in the ring.
+ * Returns the new pool, or NULL on error. NOTE(review): rte_errno is
+ * set on the tailq/cache-size failure paths, but NOT when the memzone
+ * reservation fails -- confirm whether callers rely on rte_errno here.
+ */
+struct rte_mempool *
+rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
+                  unsigned cache_size, unsigned private_data_size,
+                  rte_mempool_ctor_t *mp_init, void *mp_init_arg,
+                  rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg,
+                  int socket_id, unsigned flags)
+{
+       char mz_name[RTE_MEMZONE_NAMESIZE];
+       char rg_name[RTE_RING_NAMESIZE];
+       struct rte_mempool *mp;
+       struct rte_ring *r;
+       const struct rte_memzone *mz;
+       size_t mempool_size;
+       int mz_flags = RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY;
+       int rg_flags = 0;
+       uint32_t header_size, trailer_size;
+       uint32_t total_elt_size;
+       unsigned i;
+       void *obj;
+
+       /* compilation-time checks: the fast-path structures must stay
+        * cache-line aligned */
+       RTE_BUILD_BUG_ON((sizeof(struct rte_mempool) &
+                         CACHE_LINE_MASK) != 0);
+#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
+       RTE_BUILD_BUG_ON((sizeof(struct rte_mempool_cache) &
+                         CACHE_LINE_MASK) != 0);
+       RTE_BUILD_BUG_ON((offsetof(struct rte_mempool, local_cache) &
+                         CACHE_LINE_MASK) != 0);
+#endif
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+       RTE_BUILD_BUG_ON((sizeof(struct rte_mempool_debug_stats) &
+                         CACHE_LINE_MASK) != 0);
+       RTE_BUILD_BUG_ON((offsetof(struct rte_mempool, stats) &
+                         CACHE_LINE_MASK) != 0);
+#endif
+
+       /* check that we have an initialised tail queue; the global list
+        * is created lazily on first use */
+       if (mempool_list == NULL)
+               if ((mempool_list = RTE_TAILQ_RESERVE("RTE_MEMPOOL", \
+                               rte_mempool_list)) == NULL){
+                       rte_errno = E_RTE_NO_TAILQ;
+                       return NULL;
+               }
+
+       /* asked cache too big */
+       if (cache_size > RTE_MEMPOOL_CACHE_MAX_SIZE){
+               rte_errno = EINVAL;
+               return NULL;
+       }
+
+       /* "no cache align" imply "no spread" */
+       if (flags & MEMPOOL_F_NO_CACHE_ALIGN)
+               flags |= MEMPOOL_F_NO_SPREAD;
+
+       /* ring flags: map the pool's single-producer/consumer hints onto
+        * the underlying ring */
+       if (flags & MEMPOOL_F_SP_PUT)
+               rg_flags |= RING_F_SP_ENQ;
+       if (flags & MEMPOOL_F_SC_GET)
+               rg_flags |= RING_F_SC_DEQ;
+
+       /* allocate the ring that will be used to store objects.
+        * The ring size must be a power of two strictly greater than n,
+        * hence rte_align32pow2(n+1). */
+       /* Ring functions will return appropriate errors if we are
+        * running as a secondary process etc., so no checks made
+        * in this function for that condition */
+       rte_snprintf(rg_name, sizeof(rg_name), "MP_%s", name);
+       r = rte_ring_create(rg_name, rte_align32pow2(n+1), socket_id, rg_flags);
+       if (r == NULL)
+               return NULL;
+
+       /*
+        * In header, we have at least the pointer to the pool, and
+        * optionally a 64 bits cookie.
+        */
+       header_size = 0;
+       header_size += sizeof(struct rte_mempool *); /* ptr to pool */
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+       header_size += sizeof(uint64_t); /* cookie */
+#endif
+       if ((flags & MEMPOOL_F_NO_CACHE_ALIGN) == 0)
+               header_size = (header_size + CACHE_LINE_MASK) & (~CACHE_LINE_MASK);
+
+       /* trailer contains the cookie in debug mode */
+       trailer_size = 0;
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+       trailer_size += sizeof(uint64_t); /* cookie */
+#endif
+       /* element size is 8 bytes-aligned at least */
+       elt_size = (elt_size + 7) & (~7);
+
+       /* expand trailer so header + element + trailer fills whole cache
+        * lines */
+       if ((flags & MEMPOOL_F_NO_CACHE_ALIGN) == 0) {
+               total_elt_size = header_size + elt_size + trailer_size;
+               trailer_size += ((CACHE_LINE_SIZE -
+                                 (total_elt_size & CACHE_LINE_MASK)) &
+                                CACHE_LINE_MASK);
+       }
+
+       /*
+        * increase trailer to add padding between objects in order to
+        * spread them across memory channels/ranks
+        */
+       if ((flags & MEMPOOL_F_NO_SPREAD) == 0) {
+               unsigned new_size;
+               new_size = optimize_object_size(header_size + elt_size +
+                                               trailer_size);
+               trailer_size = new_size - header_size - elt_size;
+       }
+
+       /* this is the size of an object, including header and trailer */
+       total_elt_size = header_size + elt_size + trailer_size;
+
+       /* reserve a memory zone for this mempool: private data is
+        * cache-aligned */
+       private_data_size = (private_data_size +
+                            CACHE_LINE_MASK) & (~CACHE_LINE_MASK);
+       mempool_size = total_elt_size * n +
+               sizeof(struct rte_mempool) + private_data_size;
+       rte_snprintf(mz_name, sizeof(mz_name), "MP_%s", name);
+       mz = rte_memzone_reserve(mz_name, mempool_size, socket_id, mz_flags);
+
+       /*
+        * no more memory: in this case we lose the ring reserved above,
+        * as there is no way to free it
+        * NOTE(review): rte_errno is not set on this path
+        */
+       if (mz == NULL)
+               return NULL;
+
+       /* init the mempool structure; pool struct, private data and the
+        * object area all live in the single memzone */
+       mp = mz->addr;
+       memset(mp, 0, sizeof(*mp));
+       rte_snprintf(mp->name, sizeof(mp->name), "%s", name);
+       mp->phys_addr = mz->phys_addr;
+       mp->ring = r;
+       mp->size = n;
+       mp->flags = flags;
+       mp->bulk_default = 1;
+       mp->elt_size = elt_size;
+       mp->header_size = header_size;
+       mp->trailer_size = trailer_size;
+       mp->cache_size = cache_size;
+       mp->private_data_size = private_data_size;
+
+       /* call the initializer */
+       if (mp_init)
+               mp_init(mp, mp_init_arg);
+
+       /* fill the headers and trailers, and add objects in ring.
+        * obj points to the element payload; the header (pool pointer +
+        * optional cookie) sits in the header_size bytes just before it. */
+       obj = (char *)mp + sizeof(struct rte_mempool) + private_data_size;
+       for (i = 0; i < n; i++) {
+               struct rte_mempool **mpp;
+               obj = (char *)obj + header_size;
+
+               /* set mempool ptr in header */
+               mpp = __mempool_from_obj(obj);
+               *mpp = mp;
+
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+               __mempool_write_header_cookie(obj, 1);
+               __mempool_write_trailer_cookie(obj);
+#endif
+               /* call the initializer */
+               if (obj_init)
+                       obj_init(mp, obj_init_arg, obj, i);
+
+               /* enqueue in ring; single-producer variant is safe here
+                * since the pool is not yet published */
+               rte_ring_sp_enqueue(mp->ring, obj);
+               obj = (char *)obj + elt_size + trailer_size;
+       }
+
+       TAILQ_INSERT_TAIL(mempool_list, mp, next);
+       return mp;
+}
+
+/* Return the number of entries in the mempool: objects in the common
+ * ring plus, when per-lcore caches are enabled, objects held in every
+ * lcore's local cache. */
+unsigned
+rte_mempool_count(const struct rte_mempool *mp)
+{
+       unsigned count;
+
+       count = rte_ring_count(mp->ring);
+
+#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
+       {
+               unsigned lcore_id;
+               /* no caches configured for this pool: ring count is all */
+               if (mp->cache_size == 0)
+                       return count;
+
+               for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++)
+                       count += mp->local_cache[lcore_id].len;
+       }
+#endif
+
+       /*
+        * due to race condition (access to len is not locked), the
+        * total can be greater than size... so fix the result
+        */
+       if (count > mp->size)
+               return mp->size;
+       return count;
+}
+
+/* Dump the per-lcore cache status to stdout and return the total
+ * number of cached objects (0 when caches are compiled out). */
+static unsigned
+rte_mempool_dump_cache(const struct rte_mempool *mp)
+{
+#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
+       unsigned lcore_id;
+       unsigned count = 0;
+       unsigned cache_count;
+
+       printf("  cache infos:\n");
+       printf("    cache_size=%"PRIu32"\n", mp->cache_size);
+       for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+               cache_count = mp->local_cache[lcore_id].len;
+               printf("    cache_count[%u]=%u\n", lcore_id, cache_count);
+               count += cache_count;
+       }
+       printf("    total_cache_count=%u\n", count);
+       return count;
+#else
+       /* caches compiled out: mp is deliberately unused */
+       RTE_SET_USED(mp);
+       printf("  cache disabled\n");
+       return 0;
+#endif
+}
+
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+/* check cookies before and after objects */
+#ifndef __INTEL_COMPILER
+/* __mempool_check_cookies() takes non-const pointers; silence the
+ * cast-away-const warning for this function only */
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+static void
+mempool_audit_cookies(const struct rte_mempool *mp)
+{
+       unsigned i;
+       void *obj;
+       void * const *obj_table;
+
+       /* walk the object area exactly as it was laid out in
+        * rte_mempool_create(): header, element, trailer per object */
+       obj = (char *)mp + sizeof(struct rte_mempool) + mp->private_data_size;
+       for (i = 0; i < mp->size; i++) {
+               obj = (char *)obj + mp->header_size;
+               /* __mempool_check_cookies() expects a table of objects;
+                * pass a one-entry table pointing at obj */
+               obj_table = &obj;
+               __mempool_check_cookies(mp, obj_table, 1, 2);
+               obj = (char *)obj + mp->elt_size + mp->trailer_size;
+       }
+}
+#ifndef __INTEL_COMPILER
+#pragma GCC diagnostic error "-Wcast-qual"
+#endif
+#else
+/* debug disabled: cookie audit compiles to nothing */
+#define mempool_audit_cookies(mp) do {} while(0)
+#endif
+
+#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
+/* Check per-lcore cache length consistency; panics if a cache holds
+ * more objects than the configured cache size. */
+static void
+mempool_audit_cache(const struct rte_mempool *mp)
+{
+       /* check cache size consistency */
+       unsigned lcore_id;
+       for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+               if (mp->local_cache[lcore_id].len > mp->cache_size) {
+                       RTE_LOG(CRIT, MEMPOOL, "badness on cache[%u]\n",
+                               lcore_id);
+                       rte_panic("MEMPOOL: invalid cache len\n");
+               }
+       }
+}
+#else
+/* caches compiled out: cache audit compiles to nothing */
+#define mempool_audit_cache(mp) do {} while(0)
+#endif
+
+
+/* check the consistency of mempool (size, cookies, ...); both checks
+ * are no-ops when the corresponding feature is compiled out */
+void
+rte_mempool_audit(const struct rte_mempool *mp)
+{
+       mempool_audit_cache(mp);
+       mempool_audit_cookies(mp);
+}
+
+/* Dump the status of the mempool on the console: configuration,
+ * per-cache and ring occupancy, debug statistics when compiled in,
+ * then run a consistency audit. */
+void
+rte_mempool_dump(const struct rte_mempool *mp)
+{
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+       struct rte_mempool_debug_stats sum;
+       unsigned lcore_id;
+#endif
+       unsigned common_count;
+       unsigned cache_count;
+
+       printf("mempool <%s>@%p\n", mp->name, mp);
+       printf("  flags=%x\n", mp->flags);
+       printf("  ring=<%s>@%p\n", mp->ring->name, mp->ring);
+       printf("  size=%"PRIu32"\n", mp->size);
+       printf("  bulk_default=%"PRIu32"\n", mp->bulk_default);
+       printf("  header_size=%"PRIu32"\n", mp->header_size);
+       printf("  elt_size=%"PRIu32"\n", mp->elt_size);
+       printf("  trailer_size=%"PRIu32"\n", mp->trailer_size);
+       printf("  total_obj_size=%"PRIu32"\n",
+              mp->header_size + mp->elt_size + mp->trailer_size);
+
+       /* unlocked reads can transiently over-count; clamp the common
+        * pool count so cache + common never exceeds the pool size */
+       cache_count = rte_mempool_dump_cache(mp);
+       common_count = rte_ring_count(mp->ring);
+       if ((cache_count + common_count) > mp->size)
+               common_count = mp->size - cache_count;
+       printf("  common_pool_count=%u\n", common_count);
+
+       /* sum and dump statistics */
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+       memset(&sum, 0, sizeof(sum));
+       for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+               sum.put_bulk += mp->stats[lcore_id].put_bulk;
+               sum.put_objs += mp->stats[lcore_id].put_objs;
+               sum.get_success_bulk += mp->stats[lcore_id].get_success_bulk;
+               sum.get_success_objs += mp->stats[lcore_id].get_success_objs;
+               sum.get_fail_bulk += mp->stats[lcore_id].get_fail_bulk;
+               sum.get_fail_objs += mp->stats[lcore_id].get_fail_objs;
+       }
+       printf("  stats:\n");
+       printf("    put_bulk=%"PRIu64"\n", sum.put_bulk);
+       printf("    put_objs=%"PRIu64"\n", sum.put_objs);
+       printf("    get_success_bulk=%"PRIu64"\n", sum.get_success_bulk);
+       printf("    get_success_objs=%"PRIu64"\n", sum.get_success_objs);
+       printf("    get_fail_bulk=%"PRIu64"\n", sum.get_fail_bulk);
+       printf("    get_fail_objs=%"PRIu64"\n", sum.get_fail_objs);
+#else
+       printf("  no statistics available\n");
+#endif
+
+       rte_mempool_audit(mp);
+}
+
+/* Dump the status of all mempools on the console.
+ *
+ * Bug fix: the global mempool_list is only created lazily by
+ * rte_mempool_create()/rte_mempool_lookup(). If neither has run yet,
+ * mempool_list is still NULL and TAILQ_FOREACH() would dereference a
+ * NULL pointer. Treat that case as "no mempools to dump". */
+void
+rte_mempool_list_dump(void)
+{
+       const struct rte_mempool *mp = NULL;
+
+       /* list never initialised: nothing has been created, nothing to
+        * dump */
+       if (mempool_list == NULL)
+               return;
+
+       TAILQ_FOREACH(mp, mempool_list, next) {
+               rte_mempool_dump(mp);
+       }
+}
+
+/* Search a mempool from its name. Returns the pool, or NULL with
+ * rte_errno set to ENOENT when no pool of that name exists (or
+ * E_RTE_NO_TAILQ if the global list cannot be initialised). */
+struct rte_mempool *
+rte_mempool_lookup(const char *name)
+{
+       struct rte_mempool *mp = NULL;
+
+       /* check that we have an initialised tail queue; same lazy
+        * initialisation as in rte_mempool_create() */
+       if (mempool_list == NULL)
+               if ((mempool_list = RTE_TAILQ_RESERVE("RTE_MEMPOOL", \
+                               rte_mempool_list)) == NULL){
+                       rte_errno = E_RTE_NO_TAILQ;
+                       return NULL;
+               }
+
+       /* linear scan; names are bounded by RTE_MEMPOOL_NAMESIZE */
+       TAILQ_FOREACH(mp, mempool_list, next) {
+               if (strncmp(name, mp->name, RTE_MEMPOOL_NAMESIZE) == 0)
+                       break;
+       }
+       if (mp == NULL)
+               rte_errno = ENOENT;
+
+       return mp;
+}
diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
new file mode 100644 (file)
index 0000000..cfc62f7
--- /dev/null
@@ -0,0 +1,1087 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#ifndef _RTE_MEMPOOL_H_
+#define _RTE_MEMPOOL_H_
+
+/**
+ * @file
+ * RTE Mempool.
+ *
+ * A memory pool is an allocator of fixed-size objects. It is
+ * identified by its name, and uses a ring to store free objects. It
+ * provides some other optional services, like a per-core object
+ * cache, and an alignment helper to ensure that objects are padded
+ * to spread them equally on all RAM channels, ranks, and so on.
+ *
+ * Objects owned by a mempool should never be added in another
+ * mempool. When an object is freed using rte_mempool_put() or
+ * equivalent, the object data is not modified; the user can save some
+ * meta-data in the object data and retrieve them when allocating a
+ * new object.
+ *
+ * Note: the mempool implementation is not preemptable. A lcore must
+ * not be interrupted by another task that uses the same mempool
+ * (because it uses a ring which is not preemptable). Also, mempool
+ * functions must not be used outside the DPDK environment: for
+ * example, in linuxapp environment, a thread that is not created by
+ * the EAL must not use mempools. This is due to the per-lcore cache
+ * that won't work as rte_lcore_id() will not return a correct value.
+ */
+
+#include <stdlib.h>
+#include <stdint.h>
+#include <errno.h>
+#include <inttypes.h>
+#include <sys/queue.h>
+
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_memory.h>
+#include <rte_lcore.h>
+#include <rte_branch_prediction.h>
+#include <rte_ring.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define RTE_MEMPOOL_HEADER_COOKIE1  0xbadbadbadadd2e55ULL /**< Header cookie. */
+#define RTE_MEMPOOL_HEADER_COOKIE2  0xf2eef2eedadd2e55ULL /**< Header cookie. */
+#define RTE_MEMPOOL_TRAILER_COOKIE  0xadd2e55badbadbadULL /**< Trailer cookie.*/
+
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+/**
+ * A structure that stores the mempool statistics (per-lcore).
+ */
+struct rte_mempool_debug_stats {
+       uint64_t put_bulk;         /**< Number of puts. */
+       uint64_t put_objs;         /**< Number of objects successfully put. */
+       uint64_t get_success_bulk; /**< Successful allocation number. */
+       uint64_t get_success_objs; /**< Objects successfully allocated. */
+       uint64_t get_fail_bulk;    /**< Failed allocation number. */
+       uint64_t get_fail_objs;    /**< Objects that failed to be allocated. */
+} __rte_cache_aligned;
+#endif
+
+#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
+/**
+ * A structure that stores a per-core object cache.
+ */
+struct rte_mempool_cache {
+       unsigned len; /**< Cache len */
+       void *objs[RTE_MEMPOOL_CACHE_MAX_SIZE]; /**< Cache objects */
+} __rte_cache_aligned;
+#endif /* RTE_MEMPOOL_CACHE_MAX_SIZE > 0 */
+
+#define RTE_MEMPOOL_NAMESIZE 32 /**< Maximum length of a memory pool's name. */
+
+/**
+ * The RTE mempool structure.
+ */
+struct rte_mempool {
+       TAILQ_ENTRY(rte_mempool) next;   /**< Next in list. */
+
+       char name[RTE_MEMPOOL_NAMESIZE]; /**< Name of mempool. */
+       struct rte_ring *ring;           /**< Ring to store objects. */
+       phys_addr_t phys_addr;           /**< Phys. addr. of mempool struct. */
+       int flags;                       /**< Flags of the mempool. */
+       uint32_t size;                   /**< Size of the mempool. */
+       uint32_t bulk_default;           /**< Default bulk count. */
+       uint32_t cache_size;             /**< Size of per-lcore local cache. */
+
+       uint32_t elt_size;               /**< Size of an element. */
+       uint32_t header_size;            /**< Size of header (before elt). */
+       uint32_t trailer_size;           /**< Size of trailer (after elt). */
+
+       unsigned private_data_size;      /**< Size of private data. */
+
+#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
+       /** Per-lcore local cache. */
+       struct rte_mempool_cache local_cache[RTE_MAX_LCORE];
+#endif
+
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+       /** Per-lcore statistics. */
+       struct rte_mempool_debug_stats stats[RTE_MAX_LCORE];
+#endif
+}  __rte_cache_aligned;
+
+#define MEMPOOL_F_NO_SPREAD      0x0001 /**< Do not spread in memory. */
+#define MEMPOOL_F_NO_CACHE_ALIGN 0x0002 /**< Do not align objs on cache lines.*/
+#define MEMPOOL_F_SP_PUT         0x0004 /**< Default put is "single-producer".*/
+#define MEMPOOL_F_SC_GET         0x0008 /**< Default get is "single-consumer".*/
+
+/**
+ * When debug is enabled, store some statistics.
+ * @param mp
+ *   Pointer to the memory pool.
+ * @param name
+ *   Name of the statistics field to increment in the memory pool.
+ * @param n
+ *   Number to add to the object-oriented statistics.
+ */
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+#define __MEMPOOL_STAT_ADD(mp, name, n) do {                   \
+               unsigned __lcore_id = rte_lcore_id();           \
+               mp->stats[__lcore_id].name##_objs += n;         \
+               mp->stats[__lcore_id].name##_bulk += 1;         \
+       } while(0)
+#else
+#define __MEMPOOL_STAT_ADD(mp, name, n) do {} while(0)
+#endif
+
+/**
+ * Get a pointer to a mempool pointer in the object header.
+ * @param obj
+ *   Pointer to object.
+ * @return
+ *   The pointer to the mempool from which the object was allocated.
+ */
+static inline struct rte_mempool **__mempool_from_obj(void *obj)
+{
+       struct rte_mempool **mpp;
+       unsigned off;
+
+       off = sizeof(struct rte_mempool *);
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+       off += sizeof(uint64_t);
+#endif
+       mpp = (struct rte_mempool **)((char *)obj - off);
+       return mpp;
+}
+
+/**
+ * Return a pointer to the mempool owning this object.
+ *
+ * @param obj
+ *   An object that is owned by a pool. If this is not the case,
+ *   the behavior is undefined.
+ * @return
+ *   A pointer to the mempool structure.
+ */
+static inline const struct rte_mempool *rte_mempool_from_obj(void *obj)
+{
+       struct rte_mempool * const *mpp;
+       mpp = __mempool_from_obj(obj);
+       return *mpp;
+}
+
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+/* get header cookie value */
+static inline uint64_t __mempool_read_header_cookie(const void *obj)
+{
+       return *(const uint64_t *)((const char *)obj - sizeof(uint64_t));
+}
+
+/* get trailer cookie value */
+static inline uint64_t __mempool_read_trailer_cookie(void *obj)
+{
+       struct rte_mempool **mpp = __mempool_from_obj(obj);
+       return *(uint64_t *)((char *)obj + (*mpp)->elt_size);
+}
+
+/* write header cookie value */
+static inline void __mempool_write_header_cookie(void *obj, int free)
+{
+       uint64_t *cookie_p;
+       cookie_p = (uint64_t *)((char *)obj - sizeof(uint64_t));
+       if (free == 0)
+               *cookie_p = RTE_MEMPOOL_HEADER_COOKIE1;
+       else
+               *cookie_p = RTE_MEMPOOL_HEADER_COOKIE2;
+
+}
+
+/* write trailer cookie value */
+static inline void __mempool_write_trailer_cookie(void *obj)
+{
+       uint64_t *cookie_p;
+       struct rte_mempool **mpp = __mempool_from_obj(obj);
+       cookie_p = (uint64_t *)((char *)obj + (*mpp)->elt_size);
+       *cookie_p = RTE_MEMPOOL_TRAILER_COOKIE;
+}
+#endif /* RTE_LIBRTE_MEMPOOL_DEBUG */
+
+/**
+ * Check and update cookies or panic.
+ *
+ * @param mp
+ *   Pointer to the memory pool.
+ * @param obj_table_const
+ *   Pointer to a table of void * pointers (objects).
+ * @param n
+ *   Index of object in object table.
+ * @param free
+ *   - 0: object is supposed to be allocated, mark it as free
+ *   - 1: object is supposed to be free, mark it as allocated
+ *   - 2: just check that cookie is valid (free or allocated)
+ */
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+#ifndef __INTEL_COMPILER
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+static inline void __mempool_check_cookies(const struct rte_mempool *mp,
+                                          void * const *obj_table_const,
+                                          unsigned n, int free)
+{
+       uint64_t cookie;
+       void *tmp;
+       void *obj;
+       void **obj_table;
+
+       /* Force to drop the "const" attribute. This is done only when
+        * DEBUG is enabled */
+       tmp = (void *) obj_table_const;
+       obj_table = (void **) tmp;
+
+       while (n--) {
+               obj = obj_table[n];
+
+               if (rte_mempool_from_obj(obj) != mp)
+                       rte_panic("MEMPOOL: object is owned by another "
+                                 "mempool\n");
+
+               cookie = __mempool_read_header_cookie(obj);
+
+               if (free == 0) {
+                       if (cookie != RTE_MEMPOOL_HEADER_COOKIE1) {
+                               rte_log_set_history(0);
+                               RTE_LOG(CRIT, MEMPOOL,
+                                       "obj=%p, mempool=%p, cookie=%"PRIx64"\n",
+                                       obj, mp, cookie);
+                               rte_panic("MEMPOOL: bad header cookie (put)\n");
+                       }
+                       __mempool_write_header_cookie(obj, 1);
+               }
+               else if (free == 1) {
+                       if (cookie != RTE_MEMPOOL_HEADER_COOKIE2) {
+                               rte_log_set_history(0);
+                               RTE_LOG(CRIT, MEMPOOL,
+                                       "obj=%p, mempool=%p, cookie=%"PRIx64"\n",
+                                       obj, mp, cookie);
+                               rte_panic("MEMPOOL: bad header cookie (get)\n");
+                       }
+                       __mempool_write_header_cookie(obj, 0);
+               }
+               else if (free == 2) {
+                       if (cookie != RTE_MEMPOOL_HEADER_COOKIE1 &&
+                           cookie != RTE_MEMPOOL_HEADER_COOKIE2) {
+                               rte_log_set_history(0);
+                               RTE_LOG(CRIT, MEMPOOL,
+                                       "obj=%p, mempool=%p, cookie=%"PRIx64"\n",
+                                       obj, mp, cookie);
+                               rte_panic("MEMPOOL: bad header cookie (audit)\n");
+                       }
+               }
+               cookie = __mempool_read_trailer_cookie(obj);
+               if (cookie != RTE_MEMPOOL_TRAILER_COOKIE) {
+                       rte_log_set_history(0);
+                       RTE_LOG(CRIT, MEMPOOL,
+                               "obj=%p, mempool=%p, cookie=%"PRIx64"\n",
+                               obj, mp, cookie);
+                       rte_panic("MEMPOOL: bad trailer cookie\n");
+               }
+       }
+}
+#ifndef __INTEL_COMPILER
+#pragma GCC diagnostic error "-Wcast-qual"
+#endif
+#else
+#define __mempool_check_cookies(mp, obj_table_const, n, free) do {} while(0)
+#endif /* RTE_LIBRTE_MEMPOOL_DEBUG */
+
+/**
+ * An object constructor callback function for mempool.
+ *
+ * Arguments are the mempool, the opaque pointer given by the user in
+ * rte_mempool_create(), the pointer to the element and the index of
+ * the element in the pool.
+ */
+typedef void (rte_mempool_obj_ctor_t)(struct rte_mempool *, void *,
+                                     void *, unsigned);
+
+/**
+ * A mempool constructor callback function.
+ *
+ * Arguments are the mempool and the opaque pointer given by the user in
+ * rte_mempool_create().
+ */
+typedef void (rte_mempool_ctor_t)(struct rte_mempool *, void *);
+
+/**
+ * Creates a new mempool named *name* in memory.
+ *
+ * This function uses ``memzone_reserve()`` to allocate memory. The
+ * pool contains n elements of elt_size. Its size is set to n. By
+ * default, bulk_default_count (the default number of elements to
+ * get/put in the pool) is set to 1. @see rte_mempool_set_bulk_count()
+ * to modify this value.
+ *
+ * @param name
+ *   The name of the mempool.
+ * @param n
+ *   The number of elements in the mempool. The optimum size (in terms of
+ *   memory usage) for a mempool is when n is a power of two minus one:
+ *   n = (2^q - 1).
+ * @param elt_size
+ *   The size of each element.
+ * @param cache_size
+ *   If cache_size is non-zero, the rte_mempool library will try to
+ *   limit the accesses to the common lockless pool, by maintaining a
+ *   per-lcore object cache. This argument must be lower or equal to
+ *   CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE. It is advised to choose
+ *   cache_size to have "n modulo cache_size == 0": if this is
+ *   not the case, some elements will always stay in the pool and will
+ *   never be used. The access to the per-lcore table is of course
+ *   faster than the multi-producer/consumer pool. The cache can be
+ *   disabled if the cache_size argument is set to 0; it can be useful to
+ *   avoid losing objects in cache. Note that even if not used, the
+ *   memory space for cache is always reserved in a mempool structure,
+ *   except if CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE is set to 0.
+ * @param private_data_size
+ *   The size of the private data appended after the mempool
+ *   structure. This is useful for storing some private data after the
+ *   mempool structure, as is done for rte_mbuf_pool for example.
+ * @param mp_init
+ *   A function pointer that is called for initialization of the pool,
+ *   before object initialization. The user can initialize the private
+ *   data in this function if needed. This parameter can be NULL if
+ *   not needed.
+ * @param mp_init_arg
+ *   An opaque pointer to data that can be used in the mempool
+ *   constructor function.
+ * @param obj_init
+ *   A function pointer that is called for each object at
+ *   initialization of the pool. The user can set some meta data in
+ *   objects if needed. This parameter can be NULL if not needed.
+ *   The obj_init() function takes the mempool pointer, the init_arg,
+ *   the object pointer and the object number as parameters.
+ * @param obj_init_arg
+ *   An opaque pointer to data that can be used as an argument for
+ *   each call to the object constructor function.
+ * @param socket_id
+ *   The *socket_id* argument is the socket identifier in the case of
+ *   NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
+ *   constraint for the reserved zone.
+ * @param flags
+ *   The *flags* arguments is an OR of following flags:
+ *   - MEMPOOL_F_NO_SPREAD: By default, objects addresses are spread
+ *     between channels in RAM: the pool allocator will add padding
+ *     between objects depending on the hardware configuration. See
+ *     Memory alignment constraints for details. If this flag is set,
+ *     the allocator will just align them to a cache line.
+ *   - MEMPOOL_F_NO_CACHE_ALIGN: By default, the returned objects are
+ *     cache-aligned. This flag removes this constraint, and no
+ *     padding will be present between objects. This flag implies
+ *     MEMPOOL_F_NO_SPREAD.
+ *   - MEMPOOL_F_SP_PUT: If this flag is set, the default behavior
+ *     when using rte_mempool_put() or rte_mempool_put_bulk() is
+ *     "single-producer". Otherwise, it is "multi-producers".
+ *   - MEMPOOL_F_SC_GET: If this flag is set, the default behavior
+ *     when using rte_mempool_get() or rte_mempool_get_bulk() is
+ *     "single-consumer". Otherwise, it is "multi-consumers".
+ * @return
+ *   The pointer to the new allocated mempool, on success. NULL on error
+ *   with rte_errno set appropriately. Possible rte_errno values include:
+ *    - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
+ *    - E_RTE_SECONDARY - function was called from a secondary process instance
+ *    - E_RTE_NO_TAILQ - no tailq list could be got for the ring or mempool list
+ *    - EINVAL - cache size provided is too large
+ *    - ENOSPC - the maximum number of memzones has already been allocated
+ *    - EEXIST - a memzone with the same name already exists
+ *    - ENOMEM - no appropriate memory area found in which to create memzone
+ */
+struct rte_mempool *
+rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
+                  unsigned cache_size, unsigned private_data_size,
+                  rte_mempool_ctor_t *mp_init, void *mp_init_arg,
+                  rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg,
+                  int socket_id, unsigned flags);
+
+/**
+ * Set the default bulk count for put/get.
+ *
+ * The *count* parameter is the default number of bulk elements to
+ * get/put when using ``rte_mempool_*_{en,de}queue_bulk()``. It must
+ * be greater than 0 and less than the mempool size.
+ *
+ * @param mp
+ *   A pointer to the mempool structure.
+ * @param count
+ *   The new default bulk count value.
+ * @return
+ *   - 0: Success; default_bulk_count changed.
+ *   - -EINVAL: Invalid count value.
+ */
+static inline int
+rte_mempool_set_bulk_count(struct rte_mempool *mp, unsigned count)
+{
+       if (unlikely(count == 0 || count >= mp->size))
+               return -EINVAL;
+
+       mp->bulk_default = count;
+       return 0;
+}
+
+/**
+ * Get the default bulk count for put/get.
+ *
+ * @param mp
+ *   A pointer to the mempool structure.
+ * @return
+ *   The default bulk count for enqueue/dequeue.
+ */
+static inline unsigned
+rte_mempool_get_bulk_count(struct rte_mempool *mp)
+{
+       return mp->bulk_default;
+}
+
+/**
+ * Dump the status of the mempool to the console.
+ *
+ * @param mp
+ *   A pointer to the mempool structure.
+ */
+void rte_mempool_dump(const struct rte_mempool *mp);
+
+/**
+ * @internal Put several objects back in the mempool; used internally.
+ * @param mp
+ *   A pointer to the mempool structure.
+ * @param obj_table
+ *   A pointer to a table of void * pointers (objects).
+ * @param n
+ *   The number of objects to store back in the mempool, must be strictly
+ *   positive.
+ * @param is_mp
+ *   Mono-producer (0) or multi-producers (1).
+ */
+static inline void
+__mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
+                   unsigned n, int is_mp)
+{
+#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
+       struct rte_mempool_cache *cache;
+       uint32_t cache_len;
+       void **cache_objs;
+       unsigned lcore_id = rte_lcore_id();
+       uint32_t cache_size = mp->cache_size;
+       uint32_t cache_add_count;
+#endif /* RTE_MEMPOOL_CACHE_MAX_SIZE > 0 */
+
+       /* increment stat now; adding to the mempool always succeeds */
+       __MEMPOOL_STAT_ADD(mp, put, n);
+
+#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
+       /* cache is not enabled or single producer */
+       if (unlikely(cache_size == 0 || is_mp == 0))
+               goto ring_enqueue;
+
+       cache = &mp->local_cache[lcore_id];
+       cache_len = cache->len;
+       cache_objs = cache->objs;
+
+       /* cache is full and we add many objects: enqueue in ring */
+       if (unlikely(cache_len == cache_size && n >= cache_size))
+               goto ring_enqueue;
+
+       /*
+        * cache is full and we add few objects: enqueue the content
+        * of the cache in ring
+        */
+       if (unlikely(cache_len == cache_size)) {
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+               if (rte_ring_mp_enqueue_bulk(mp->ring, cache->objs,
+                                            cache_size) < 0)
+                       rte_panic("cannot put objects in mempool\n");
+#else
+               rte_ring_mp_enqueue_bulk(mp->ring, cache->objs,
+                                        cache_size);
+#endif
+               cache_len = 0;
+       }
+
+       /* determine how many objects we can add in cache */
+       if (likely(n <= cache_size - cache_len))
+               cache_add_count = n;
+       else
+               cache_add_count = cache_size - cache_len;
+
+       /* add in cache while there is enough room */
+       while (cache_add_count > 0) {
+               cache_objs[cache_len] = *obj_table;
+               obj_table++;
+               cache_len++;
+               n--;
+               cache_add_count--;
+       }
+
+       cache->len = cache_len;
+
+       /* no more object to add, return */
+       if (likely(n == 0))
+               return;
+
+ ring_enqueue:
+#endif /* RTE_MEMPOOL_CACHE_MAX_SIZE > 0 */
+
+       /* push remaining objects in ring */
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+       if (is_mp) {
+               if (rte_ring_mp_enqueue_bulk(mp->ring, obj_table, n) < 0)
+                       rte_panic("cannot put objects in mempool\n");
+       }
+       else {
+               if (rte_ring_sp_enqueue_bulk(mp->ring, obj_table, n) < 0)
+                       rte_panic("cannot put objects in mempool\n");
+       }
+#else
+       if (is_mp)
+               rte_ring_mp_enqueue_bulk(mp->ring, obj_table, n);
+       else
+               rte_ring_sp_enqueue_bulk(mp->ring, obj_table, n);
+#endif
+}
+
+
+/**
+ * Put several objects back in the mempool (multi-producers safe).
+ *
+ * @param mp
+ *   A pointer to the mempool structure.
+ * @param obj_table
+ *   A pointer to a table of void * pointers (objects).
+ * @param n
+ *   The number of objects to add in the mempool from the obj_table.
+ */
+static inline void
+rte_mempool_mp_put_bulk(struct rte_mempool *mp, void * const *obj_table,
+                       unsigned n)
+{
+       __mempool_check_cookies(mp, obj_table, n, 0);
+       __mempool_put_bulk(mp, obj_table, n, 1);
+}
+
+/**
+ * Put several objects back in the mempool (NOT multi-producers safe).
+ *
+ * @param mp
+ *   A pointer to the mempool structure.
+ * @param obj_table
+ *   A pointer to a table of void * pointers (objects).
+ * @param n
+ *   The number of objects to add in the mempool from obj_table.
+ */
+static inline void
+rte_mempool_sp_put_bulk(struct rte_mempool *mp, void * const *obj_table,
+                       unsigned n)
+{
+       __mempool_check_cookies(mp, obj_table, n, 0);
+       __mempool_put_bulk(mp, obj_table, n, 0);
+}
+
+/**
+ * Put several objects back in the mempool.
+ *
+ * This function calls the multi-producer or the single-producer
+ * version depending on the default behavior that was specified at
+ * mempool creation time (see flags).
+ *
+ * @param mp
+ *   A pointer to the mempool structure.
+ * @param obj_table
+ *   A pointer to a table of void * pointers (objects).
+ * @param n
+ *   The number of objects to add in the mempool from obj_table.
+ */
+static inline void
+rte_mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
+                    unsigned n)
+{
+       __mempool_check_cookies(mp, obj_table, n, 0);
+       __mempool_put_bulk(mp, obj_table, n, !(mp->flags & MEMPOOL_F_SP_PUT));
+}
+
+/**
+ * Put one object in the mempool (multi-producers safe).
+ *
+ * @param mp
+ *   A pointer to the mempool structure.
+ * @param obj
+ *   A pointer to the object to be added.
+ */
+static inline void
+rte_mempool_mp_put(struct rte_mempool *mp, void *obj)
+{
+       rte_mempool_mp_put_bulk(mp, &obj, 1);
+}
+
+/**
+ * Put one object back in the mempool (NOT multi-producers safe).
+ *
+ * @param mp
+ *   A pointer to the mempool structure.
+ * @param obj
+ *   A pointer to the object to be added.
+ */
+static inline void
+rte_mempool_sp_put(struct rte_mempool *mp, void *obj)
+{
+       rte_mempool_sp_put_bulk(mp, &obj, 1);
+}
+
+/**
+ * Put one object back in the mempool.
+ *
+ * This function calls the multi-producer or the single-producer
+ * version depending on the default behavior that was specified at
+ * mempool creation time (see flags).
+ *
+ * @param mp
+ *   A pointer to the mempool structure.
+ * @param obj
+ *   A pointer to the object to be added.
+ */
+static inline void
+rte_mempool_put(struct rte_mempool *mp, void *obj)
+{
+       rte_mempool_put_bulk(mp, &obj, 1);
+}
+
+/**
+ * @internal Get several objects from the mempool; used internally.
+ * @param mp
+ *   A pointer to the mempool structure.
+ * @param obj_table
+ *   A pointer to a table of void * pointers (objects).
+ * @param n
+ *   The number of objects to get, must be strictly positive.
+ * @param is_mc
+ *   Mono-consumer (0) or multi-consumers (1).
+ * @return
+ *   - >=0: Success; number of objects supplied.
+ *   - <0: Error; code of ring dequeue function.
+ */
+static inline int
+__mempool_get_bulk(struct rte_mempool *mp, void **obj_table,
+                  unsigned n, int is_mc)
+{
+       int ret;
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+       unsigned n_orig = n;
+#endif
+#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
+       struct rte_mempool_cache *cache;
+       uint32_t cache_len, cache_len_save = 0;
+       void **cache_objs;
+       unsigned lcore_id = rte_lcore_id();
+       uint32_t cache_size = mp->cache_size;
+       uint32_t cache_del_count;
+
+       cache = &mp->local_cache[lcore_id];
+
+       /* cache is not enabled or single consumer */
+       if (unlikely(cache_size == 0 || is_mc == 0))
+               goto ring_dequeue;
+
+       cache_len = cache->len;
+       cache_objs = cache->objs;
+
+       /* cache is empty and we need many objects: dequeue from ring */
+       if (unlikely(cache_len == 0 && n >= cache_size))
+               goto ring_dequeue;
+
+       /* cache is empty and we dequeue few objects: fill the cache first */
+       if (unlikely(cache_len == 0 && n < cache_size)) {
+               ret = rte_ring_mc_dequeue_bulk(mp->ring, cache_objs,
+                                              cache_size);
+               if (unlikely(ret < 0)) {
+                       __MEMPOOL_STAT_ADD(mp, get_fail, n_orig);
+                       return ret;
+               }
+
+               cache_len = cache_size;
+       }
+
+       if (likely(n <= cache_len))
+               cache_del_count = n;
+       else
+               cache_del_count = cache_len;
+
+       cache_len_save = cache_len;
+
+       /* remove objects from the cache */
+       while (cache_del_count > 0) {
+               cache_len--;
+               *obj_table = cache_objs[cache_len];
+               obj_table++;
+               n--;
+               cache_del_count--;
+       }
+
+       cache->len = cache_len;
+
+       /* no more object to get, return */
+       if (likely(n == 0)) {
+               __MEMPOOL_STAT_ADD(mp, get_success, n_orig);
+               return 0;
+       }
+
+ ring_dequeue:
+#endif /* RTE_MEMPOOL_CACHE_MAX_SIZE > 0 */
+
+       /* get remaining objects from ring */
+       if (is_mc)
+               ret = rte_ring_mc_dequeue_bulk(mp->ring, obj_table, n);
+       else
+               ret = rte_ring_sc_dequeue_bulk(mp->ring, obj_table, n);
+
+#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
+       /*
+        * bad luck, the ring is empty but we already dequeued some
+        * entries from cache, we have to restore them
+        */
+       if (unlikely(ret < 0 && cache_len_save != 0))
+               cache->len = cache_len_save;
+#endif
+
+       if (ret < 0)
+               __MEMPOOL_STAT_ADD(mp, get_fail, n_orig);
+       else
+               __MEMPOOL_STAT_ADD(mp, get_success, n_orig);
+
+       return ret;
+}
+
+/**
+ * Get several objects from the mempool (multi-consumers safe).
+ *
+ * If cache is enabled, objects will be retrieved first from cache,
+ * subsequently from the common pool. Note that it can return -ENOENT when
+ * the local cache and common pool are empty, even if caches from other
+ * lcores are full.
+ *
+ * @param mp
+ *   A pointer to the mempool structure.
+ * @param obj_table
+ *   A pointer to a table of void * pointers (objects) that will be filled.
+ * @param n
+ *   The number of objects to get from mempool to obj_table.
+ * @return
+ *   - 0: Success; objects taken.
+ *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
+ */
+static inline int
+rte_mempool_mc_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
+{
+       int ret;
+       ret = __mempool_get_bulk(mp, obj_table, n, 1);
+       if (ret == 0)
+               __mempool_check_cookies(mp, obj_table, n, 1);
+       return ret;
+}
+
+/**
+ * Get several objects from the mempool (NOT multi-consumers safe).
+ *
+ * If cache is enabled, objects will be retrieved first from cache,
+ * subsequently from the common pool. Note that it can return -ENOENT when
+ * the local cache and common pool are empty, even if caches from other
+ * lcores are full.
+ *
+ * @param mp
+ *   A pointer to the mempool structure.
+ * @param obj_table
+ *   A pointer to a table of void * pointers (objects) that will be filled.
+ * @param n
+ *   The number of objects to get from the mempool to obj_table.
+ * @return
+ *   - 0: Success; objects taken.
+ *   - -ENOENT: Not enough entries in the mempool; no object is
+ *     retrieved.
+ */
+static inline int
+rte_mempool_sc_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
+{
+       int ret;
+       ret = __mempool_get_bulk(mp, obj_table, n, 0);
+       if (ret == 0)
+               __mempool_check_cookies(mp, obj_table, n, 1);
+       return ret;
+}
+
+/**
+ * Get several objects from the mempool.
+ *
+ * This function calls the multi-consumers or the single-consumer
+ * version, depending on the default behaviour that was specified at
+ * mempool creation time (see flags).
+ *
+ * If cache is enabled, objects will be retrieved first from cache,
+ * subsequently from the common pool. Note that it can return -ENOENT when
+ * the local cache and common pool are empty, even if caches from other
+ * lcores are full.
+ *
+ * @param mp
+ *   A pointer to the mempool structure.
+ * @param obj_table
+ *   A pointer to a table of void * pointers (objects) that will be filled.
+ * @param n
+ *   The number of objects to get from the mempool to obj_table.
+ * @return
+ *   - 0: Success; objects taken
+ *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
+ */
+static inline int
+rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
+{
+       int ret;
+       ret = __mempool_get_bulk(mp, obj_table, n,
+                                !(mp->flags & MEMPOOL_F_SC_GET));
+       if (ret == 0)
+               __mempool_check_cookies(mp, obj_table, n, 1);
+       return ret;
+}
+
+/**
+ * Get one object from the mempool (multi-consumers safe).
+ *
+ * If cache is enabled, objects will be retrieved first from cache,
+ * subsequently from the common pool. Note that it can return -ENOENT when
+ * the local cache and common pool are empty, even if caches from other
+ * lcores are full.
+ *
+ * @param mp
+ *   A pointer to the mempool structure.
+ * @param obj_p
+ *   A pointer to a void * pointer (object) that will be filled.
+ * @return
+ *   - 0: Success; objects taken.
+ *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
+ */
+static inline int
+rte_mempool_mc_get(struct rte_mempool *mp, void **obj_p)
+{
+       return rte_mempool_mc_get_bulk(mp, obj_p, 1);
+}
+
+/**
+ * Get one object from the mempool (NOT multi-consumers safe).
+ *
+ * If cache is enabled, objects will be retrieved first from cache,
+ * subsequently from the common pool. Note that it can return -ENOENT when
+ * the local cache and common pool are empty, even if cache from other
+ * lcores are full.
+ *
+ * @param mp
+ *   A pointer to the mempool structure.
+ * @param obj_p
+ *   A pointer to a void * pointer (object) that will be filled.
+ * @return
+ *   - 0: Success; objects taken.
+ *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
+ */
+static inline int
+rte_mempool_sc_get(struct rte_mempool *mp, void **obj_p)
+{
+       return rte_mempool_sc_get_bulk(mp, obj_p, 1);
+}
+
+/**
+ * Get one object from the mempool.
+ *
+ * This function calls the multi-consumers or the single-consumer
+ * version, depending on the default behavior that was specified at
+ * mempool creation (see flags).
+ *
+ * If cache is enabled, objects will be retrieved first from cache,
+ * subsequently from the common pool. Note that it can return -ENOENT when
+ * the local cache and common pool are empty, even if cache from other
+ * lcores are full.
+ *
+ * @param mp
+ *   A pointer to the mempool structure.
+ * @param obj_p
+ *   A pointer to a void * pointer (object) that will be filled.
+ * @return
+ *   - 0: Success; objects taken.
+ *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
+ */
+static inline int
+rte_mempool_get(struct rte_mempool *mp, void **obj_p)
+{
+       return rte_mempool_get_bulk(mp, obj_p, 1);
+}
+
+/**
+ * Return the number of entries in the mempool.
+ *
+ * When cache is enabled, this function has to browse the length of
+ * all lcores, so it should not be used in a data path, but only for
+ * debug purposes.
+ *
+ * @param mp
+ *   A pointer to the mempool structure.
+ * @return
+ *   The number of entries in the mempool.
+ */
+unsigned rte_mempool_count(const struct rte_mempool *mp);
+
+/**
+ * Return the number of free entries in the mempool.
+ *
+ * When cache is enabled, this function has to browse the length of
+ * all lcores, so it should not be used in a data path, but only for
+ * debug purposes.
+ *
+ * @param mp
+ *   A pointer to the mempool structure.
+ * @return
+ *   The number of free entries in the mempool.
+ */
+static inline unsigned
+rte_mempool_free_count(const struct rte_mempool *mp)
+{
+       return mp->size - rte_mempool_count(mp);
+}
+
+/**
+ * Test if the mempool is full.
+ *
+ * When cache is enabled, this function has to browse the length of all
+ * lcores, so it should not be used in a data path, but only for debug
+ * purposes.
+ *
+ * @param mp
+ *   A pointer to the mempool structure.
+ * @return
+ *   - 1: The mempool is full.
+ *   - 0: The mempool is not full.
+ */
+static inline int
+rte_mempool_full(const struct rte_mempool *mp)
+{
+       return !!(rte_mempool_count(mp) == mp->size);
+}
+
+/**
+ * Test if the mempool is empty.
+ *
+ * When cache is enabled, this function has to browse the length of all
+ * lcores, so it should not be used in a data path, but only for debug
+ * purposes.
+ *
+ * @param mp
+ *   A pointer to the mempool structure.
+ * @return
+ *   - 1: The mempool is empty.
+ *   - 0: The mempool is not empty.
+ */
+static inline int
+rte_mempool_empty(const struct rte_mempool *mp)
+{
+       return !!(rte_mempool_count(mp) == 0);
+}
+
+/**
+ * Return the physical address of elt, which is an element of the pool mp.
+ *
+ * @param mp
+ *   A pointer to the mempool structure.
+ * @param elt
+ *   A pointer (virtual address) to the element of the pool.
+ * @return
+ *   The physical address of the elt element.
+ */
+static inline phys_addr_t rte_mempool_virt2phy(const struct rte_mempool *mp,
+       const void *elt)
+{
+       uintptr_t off;
+
+       off = (const char *)elt - (const char *)mp;
+       return mp->phys_addr + off;
+}
+
+
+/**
+ * Check the consistency of mempool objects.
+ *
+ * Verify the coherency of fields in the mempool structure. Also check
+ * that the cookies of mempool objects (even the ones that are not
+ * present in pool) have a correct value. If not, a panic will occur.
+ *
+ * @param mp
+ *   A pointer to the mempool structure.
+ */
+void rte_mempool_audit(const struct rte_mempool *mp);
+
+/**
+ * Return a pointer to the private data in a mempool structure.
+ *
+ * @param mp
+ *   A pointer to the mempool structure.
+ * @return
+ *   A pointer to the private data.
+ */
+static inline void *rte_mempool_get_priv(struct rte_mempool *mp)
+{
+       return (char *)mp + sizeof(struct rte_mempool);
+}
+
+/**
+ * Dump the status of all mempools on the console
+ */
+void rte_mempool_list_dump(void);
+
+/**
+ * Search a mempool from its name
+ *
+ * @param name
+ *   The name of the mempool.
+ * @return
+ *   The pointer to the mempool matching the name, or NULL on error
+ *   with rte_errno set appropriately. Possible rte_errno values include:
+ *    - ENOENT - required entry not available to return.
+ *
+ */
+struct rte_mempool *rte_mempool_lookup(const char *name);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_MEMPOOL_H_ */
diff --git a/lib/librte_net/Makefile b/lib/librte_net/Makefile
new file mode 100644 (file)
index 0000000..230af6c
--- /dev/null
@@ -0,0 +1,42 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3
+
+# install includes
+SYMLINK-$(CONFIG_RTE_LIBRTE_NET)-include := rte_ip.h rte_tcp.h rte_udp.h rte_sctp.h
+
+
+include $(RTE_SDK)/mk/rte.install.mk
diff --git a/lib/librte_net/rte_ip.h b/lib/librte_net/rte_ip.h
new file mode 100644 (file)
index 0000000..2689397
--- /dev/null
@@ -0,0 +1,255 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+/*
+ * Copyright (c) 1982, 1986, 1990, 1993
+ *      The Regents of the University of California.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *      This product includes software developed by the University of
+ *      California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *      @(#)in.h        8.3 (Berkeley) 1/3/94
+ * $FreeBSD: src/sys/netinet/in.h,v 1.82 2003/10/25 09:37:10 ume Exp $
+ */
+
+#ifndef _RTE_IP_H_
+#define _RTE_IP_H_
+
+/**
+ * @file
+ *
+ * IP-related defines
+ */
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * IPv4 Header
+ */
+struct ipv4_hdr {
+       uint8_t  version_ihl;           /**< version and header length */
+       uint8_t  type_of_service;       /**< type of service */
+       uint16_t total_length;          /**< length of packet */
+       uint16_t packet_id;             /**< packet ID */
+       uint16_t fragment_offset;       /**< fragmentation offset */
+       uint8_t  time_to_live;          /**< time to live */
+       uint8_t  next_proto_id;         /**< protocol ID */
+       uint16_t hdr_checksum;          /**< header checksum */
+       uint32_t src_addr;              /**< source address */
+       uint32_t dst_addr;              /**< destination address */
+} __attribute__((__packed__));
+
+/** Create IPv4 address */
+#define IPv4(a,b,c,d) ((uint32_t)(((a) & 0xff) << 24) | \
+                                          (((b) & 0xff) << 16) | \
+                                          (((c) & 0xff) << 8)  | \
+                                          ((d) & 0xff))
+
+/* IPv4 protocols */
+#define IPPROTO_IP         0  /**< dummy for IP */
+#define IPPROTO_HOPOPTS    0  /**< IP6 hop-by-hop options */
+#define IPPROTO_ICMP       1  /**< control message protocol */
+#define IPPROTO_IGMP       2  /**< group mgmt protocol */
+#define IPPROTO_GGP        3  /**< gateway^2 (deprecated) */
+#define IPPROTO_IPV4       4  /**< IPv4 encapsulation */
+#define IPPROTO_TCP        6  /**< tcp */
+#define IPPROTO_ST         7  /**< Stream protocol II */
+#define IPPROTO_EGP        8  /**< exterior gateway protocol */
+#define IPPROTO_PIGP       9  /**< private interior gateway */
+#define IPPROTO_RCCMON    10  /**< BBN RCC Monitoring */
+#define IPPROTO_NVPII     11  /**< network voice protocol*/
+#define IPPROTO_PUP       12  /**< pup */
+#define IPPROTO_ARGUS     13  /**< Argus */
+#define IPPROTO_EMCON     14  /**< EMCON */
+#define IPPROTO_XNET      15  /**< Cross Net Debugger */
+#define IPPROTO_CHAOS     16  /**< Chaos*/
+#define IPPROTO_UDP       17  /**< user datagram protocol */
+#define IPPROTO_MUX       18  /**< Multiplexing */
+#define IPPROTO_MEAS      19  /**< DCN Measurement Subsystems */
+#define IPPROTO_HMP       20  /**< Host Monitoring */
+#define IPPROTO_PRM       21  /**< Packet Radio Measurement */
+#define IPPROTO_IDP       22  /**< xns idp */
+#define IPPROTO_TRUNK1    23  /**< Trunk-1 */
+#define IPPROTO_TRUNK2    24  /**< Trunk-2 */
+#define IPPROTO_LEAF1     25  /**< Leaf-1 */
+#define IPPROTO_LEAF2     26  /**< Leaf-2 */
+#define IPPROTO_RDP       27  /**< Reliable Data */
+#define IPPROTO_IRTP      28  /**< Reliable Transaction */
+#define IPPROTO_TP        29  /**< tp-4 w/ class negotiation */
+#define IPPROTO_BLT       30  /**< Bulk Data Transfer */
+#define IPPROTO_NSP       31  /**< Network Services */
+#define IPPROTO_INP       32  /**< Merit Internodal */
+#define IPPROTO_SEP       33  /**< Sequential Exchange */
+#define IPPROTO_3PC       34  /**< Third Party Connect */
+#define IPPROTO_IDPR      35  /**< InterDomain Policy Routing */
+#define IPPROTO_XTP       36  /**< XTP */
+#define IPPROTO_DDP       37  /**< Datagram Delivery */
+#define IPPROTO_CMTP      38  /**< Control Message Transport */
+#define IPPROTO_TPXX      39  /**< TP++ Transport */
+#define IPPROTO_IL        40  /**< IL transport protocol */
+#define IPPROTO_IPV6      41  /**< IP6 header */
+#define IPPROTO_SDRP      42  /**< Source Demand Routing */
+#define IPPROTO_ROUTING   43  /**< IP6 routing header */
+#define IPPROTO_FRAGMENT  44  /**< IP6 fragmentation header */
+#define IPPROTO_IDRP      45  /**< InterDomain Routing*/
+#define IPPROTO_RSVP      46  /**< resource reservation */
+#define IPPROTO_GRE       47  /**< General Routing Encap. */
+#define IPPROTO_MHRP      48  /**< Mobile Host Routing */
+#define IPPROTO_BHA       49  /**< BHA */
+#define IPPROTO_ESP       50  /**< IP6 Encap Sec. Payload */
+#define IPPROTO_AH        51  /**< IP6 Auth Header */
+#define IPPROTO_INLSP     52  /**< Integ. Net Layer Security */
+#define IPPROTO_SWIPE     53  /**< IP with encryption */
+#define IPPROTO_NHRP      54  /**< Next Hop Resolution */
+/* 55-57: Unassigned */
+#define IPPROTO_ICMPV6    58  /**< ICMP6 */
+#define IPPROTO_NONE      59  /**< IP6 no next header */
+#define IPPROTO_DSTOPTS   60  /**< IP6 destination option */
+#define IPPROTO_AHIP      61  /**< any host internal protocol */
+#define IPPROTO_CFTP      62  /**< CFTP */
+#define IPPROTO_HELLO     63  /**< "hello" routing protocol */
+#define IPPROTO_SATEXPAK  64  /**< SATNET/Backroom EXPAK */
+#define IPPROTO_KRYPTOLAN 65  /**< Kryptolan */
+#define IPPROTO_RVD       66  /**< Remote Virtual Disk */
+#define IPPROTO_IPPC      67  /**< Pluribus Packet Core */
+#define IPPROTO_ADFS      68  /**< Any distributed FS */
+#define IPPROTO_SATMON    69  /**< Satnet Monitoring */
+#define IPPROTO_VISA      70  /**< VISA Protocol */
+#define IPPROTO_IPCV      71  /**< Packet Core Utility */
+#define IPPROTO_CPNX      72  /**< Comp. Prot. Net. Executive */
+#define IPPROTO_CPHB      73  /**< Comp. Prot. HeartBeat */
+#define IPPROTO_WSN       74  /**< Wang Span Network */
+#define IPPROTO_PVP       75  /**< Packet Video Protocol */
+#define IPPROTO_BRSATMON  76  /**< BackRoom SATNET Monitoring */
+#define IPPROTO_ND        77  /**< Sun net disk proto (temp.) */
+#define IPPROTO_WBMON     78  /**< WIDEBAND Monitoring */
+#define IPPROTO_WBEXPAK   79  /**< WIDEBAND EXPAK */
+#define IPPROTO_EON       80  /**< ISO cnlp */
+#define IPPROTO_VMTP      81  /**< VMTP */
+#define IPPROTO_SVMTP     82  /**< Secure VMTP */
+#define IPPROTO_VINES     83  /**< Banyon VINES */
+#define IPPROTO_TTP       84  /**< TTP */
+#define IPPROTO_IGP       85  /**< NSFNET-IGP */
+#define IPPROTO_DGP       86  /**< dissimilar gateway prot. */
+#define IPPROTO_TCF       87  /**< TCF */
+#define IPPROTO_IGRP      88  /**< Cisco/GXS IGRP */
+#define IPPROTO_OSPFIGP   89  /**< OSPFIGP */
+#define IPPROTO_SRPC      90  /**< Strite RPC protocol */
+#define IPPROTO_LARP      91  /**< Locus Address Resoloution */
+#define IPPROTO_MTP       92  /**< Multicast Transport */
+#define IPPROTO_AX25      93  /**< AX.25 Frames */
+#define IPPROTO_IPEIP     94  /**< IP encapsulated in IP */
+#define IPPROTO_MICP      95  /**< Mobile Int.ing control */
+#define IPPROTO_SCCSP     96  /**< Semaphore Comm. security */
+#define IPPROTO_ETHERIP   97  /**< Ethernet IP encapsulation */
+#define IPPROTO_ENCAP     98  /**< encapsulation header */
+#define IPPROTO_APES      99  /**< any private encr. scheme */
+#define IPPROTO_GMTP     100  /**< GMTP */
+#define IPPROTO_IPCOMP   108  /**< payload compression (IPComp) */
+/* 101-254: Partly Unassigned */
+#define IPPROTO_PIM      103  /**< Protocol Independent Mcast */
+#define IPPROTO_PGM      113  /**< PGM */
+#define IPPROTO_SCTP     132  /**< Stream Control Transport Protocol */
+/* 255: Reserved */
+/* BSD Private, local use, namespace incursion */
+#define IPPROTO_DIVERT   254  /**< divert pseudo-protocol */
+#define IPPROTO_RAW      255  /**< raw IP packet */
+#define IPPROTO_MAX      256  /**< maximum protocol number */
+
+/*
+ * IPv4 address types
+ */
+#define IPV4_ANY              ((uint32_t)0x00000000) /**< 0.0.0.0 */
+#define IPV4_LOOPBACK         ((uint32_t)0x7f000001) /**< 127.0.0.1 */
+#define IPV4_BROADCAST        ((uint32_t)0xe0000000) /**< 224.0.0.0; NOTE(review): name suggests limited broadcast 255.255.255.255 (0xffffffff), but value is the multicast base — confirm intent */
+#define IPV4_ALLHOSTS_GROUP   ((uint32_t)0xe0000001) /**< 224.0.0.1 */
+#define IPV4_ALLRTRS_GROUP    ((uint32_t)0xe0000002) /**< 224.0.0.2 */
+#define IPV4_MAX_LOCAL_GROUP  ((uint32_t)0xe00000ff) /**< 224.0.0.255 */
+
+/*
+ * IPv4 Multicast-related macros
+ */
+#define IPV4_MIN_MCAST  IPv4(224, 0, 0, 0)          /**< Minimal IPv4-multicast address */
+#define IPV4_MAX_MCAST  IPv4(239, 255, 255, 255)    /**< Maximum IPv4 multicast address */
+
+#define IS_IPV4_MCAST(x) \
+       ((x) >= IPV4_MIN_MCAST && (x) <= IPV4_MAX_MCAST) /**< check if IPv4 address is multicast */
+
+/**
+ * IPv6 Header
+ */
+struct ipv6_hdr {
+       uint32_t vtc_flow;     /**< IP version, traffic class & flow label. */
+       uint16_t payload_len;  /**< IP packet length - includes sizeof(ip_header). */
+       uint8_t  proto;        /**< Protocol, next header. */
+       uint8_t  hop_limits;   /**< Hop limits. */
+       uint8_t  src_addr[16]; /**< IP address of source host. */
+       uint8_t  dst_addr[16]; /**< IP address of destination host(s). */
+} __attribute__((__packed__));
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_IP_H_ */
diff --git a/lib/librte_net/rte_sctp.h b/lib/librte_net/rte_sctp.h
new file mode 100644 (file)
index 0000000..da7b562
--- /dev/null
@@ -0,0 +1,101 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+/*
+ * Copyright (c) 1982, 1986, 1990, 1993
+ *      The Regents of the University of California.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *      This product includes software developed by the University of
+ *      California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *      @(#)in.h        8.3 (Berkeley) 1/3/94
+ * $FreeBSD: src/sys/netinet/in.h,v 1.82 2003/10/25 09:37:10 ume Exp $
+ */
+
+/**
+ * @file
+ *
+ * SCTP-related defines
+ */
+
+#ifndef _RTE_SCTP_H_
+#define _RTE_SCTP_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+/**
+ * SCTP Header
+ */
+struct sctp_hdr {
+       uint16_t src_port; /**< Source port. */
+       uint16_t dst_port; /**< Destination port. */
+       uint32_t tag;      /**< Validation tag. */
+       uint32_t cksum;    /**< Checksum. */
+} __attribute__((__packed__));
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_SCTP_H_ */
diff --git a/lib/librte_net/rte_tcp.h b/lib/librte_net/rte_tcp.h
new file mode 100644 (file)
index 0000000..25bd105
--- /dev/null
@@ -0,0 +1,106 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+/*
+ * Copyright (c) 1982, 1986, 1990, 1993
+ *      The Regents of the University of California.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *      This product includes software developed by the University of
+ *      California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *      @(#)in.h        8.3 (Berkeley) 1/3/94
+ * $FreeBSD: src/sys/netinet/in.h,v 1.82 2003/10/25 09:37:10 ume Exp $
+ */
+
+#ifndef _RTE_TCP_H_
+#define _RTE_TCP_H_
+
+/**
+ * @file
+ *
+ * TCP-related defines
+ */
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * TCP Header
+ */
+struct tcp_hdr {
+       uint16_t src_port;  /**< TCP source port. */
+       uint16_t dst_port;  /**< TCP destination port. */
+       uint32_t sent_seq;  /**< TX data sequence number. */
+       uint32_t recv_ack;  /**< RX data acknowledgement sequence number. */
+       uint8_t  data_off;  /**< Data offset. */
+       uint8_t  tcp_flags; /**< TCP flags */
+       uint16_t rx_win;    /**< RX flow control window. */
+       uint16_t cksum;     /**< TCP checksum. */
+       uint16_t tcp_urp;   /**< TCP urgent pointer, if any. */
+} __attribute__((__packed__));
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_TCP_H_ */
diff --git a/lib/librte_net/rte_udp.h b/lib/librte_net/rte_udp.h
new file mode 100644 (file)
index 0000000..1da163f
--- /dev/null
@@ -0,0 +1,101 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+/*
+ * Copyright (c) 1982, 1986, 1990, 1993
+ *      The Regents of the University of California.  All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *      This product includes software developed by the University of
+ *      California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *      @(#)in.h        8.3 (Berkeley) 1/3/94
+ * $FreeBSD: src/sys/netinet/in.h,v 1.82 2003/10/25 09:37:10 ume Exp $
+ */
+
+#ifndef _RTE_UDP_H_
+#define _RTE_UDP_H_
+
+/**
+ * @file
+ *
+ * UDP-related defines
+ */
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * UDP Header
+ */
+struct udp_hdr {
+       uint16_t src_port;    /**< UDP source port. */
+       uint16_t dst_port;    /**< UDP destination port. */
+       uint16_t dgram_len;   /**< UDP datagram length */
+       uint16_t dgram_cksum; /**< UDP datagram checksum */
+} __attribute__((__packed__));
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_UDP_H_ */
diff --git a/lib/librte_pmd_igb/Makefile b/lib/librte_pmd_igb/Makefile
new file mode 100644 (file)
index 0000000..127f466
--- /dev/null
@@ -0,0 +1,64 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_igb.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_IGB_PMD) += igb/e1000_nvm.c
+SRCS-$(CONFIG_RTE_LIBRTE_IGB_PMD) += igb/e1000_manage.c
+SRCS-$(CONFIG_RTE_LIBRTE_IGB_PMD) += igb/e1000_mac.c
+SRCS-$(CONFIG_RTE_LIBRTE_IGB_PMD) += igb/e1000_phy.c
+SRCS-$(CONFIG_RTE_LIBRTE_IGB_PMD) += igb/e1000_82575.c
+SRCS-$(CONFIG_RTE_LIBRTE_IGB_PMD) += igb/e1000_api.c
+SRCS-$(CONFIG_RTE_LIBRTE_IGB_PMD) += igb/e1000_osdep.c
+SRCS-$(CONFIG_RTE_LIBRTE_IGB_PMD) += igb/e1000_vf.c
+SRCS-$(CONFIG_RTE_LIBRTE_IGB_PMD) += igb/e1000_mbx.c
+SRCS-$(CONFIG_RTE_LIBRTE_IGB_PMD) += e1000_rxtx.c
+SRCS-$(CONFIG_RTE_LIBRTE_IGB_PMD) += e1000_ethdev.c
+
+# this lib depends upon:
+DEPDIRS-$(CONFIG_RTE_LIBRTE_IGB_PMD) += lib/librte_eal lib/librte_ether
+DEPDIRS-$(CONFIG_RTE_LIBRTE_IGB_PMD) += lib/librte_mempool lib/librte_mbuf
+DEPDIRS-$(CONFIG_RTE_LIBRTE_IGB_PMD) += lib/librte_net lib/librte_malloc
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_pmd_igb/e1000_ethdev.c b/lib/librte_pmd_igb/e1000_ethdev.c
new file mode 100644 (file)
index 0000000..a984428
--- /dev/null
@@ -0,0 +1,1319 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <stdarg.h>
+
+#include <rte_common.h>
+#include <rte_interrupts.h>
+#include <rte_byteorder.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_atomic.h>
+#include <rte_malloc.h>
+
+#include "e1000_logs.h"
+#include "igb/e1000_api.h"
+#include "igb/e1000_hw.h"
+#include "e1000_ethdev.h"
+
+static int  eth_igb_configure(struct rte_eth_dev *dev, uint16_t nb_rx_q,
+               uint16_t nb_tx_q);
+static int  eth_igb_start(struct rte_eth_dev *dev);
+static void eth_igb_stop(struct rte_eth_dev *dev);
+static void eth_igb_close(struct rte_eth_dev *dev);
+static void eth_igb_promiscuous_enable(struct rte_eth_dev *dev);
+static void eth_igb_promiscuous_disable(struct rte_eth_dev *dev);
+static void eth_igb_allmulticast_enable(struct rte_eth_dev *dev);
+static void eth_igb_allmulticast_disable(struct rte_eth_dev *dev);
+static int  eth_igb_link_update(struct rte_eth_dev *dev,
+                               int wait_to_complete);
+static void eth_igb_stats_get(struct rte_eth_dev *dev,
+                               struct rte_eth_stats *rte_stats);
+static void eth_igb_stats_reset(struct rte_eth_dev *dev);
+static void eth_igb_infos_get(struct rte_eth_dev *dev,
+                               struct rte_eth_dev_info *dev_info);
+static int  eth_igb_flow_ctrl_set(struct rte_eth_dev *dev,
+                               struct rte_eth_fc_conf *fc_conf);
+static int eth_igb_interrupt_setup(struct rte_eth_dev *dev);
+static int eth_igb_interrupt_get_status(struct rte_eth_dev *dev);
+static int eth_igb_interrupt_action(struct rte_eth_dev *dev);
+static void eth_igb_interrupt_handler(struct rte_intr_handle *handle,
+                                                       void *param);
+static int  igb_hardware_init(struct e1000_hw *hw);
+static void igb_hw_control_acquire(struct e1000_hw *hw);
+static void igb_hw_control_release(struct e1000_hw *hw);
+static void igb_init_manageability(struct e1000_hw *hw);
+static void igb_release_manageability(struct e1000_hw *hw);
+static void igb_vlan_hw_support_enable(struct rte_eth_dev *dev);
+static void igb_vlan_hw_support_disable(struct rte_eth_dev *dev);
+static void eth_igb_vlan_filter_set(struct rte_eth_dev *dev,
+                                     uint16_t vlan_id,
+                                     int on);
+static int eth_igb_led_on(struct rte_eth_dev *dev);
+static int eth_igb_led_off(struct rte_eth_dev *dev);
+
+static void igb_intr_disable(struct e1000_hw *hw);
+static int  igb_get_rx_buffer_size(struct e1000_hw *hw);
+static void eth_igb_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
+               uint32_t index, uint32_t pool);
+static void eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index);
+
+#define IGB_FC_PAUSE_TIME 0x0680
+#define IGB_LINK_UPDATE_CHECK_TIMEOUT  90  /* 9s */
+#define IGB_LINK_UPDATE_CHECK_INTERVAL 100 /* ms */
+
+static enum e1000_fc_mode igb_fc_setting = e1000_fc_full;
+
+/*
+ * The set of PCI devices this driver supports
+ */
+static struct rte_pci_id pci_id_igb_map[] = {
+
+#undef RTE_LIBRTE_IXGBE_PMD
+#define RTE_PCI_DEV_ID_DECL(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
+#include "rte_pci_dev_ids.h"
+
+{.device_id = 0},
+};
+
+static struct eth_dev_ops eth_igb_ops = {
+       .dev_configure        = eth_igb_configure,
+       .dev_start            = eth_igb_start,
+       .dev_stop             = eth_igb_stop,
+       .dev_close            = eth_igb_close,
+       .promiscuous_enable   = eth_igb_promiscuous_enable,
+       .promiscuous_disable  = eth_igb_promiscuous_disable,
+       .allmulticast_enable  = eth_igb_allmulticast_enable,
+       .allmulticast_disable = eth_igb_allmulticast_disable,
+       .link_update          = eth_igb_link_update,
+       .stats_get            = eth_igb_stats_get,
+       .stats_reset          = eth_igb_stats_reset,
+       .dev_infos_get        = eth_igb_infos_get,
+       .vlan_filter_set      = eth_igb_vlan_filter_set,
+       .rx_queue_setup       = eth_igb_rx_queue_setup,
+       .tx_queue_setup       = eth_igb_tx_queue_setup,
+       .dev_led_on           = eth_igb_led_on,
+       .dev_led_off          = eth_igb_led_off,
+       .flow_ctrl_set        = eth_igb_flow_ctrl_set,
+       .mac_addr_add         = eth_igb_rar_set,
+       .mac_addr_remove      = eth_igb_rar_clear,
+};
+
+/**
+ * Atomically reads the link status information from global
+ * structure rte_eth_dev.
+ *
+ * @param dev
+ *   - Pointer to the structure rte_eth_dev to read from.
+ *   - Pointer to the buffer to be saved with the link status.
+ *
+ * @return
+ *   - On success, zero.
+ *   - On failure, negative value.
+ */
+static inline int
+rte_igb_dev_atomic_read_link_status(struct rte_eth_dev *dev,
+                               struct rte_eth_link *link)
+{
+       struct rte_eth_link *dst = link;
+       struct rte_eth_link *src = &(dev->data->dev_link);
+
+       if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
+                                       *(uint64_t *)src) == 0)
+               return -1;
+
+       return 0;
+}
+
+/**
+ * Atomically writes the link status information into global
+ * structure rte_eth_dev.
+ *
+ * @param dev
+ *   - Pointer to the structure rte_eth_dev to write to.
+ *   - Pointer to the buffer holding the link status to be written.
+ *
+ * @return
+ *   - On success, zero.
+ *   - On failure, negative value.
+ */
+static inline int
+rte_igb_dev_atomic_write_link_status(struct rte_eth_dev *dev,
+                               struct rte_eth_link *link)
+{
+       struct rte_eth_link *dst = &(dev->data->dev_link);
+       struct rte_eth_link *src = link;
+
+       if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
+                                       *(uint64_t *)src) == 0)
+               return -1;
+
+       return 0;
+}
+
+static void
+igb_identify_hardware(struct rte_eth_dev *dev)
+{
+       struct e1000_hw *hw =
+               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       hw->vendor_id = dev->pci_dev->id.vendor_id;
+       hw->device_id = dev->pci_dev->id.device_id;
+       hw->subsystem_vendor_id = dev->pci_dev->id.subsystem_vendor_id;
+       hw->subsystem_device_id = dev->pci_dev->id.subsystem_device_id;
+
+       e1000_set_mac_type(hw);
+
+       /* need to check if it is a vf device below */
+}
+
+static int
+eth_igb_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
+                  struct rte_eth_dev *eth_dev)
+{
+       int error = 0;
+       struct rte_pci_device *pci_dev;
+       struct e1000_hw *hw =
+               E1000_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+       struct e1000_vfta * shadow_vfta =
+               E1000_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
+
+       pci_dev = eth_dev->pci_dev;
+       eth_dev->dev_ops = &eth_igb_ops;
+       eth_dev->rx_pkt_burst = &eth_igb_recv_pkts;
+       eth_dev->tx_pkt_burst = &eth_igb_xmit_pkts;
+
+       /* for secondary processes, we don't initialise any further as primary
+        * has already done this work. Only check we don't need a different
+        * RX function */
+       if (rte_eal_process_type() != RTE_PROC_PRIMARY){
+               if (eth_dev->data->scattered_rx)
+                       eth_dev->rx_pkt_burst = &eth_igb_recv_scattered_pkts;
+               return 0;
+       }
+
+       hw->hw_addr= (void *)pci_dev->mem_resource.addr;
+
+       igb_identify_hardware(eth_dev);
+
+       if (e1000_setup_init_funcs(hw, TRUE) != E1000_SUCCESS) {
+               error = -EIO;
+               goto err_late;
+       }
+
+       e1000_get_bus_info(hw);
+
+       hw->mac.autoneg = 1;
+       hw->phy.autoneg_wait_to_complete = 0;
+       hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;
+
+       /* Copper options */
+       if (hw->phy.media_type == e1000_media_type_copper) {
+               hw->phy.mdix = 0; /* AUTO_ALL_MODES */
+               hw->phy.disable_polarity_correction = 0;
+               hw->phy.ms_type = e1000_ms_hw_default;
+       }
+
+       /*
+        * Start from a known state, this is important in reading the nvm
+        * and mac from that.
+        */
+       e1000_reset_hw(hw);
+
+       /* Make sure we have a good EEPROM before we read from it */
+       if (e1000_validate_nvm_checksum(hw) < 0) {
+               /*
+                * Some PCI-E parts fail the first check due to
+                * the link being in sleep state, call it again,
+                * if it fails a second time it's a real issue.
+                */
+               if (e1000_validate_nvm_checksum(hw) < 0) {
+                       PMD_INIT_LOG(ERR, "EEPROM checksum invalid");
+                       error = -EIO;
+                       goto err_late;
+               }
+       }
+
+       /* Read the permanent MAC address out of the EEPROM */
+       if (e1000_read_mac_addr(hw) != 0) {
+               PMD_INIT_LOG(ERR, "EEPROM error while reading MAC address");
+               error = -EIO;
+               goto err_late;
+       }
+
+       /* Allocate memory for storing MAC addresses */
+       eth_dev->data->mac_addrs = rte_zmalloc("e1000",
+               ETHER_ADDR_LEN * hw->mac.rar_entry_count, 0);
+       if (eth_dev->data->mac_addrs == NULL) {
+               PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to "
+                                               "store MAC addresses",
+                               ETHER_ADDR_LEN * hw->mac.rar_entry_count);
+               error = -ENOMEM;
+               goto err_late;
+       }
+
+       /* Copy the permanent MAC address */
+       ether_addr_copy((struct ether_addr *)hw->mac.addr, &eth_dev->data->mac_addrs[0]);
+
+       /* initialize the vfta */
+       memset(shadow_vfta, 0, sizeof(*shadow_vfta));
+
+       /* Now initialize the hardware */
+       if (igb_hardware_init(hw) != 0) {
+               PMD_INIT_LOG(ERR, "Hardware initialization failed");
+               rte_free(eth_dev->data->mac_addrs);
+               eth_dev->data->mac_addrs = NULL;
+               error = -ENODEV;
+               goto err_late;
+       }
+       hw->mac.get_link_status = 1;
+
+       /* Indicate SOL/IDER usage */
+       if (e1000_check_reset_block(hw) < 0) {
+               PMD_INIT_LOG(ERR, "PHY reset is blocked due to "
+                                       "SOL/IDER session");
+       }
+
+       PMD_INIT_LOG(INFO, "port_id %d vendorID=0x%x deviceID=0x%x\n",
+                    eth_dev->data->port_id, pci_dev->id.vendor_id,
+                    pci_dev->id.device_id);
+
+       rte_intr_callback_register(&(pci_dev->intr_handle),
+               eth_igb_interrupt_handler, (void *)eth_dev);
+
+       return 0;
+
+err_late:
+       igb_hw_control_release(hw);
+
+       return (error);
+}
+
+static struct eth_driver rte_igb_pmd = {
+       {
+               .name = "rte_igb_pmd",
+               .id_table = pci_id_igb_map,
+               .drv_flags = RTE_PCI_DRV_NEED_IGB_UIO,
+       },
+       .eth_dev_init = eth_igb_dev_init,
+       .dev_private_size = sizeof(struct e1000_adapter),
+};
+
+int
+rte_igb_pmd_init(void)
+{
+       rte_eth_driver_register(&rte_igb_pmd);
+       return 0;
+}
+
+static int
+eth_igb_configure(struct rte_eth_dev *dev, uint16_t nb_rx_q, uint16_t nb_tx_q)
+{
+       struct e1000_interrupt *intr =
+               E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+       int diag;
+
+       PMD_INIT_LOG(DEBUG, ">>");
+
+       intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
+
+       /* Allocate the array of pointers to RX structures */
+       diag = igb_dev_rx_queue_alloc(dev, nb_rx_q);
+       if (diag != 0) {
+               PMD_INIT_LOG(ERR, "ethdev port_id=%u allocation of array of %u"
+                                       " pointers to RX queues failed",
+                                       dev->data->port_id, nb_rx_q);
+               return diag;
+       }
+
+       /* Allocate the array of pointers to TX structures */
+       diag = igb_dev_tx_queue_alloc(dev, nb_tx_q);
+       if (diag != 0) {
+               PMD_INIT_LOG(ERR, "ethdev port_id=%u allocation of array of %u"
+                                       " pointers to TX queues failed",
+                                       dev->data->port_id, nb_tx_q);
+
+               return diag;
+       }
+
+       PMD_INIT_LOG(DEBUG, "<<");
+
+       return (0);
+}
+
+static int
+eth_igb_start(struct rte_eth_dev *dev)
+{
+       struct e1000_hw *hw =
+               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       int ret, i;
+
+       PMD_INIT_LOG(DEBUG, ">>");
+
+       igb_intr_disable(hw);
+
+       /* Power up the phy. Needed to make the link go Up */
+       e1000_power_up_phy(hw);
+
+       /*
+        * Packet Buffer Allocation (PBA)
+        * Writing PBA sets the receive portion of the buffer
+        * the remainder is used for the transmit buffer.
+        */
+       if (hw->mac.type == e1000_82575) {
+               uint32_t pba;
+
+               pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */
+               E1000_WRITE_REG(hw, E1000_PBA, pba);
+       }
+
+       /* Put the address into the Receive Address Array */
+       e1000_rar_set(hw, hw->mac.addr, 0);
+
+       /* Initialize the hardware */
+       if (igb_hardware_init(hw)) {
+               PMD_INIT_LOG(ERR, "Unable to initialize the hardware");
+               return (-1);
+       }
+
+       E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN);
+
+       /* Configure for OS presence */
+       igb_init_manageability(hw);
+
+       eth_igb_tx_init(dev);
+
+       /* This can fail when allocating mbufs for descriptor rings */
+       ret = eth_igb_rx_init(dev);
+       if (ret) {
+               PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
+               return ret;
+       }
+
+       e1000_clear_hw_cntrs_base_generic(hw);
+
+       /*
+        * If VLAN filtering is enabled, set up VLAN tag offload and filtering
+        * and restore the VFTA.
+        */
+       if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+               igb_vlan_hw_support_enable(dev);
+       else
+               igb_vlan_hw_support_disable(dev);
+
+       /*
+        * Configure the Interrupt Moderation register (EITR) with the maximum
+        * possible value (0xFFFF) to minimize "System Partial Write" issued by
+        * spurious [DMA] memory updates of RX and TX ring descriptors.
+        *
+        * With a EITR granularity of 2 microseconds in the 82576, only 7/8
+        * spurious memory updates per second should be expected.
+        * ((65535 * 2) / 1000.1000 ~= 0.131 second).
+        *
+        * Because interrupts are not used at all, the MSI-X is not activated
+        * and interrupt moderation is controlled by EITR[0].
+        *
+        * Note that having [almost] disabled memory updates of RX and TX ring
+        * descriptors through the Interrupt Moderation mechanism, memory
+        * updates of ring descriptors are now moderated by the configurable
+        * value of Write-Back Threshold registers.
+        */
+       if ((hw->mac.type == e1000_82576) || (hw->mac.type == e1000_82580) ||
+                       (hw->mac.type == e1000_i350)) {
+               uint32_t ivar;
+
+               /* Enable all RX & TX queues in the IVAR registers */
+               ivar = (uint32_t) ((E1000_IVAR_VALID << 16) | E1000_IVAR_VALID);
+               for (i = 0; i < 8; i++)
+                       E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, i, ivar);
+
+               /* Configure EITR with the maximum possible value (0xFFFF) */
+               E1000_WRITE_REG(hw, E1000_EITR(0), 0xFFFF);
+       }
+
+       /* Don't reset the phy next time init gets called */
+       hw->phy.reset_disable = 1;
+
+       /* Setup link speed and duplex */
+       switch (dev->data->dev_conf.link_speed) {
+       case ETH_LINK_SPEED_AUTONEG:
+               if (dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX)
+                       hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;
+               else if (dev->data->dev_conf.link_duplex == ETH_LINK_HALF_DUPLEX)
+                       hw->phy.autoneg_advertised = E1000_ALL_HALF_DUPLEX;
+               else if (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX)
+                       hw->phy.autoneg_advertised = E1000_ALL_FULL_DUPLEX;
+               else
+                       goto error_invalid_config;
+               break;
+       case ETH_LINK_SPEED_10:
+               if (dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX)
+                       hw->phy.autoneg_advertised = E1000_ALL_10_SPEED;
+               else if (dev->data->dev_conf.link_duplex == ETH_LINK_HALF_DUPLEX)
+                       hw->phy.autoneg_advertised = ADVERTISE_10_HALF;
+               else if (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX)
+                       hw->phy.autoneg_advertised = ADVERTISE_10_FULL;
+               else
+                       goto error_invalid_config;
+               break;
+       case ETH_LINK_SPEED_100:
+               if (dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX)
+                       hw->phy.autoneg_advertised = E1000_ALL_100_SPEED;
+               else if (dev->data->dev_conf.link_duplex == ETH_LINK_HALF_DUPLEX)
+                       hw->phy.autoneg_advertised = ADVERTISE_100_HALF;
+               else if (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX)
+                       hw->phy.autoneg_advertised = ADVERTISE_100_FULL;
+               else
+                       goto error_invalid_config;
+               break;
+       case ETH_LINK_SPEED_1000:
+               if ((dev->data->dev_conf.link_duplex == ETH_LINK_AUTONEG_DUPLEX) ||
+                               (dev->data->dev_conf.link_duplex == ETH_LINK_FULL_DUPLEX))
+                       hw->phy.autoneg_advertised = ADVERTISE_1000_FULL;
+               else
+                       goto error_invalid_config;
+               break;
+       case ETH_LINK_SPEED_10000:
+       default:
+               goto error_invalid_config;
+       }
+       e1000_setup_link(hw);
+
+       PMD_INIT_LOG(DEBUG, "<<");
+
+       /* check if lsc interrupt feature is enabled */
+       if (dev->data->dev_conf.intr_conf.lsc != 0)
+               return eth_igb_interrupt_setup(dev);
+
+       return (0);
+
+error_invalid_config:
+       PMD_INIT_LOG(ERR, "Invalid link_speed/link_duplex (%u/%u) for port %u\n",
+                       dev->data->dev_conf.link_speed,
+                       dev->data->dev_conf.link_duplex, dev->data->port_id);
+       return -1;
+}
+
+/*********************************************************************
+ *
+ *  This routine disables all traffic on the adapter by issuing a
+ *  global reset on the MAC.
+ *
+ **********************************************************************/
+static void
+eth_igb_stop(struct rte_eth_dev *dev)
+{
+       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct rte_eth_link link;
+
+       igb_intr_disable(hw);
+       e1000_reset_hw(hw);
+       E1000_WRITE_REG(hw, E1000_WUC, 0);
+
+       /* Power down the phy. Needed to make the link go Down */
+       e1000_power_down_phy(hw);
+
+       igb_dev_clear_queues(dev);
+
+       /* clear the recorded link status */
+       memset(&link, 0, sizeof(link));
+       rte_igb_dev_atomic_write_link_status(dev, &link);
+}
+
+static void
+eth_igb_close(struct rte_eth_dev *dev)
+{
+       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct rte_eth_link link;
+
+       eth_igb_stop(dev);
+       e1000_phy_hw_reset(hw);
+       igb_release_manageability(hw);
+       igb_hw_control_release(hw);
+
+       igb_dev_clear_queues(dev);
+
+       memset(&link, 0, sizeof(link));
+       rte_igb_dev_atomic_write_link_status(dev, &link);
+}
+
+static int
+igb_get_rx_buffer_size(struct e1000_hw *hw)
+{
+       uint32_t rx_buf_size;
+       if (hw->mac.type == e1000_82576) {
+               rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0xffff) << 10;
+       } else if (hw->mac.type == e1000_82580) {
+               /* PBS needs to be translated according to a lookup table */
+               rx_buf_size = (E1000_READ_REG(hw, E1000_RXPBS) & 0xf);
+               rx_buf_size = (uint32_t) e1000_rxpbs_adjust_82580(rx_buf_size);
+               rx_buf_size = (rx_buf_size << 10);
+       } else {
+               rx_buf_size = (E1000_READ_REG(hw, E1000_PBA) & 0xffff) << 10;
+       }
+
+       return rx_buf_size;
+}
+
+/*********************************************************************
+ *
+ *  Initialize the hardware
+ *
+ **********************************************************************/
+static int
+igb_hardware_init(struct e1000_hw *hw)
+{
+       uint32_t rx_buf_size;
+       int diag;
+
+       /* Let the firmware know the OS is in control */
+       igb_hw_control_acquire(hw);
+
+       /*
+        * These parameters control the automatic generation (Tx) and
+        * response (Rx) to Ethernet PAUSE frames.
+        * - High water mark should allow for at least two standard size (1518)
+        *   frames to be received after sending an XOFF.
+        * - Low water mark works best when it is very near the high water mark.
+        *   This allows the receiver to restart by sending XON when it has
+        *   drained a bit. Here we use an arbitrary value of 1500 which will
+        *   restart after one full frame is pulled from the buffer. There
+        *   could be several smaller frames in the buffer and if so they will
+        *   not trigger the XON until their total number reduces the buffer
+        *   by 1500.
+        * - The pause time is fairly large at 1000 x 512ns = 512 usec.
+        */
+       rx_buf_size = igb_get_rx_buffer_size(hw);
+
+       hw->fc.high_water = rx_buf_size - (ETHER_MAX_LEN * 2);
+       hw->fc.low_water = hw->fc.high_water - 1500;
+       hw->fc.pause_time = IGB_FC_PAUSE_TIME;
+       hw->fc.send_xon = 1;
+
+       /* Set Flow control, use the tunable location if sane */
+       if ((igb_fc_setting != e1000_fc_none) && (igb_fc_setting < 4))
+               hw->fc.requested_mode = igb_fc_setting;
+       else
+               hw->fc.requested_mode = e1000_fc_none;
+
+       /* Issue a global reset */
+       e1000_reset_hw(hw);
+       E1000_WRITE_REG(hw, E1000_WUC, 0);
+
+       diag = e1000_init_hw(hw);
+       if (diag < 0)
+               return (diag);
+
+       E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN);
+       e1000_get_phy_info(hw);
+       e1000_check_for_link(hw);
+
+       return (0);
+}
+
+/*
+ * Read the hardware statistics registers, accumulate them into the
+ * driver's persistent counters and, when rte_stats is non-NULL, derive
+ * the generic ethdev statistics from those totals.  Passing NULL simply
+ * drains the (read-to-clear) registers; eth_igb_stats_reset() relies on
+ * that.  This function is based on igb_update_stats_counters() in
+ * igb/if_igb.c.
+ */
+static void
+eth_igb_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
+{
+       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct e1000_hw_stats *stats =
+                       E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+       int pause_frames;
+
+       /* Symbol/sequence error counters are read only for copper media
+        * or while the link is up. */
+       if(hw->phy.media_type == e1000_media_type_copper ||
+           (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
+               stats->symerrs +=
+                   E1000_READ_REG(hw,E1000_SYMERRS);
+               stats->sec += E1000_READ_REG(hw, E1000_SEC);
+       }
+
+       stats->crcerrs += E1000_READ_REG(hw, E1000_CRCERRS);
+       stats->mpc += E1000_READ_REG(hw, E1000_MPC);
+       stats->scc += E1000_READ_REG(hw, E1000_SCC);
+       stats->ecol += E1000_READ_REG(hw, E1000_ECOL);
+
+       stats->mcc += E1000_READ_REG(hw, E1000_MCC);
+       stats->latecol += E1000_READ_REG(hw, E1000_LATECOL);
+       stats->colc += E1000_READ_REG(hw, E1000_COLC);
+       stats->dc += E1000_READ_REG(hw, E1000_DC);
+       stats->rlec += E1000_READ_REG(hw, E1000_RLEC);
+       stats->xonrxc += E1000_READ_REG(hw, E1000_XONRXC);
+       stats->xontxc += E1000_READ_REG(hw, E1000_XONTXC);
+       /*
+       ** For watchdog management we need to know if we have been
+       ** paused during the last interval, so capture that here.
+       */
+       pause_frames = E1000_READ_REG(hw, E1000_XOFFRXC);
+       stats->xoffrxc += pause_frames;
+       stats->xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC);
+       stats->fcruc += E1000_READ_REG(hw, E1000_FCRUC);
+       stats->prc64 += E1000_READ_REG(hw, E1000_PRC64);
+       stats->prc127 += E1000_READ_REG(hw, E1000_PRC127);
+       stats->prc255 += E1000_READ_REG(hw, E1000_PRC255);
+       stats->prc511 += E1000_READ_REG(hw, E1000_PRC511);
+       stats->prc1023 += E1000_READ_REG(hw, E1000_PRC1023);
+       stats->prc1522 += E1000_READ_REG(hw, E1000_PRC1522);
+       stats->gprc += E1000_READ_REG(hw, E1000_GPRC);
+       stats->bprc += E1000_READ_REG(hw, E1000_BPRC);
+       stats->mprc += E1000_READ_REG(hw, E1000_MPRC);
+       stats->gptc += E1000_READ_REG(hw, E1000_GPTC);
+
+       /* For the 64-bit byte counters the low dword must be read first. */
+       /* Both registers clear on the read of the high dword */
+
+       stats->gorc += E1000_READ_REG(hw, E1000_GORCL);
+       stats->gorc += ((uint64_t)E1000_READ_REG(hw, E1000_GORCH) << 32);
+       stats->gotc += E1000_READ_REG(hw, E1000_GOTCL);
+       stats->gotc += ((uint64_t)E1000_READ_REG(hw, E1000_GOTCH) << 32);
+
+       stats->rnbc += E1000_READ_REG(hw, E1000_RNBC);
+       stats->ruc += E1000_READ_REG(hw, E1000_RUC);
+       stats->rfc += E1000_READ_REG(hw, E1000_RFC);
+       stats->roc += E1000_READ_REG(hw, E1000_ROC);
+       stats->rjc += E1000_READ_REG(hw, E1000_RJC);
+
+       /* NOTE(review): only the high dwords (TORH/TOTH) of the 64-bit
+        * total-octets counters are read here, unlike GORC/GOTC above where
+        * the low dword is read first -- confirm this is intentional. */
+       stats->tor += E1000_READ_REG(hw, E1000_TORH);
+       stats->tot += E1000_READ_REG(hw, E1000_TOTH);
+
+       stats->tpr += E1000_READ_REG(hw, E1000_TPR);
+       stats->tpt += E1000_READ_REG(hw, E1000_TPT);
+       stats->ptc64 += E1000_READ_REG(hw, E1000_PTC64);
+       stats->ptc127 += E1000_READ_REG(hw, E1000_PTC127);
+       stats->ptc255 += E1000_READ_REG(hw, E1000_PTC255);
+       stats->ptc511 += E1000_READ_REG(hw, E1000_PTC511);
+       stats->ptc1023 += E1000_READ_REG(hw, E1000_PTC1023);
+       stats->ptc1522 += E1000_READ_REG(hw, E1000_PTC1522);
+       stats->mptc += E1000_READ_REG(hw, E1000_MPTC);
+       stats->bptc += E1000_READ_REG(hw, E1000_BPTC);
+
+       /* Interrupt Counts */
+
+       stats->iac += E1000_READ_REG(hw, E1000_IAC);
+       stats->icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC);
+       stats->icrxatc += E1000_READ_REG(hw, E1000_ICRXATC);
+       stats->ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC);
+       stats->ictxatc += E1000_READ_REG(hw, E1000_ICTXATC);
+       stats->ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC);
+       stats->ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC);
+       stats->icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC);
+       stats->icrxoc += E1000_READ_REG(hw, E1000_ICRXOC);
+
+       /* Host to Card Statistics */
+
+       stats->cbtmpc += E1000_READ_REG(hw, E1000_CBTMPC);
+       stats->htdpmc += E1000_READ_REG(hw, E1000_HTDPMC);
+       stats->cbrdpc += E1000_READ_REG(hw, E1000_CBRDPC);
+       stats->cbrmpc += E1000_READ_REG(hw, E1000_CBRMPC);
+       stats->rpthc += E1000_READ_REG(hw, E1000_RPTHC);
+       stats->hgptc += E1000_READ_REG(hw, E1000_HGPTC);
+       stats->htcbdpc += E1000_READ_REG(hw, E1000_HTCBDPC);
+       stats->hgorc += E1000_READ_REG(hw, E1000_HGORCL);
+       stats->hgorc += ((uint64_t)E1000_READ_REG(hw, E1000_HGORCH) << 32);
+       stats->hgotc += E1000_READ_REG(hw, E1000_HGOTCL);
+       stats->hgotc += ((uint64_t)E1000_READ_REG(hw, E1000_HGOTCH) << 32);
+       stats->lenerrs += E1000_READ_REG(hw, E1000_LENERRS);
+       stats->scvpc += E1000_READ_REG(hw, E1000_SCVPC);
+       stats->hrmpc += E1000_READ_REG(hw, E1000_HRMPC);
+
+       stats->algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC);
+       stats->rxerrc += E1000_READ_REG(hw, E1000_RXERRC);
+       stats->tncrs += E1000_READ_REG(hw, E1000_TNCRS);
+       stats->cexterr += E1000_READ_REG(hw, E1000_CEXTERR);
+       stats->tsctc += E1000_READ_REG(hw, E1000_TSCTC);
+       stats->tsctfc += E1000_READ_REG(hw, E1000_TSCTFC);
+
+       /* NULL means "drain registers only"; nothing to report back. */
+       if (rte_stats == NULL)
+               return;
+
+       /* Rx Errors */
+       rte_stats->ierrors = stats->rxerrc + stats->crcerrs + stats->algnerrc +
+           stats->ruc + stats->roc + stats->mpc + stats->cexterr;
+
+       /* Tx Errors */
+       rte_stats->oerrors = stats->ecol + stats->latecol;
+
+       rte_stats->ipackets = stats->gprc;
+       rte_stats->opackets = stats->gptc;
+       rte_stats->ibytes   = stats->gorc;
+       rte_stats->obytes   = stats->gotc;
+}
+
+static void
+eth_igb_stats_reset(struct rte_eth_dev *dev)
+{
+       struct e1000_hw_stats *stats =
+                       E1000_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+
+       /* The statistics registers clear on read; draining them with a
+        * NULL destination resets the hardware side. */
+       eth_igb_stats_get(dev, NULL);
+
+       /* Zero the accumulated software copy as well. */
+       memset(stats, 0, sizeof(*stats));
+}
+
+static void
+eth_igb_infos_get(struct rte_eth_dev *dev,
+                   struct rte_eth_dev_info *dev_info)
+{
+       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint16_t nb_queues;
+
+       dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
+       dev_info->max_rx_pktlen  = 0x3FFF; /* See RLPML register. */
+       dev_info->max_mac_addrs = hw->mac.rar_entry_count;
+
+       /* Rx and Tx queue counts are identical and depend on the MAC type. */
+       switch (hw->mac.type) {
+       case e1000_82575:
+               nb_queues = 4;
+               break;
+       case e1000_82576:
+               nb_queues = 16;
+               break;
+       case e1000_82580:
+       case e1000_i350:
+               nb_queues = 8;
+               break;
+       default:
+               /* Unknown MAC type: should not happen, expose no queues. */
+               nb_queues = 0;
+               break;
+       }
+       dev_info->max_rx_queues = nb_queues;
+       dev_info->max_tx_queues = nb_queues;
+}
+
+/* return 0 means link status changed, -1 means not changed */
+static int
+eth_igb_link_update(struct rte_eth_dev *dev, int wait_to_complete)
+{
+       struct e1000_hw *hw =
+               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct rte_eth_link link, old;
+       int link_check, count;
+
+       link_check = 0;
+       /* Force the shared code to re-read the link state from hardware. */
+       hw->mac.get_link_status = 1;
+
+       /* possible wait-to-complete in up to 9 seconds */
+       for (count = 0; count < IGB_LINK_UPDATE_CHECK_TIMEOUT; count ++) {
+               /* Read the real link status */
+               switch (hw->phy.media_type) {
+               case e1000_media_type_copper:
+                       /* Do the work to read phy */
+                       e1000_check_for_link(hw);
+                       /* get_link_status is cleared once link is up */
+                       link_check = !hw->mac.get_link_status;
+                       break;
+
+               case e1000_media_type_fiber:
+                       e1000_check_for_link(hw);
+                       link_check = (E1000_READ_REG(hw, E1000_STATUS) &
+                                     E1000_STATUS_LU);
+                       break;
+
+               case e1000_media_type_internal_serdes:
+                       e1000_check_for_link(hw);
+                       link_check = hw->mac.serdes_has_link;
+                       break;
+
+               /* Unknown media is treated as "no link". */
+               default:
+               case e1000_media_type_unknown:
+                       break;
+               }
+               /* Single probe only when not asked to wait for completion. */
+               if (link_check || wait_to_complete == 0)
+                       break;
+               rte_delay_ms(IGB_LINK_UPDATE_CHECK_INTERVAL);
+       }
+       memset(&link, 0, sizeof(link));
+       rte_igb_dev_atomic_read_link_status(dev, &link);
+       /* Snapshot the previous state for the change test below. */
+       old = link;
+
+       /* Now we check if a transition has happened */
+       if (link_check) {
+               hw->mac.ops.get_link_up_info(hw, &link.link_speed,
+                                         &link.link_duplex);
+               link.link_status = 1;
+       } else if (!link_check) {
+               link.link_speed = 0;
+               link.link_duplex = 0;
+               link.link_status = 0;
+       }
+       rte_igb_dev_atomic_write_link_status(dev, &link);
+
+       /* not changed */
+       if (old.link_status == link.link_status)
+               return -1;
+
+       /* changed */
+       return 0;
+}
+
+/*
+ * igb_hw_control_acquire sets CTRL_EXT:DRV_LOAD bit.
+ * For ASF and Pass Through versions of f/w this means
+ * that the driver is loaded.
+ */
+static void
+igb_hw_control_acquire(struct e1000_hw *hw)
+{
+       uint32_t reg;
+
+       /* Tell the firmware a driver now owns the hardware. */
+       reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
+       reg |= E1000_CTRL_EXT_DRV_LOAD;
+       E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
+}
+
+/*
+ * igb_hw_control_release resets CTRL_EXT:DRV_LOAD bit.
+ * For ASF and Pass Through versions of f/w this means that the
+ * driver is no longer loaded.
+ */
+static void
+igb_hw_control_release(struct e1000_hw *hw)
+{
+       uint32_t reg;
+
+       /* Hand control of the hardware back to the firmware. */
+       reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
+       reg &= ~E1000_CTRL_EXT_DRV_LOAD;
+       E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
+}
+
+/*
+ * Bit of a misnomer, what this really means is
+ * to enable OS management of the system... aka
+ * to disable special hardware management features.
+ */
+static void
+igb_init_manageability(struct e1000_hw *hw)
+{
+       uint32_t manc, manc2h;
+
+       /* Nothing to do when management pass-through is unavailable. */
+       if (!e1000_enable_mng_pass_thru(hw))
+               return;
+
+       manc2h = E1000_READ_REG(hw, E1000_MANC2H);
+       manc = E1000_READ_REG(hw, E1000_MANC);
+
+       /* Stop the hardware from intercepting ARP frames. */
+       manc &= ~(E1000_MANC_ARP_EN);
+
+       /* Forward management packets to the host instead. */
+       manc |= E1000_MANC_EN_MNG2HOST;
+       manc2h |= 1 << 5;  /* Mng Port 623 */
+       manc2h |= 1 << 6;  /* Mng Port 664 */
+       E1000_WRITE_REG(hw, E1000_MANC2H, manc2h);
+       E1000_WRITE_REG(hw, E1000_MANC, manc);
+}
+
+static void
+igb_release_manageability(struct e1000_hw *hw)
+{
+       uint32_t manc;
+
+       if (!e1000_enable_mng_pass_thru(hw))
+               return;
+
+       /* Give ARP interception back to the hardware and stop forwarding
+        * management packets to the host. */
+       manc = E1000_READ_REG(hw, E1000_MANC);
+       manc |= E1000_MANC_ARP_EN;
+       manc &= ~E1000_MANC_EN_MNG2HOST;
+
+       E1000_WRITE_REG(hw, E1000_MANC, manc);
+}
+
+static void
+eth_igb_promiscuous_enable(struct rte_eth_dev *dev)
+{
+       struct e1000_hw *hw =
+               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint32_t reg;
+
+       /* Accept every unicast and multicast frame. */
+       reg = E1000_READ_REG(hw, E1000_RCTL);
+       reg |= E1000_RCTL_UPE | E1000_RCTL_MPE;
+       E1000_WRITE_REG(hw, E1000_RCTL, reg);
+}
+
+static void
+eth_igb_promiscuous_disable(struct rte_eth_dev *dev)
+{
+       struct e1000_hw *hw =
+               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint32_t reg;
+
+       /* Drop unicast promiscuity ... */
+       reg = E1000_READ_REG(hw, E1000_RCTL);
+       reg &= ~E1000_RCTL_UPE;
+       /* ... but keep multicast promiscuity if all-multicast is active. */
+       if (dev->data->all_multicast == 1)
+               reg |= E1000_RCTL_MPE;
+       else
+               reg &= ~E1000_RCTL_MPE;
+       E1000_WRITE_REG(hw, E1000_RCTL, reg);
+}
+
+static void
+eth_igb_allmulticast_enable(struct rte_eth_dev *dev)
+{
+       struct e1000_hw *hw =
+               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint32_t reg;
+
+       /* Accept all multicast frames. */
+       reg = E1000_READ_REG(hw, E1000_RCTL);
+       reg |= E1000_RCTL_MPE;
+       E1000_WRITE_REG(hw, E1000_RCTL, reg);
+}
+
+static void
+eth_igb_allmulticast_disable(struct rte_eth_dev *dev)
+{
+       struct e1000_hw *hw =
+               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint32_t reg;
+
+       /* Promiscuous mode implies all-multicast; MPE must stay set. */
+       if (dev->data->promiscuous == 1)
+               return;
+       reg = E1000_READ_REG(hw, E1000_RCTL);
+       reg &= ~E1000_RCTL_MPE;
+       E1000_WRITE_REG(hw, E1000_RCTL, reg);
+}
+
+static void
+eth_igb_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
+{
+       struct e1000_hw *hw =
+               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct e1000_vfta * shadow_vfta =
+               E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
+       uint32_t idx, bit, entry;
+
+       /* Each VFTA entry covers 32 VLAN ids: locate word and bit. */
+       idx = (uint32_t) ((vlan_id >> E1000_VFTA_ENTRY_SHIFT) &
+                         E1000_VFTA_ENTRY_MASK);
+       bit = (uint32_t) (1 << (vlan_id & E1000_VFTA_ENTRY_BIT_SHIFT_MASK));
+
+       entry = E1000_READ_REG_ARRAY(hw, E1000_VFTA, idx);
+       if (on)
+               entry |= bit;
+       else
+               entry &= ~bit;
+       E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, idx, entry);
+
+       /* Keep the software shadow in sync with the hardware table. */
+       shadow_vfta->vfta[idx] = entry;
+}
+
+static void
+igb_vlan_hw_support_enable(struct rte_eth_dev *dev)
+{
+       struct e1000_hw *hw =
+               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct e1000_vfta * shadow_vfta =
+               E1000_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
+       uint32_t reg;
+       int i;
+
+       /* VLAN Mode Enable */
+       reg = E1000_READ_REG(hw, E1000_CTRL);
+       reg |= E1000_CTRL_VME;
+       E1000_WRITE_REG(hw, E1000_CTRL, reg);
+
+       /* Filter Table Enable */
+       reg = E1000_READ_REG(hw, E1000_RCTL);
+       reg &= ~E1000_RCTL_CFIEN;
+       reg |= E1000_RCTL_VFE;
+       E1000_WRITE_REG(hw, E1000_RCTL, reg);
+
+       /* Update maximum frame size */
+       /* NOTE(review): RLPML is incremented by VLAN_TAG_SIZE on every call
+        * and igb_vlan_hw_support_disable() never restores it; repeated
+        * enable/disable cycles keep growing the limit -- confirm this is
+        * invoked only once per device start. */
+       reg = E1000_READ_REG(hw, E1000_RLPML);
+       reg += VLAN_TAG_SIZE;
+       E1000_WRITE_REG(hw, E1000_RLPML, reg);
+
+       /* restore VFTA table from the software shadow */
+       for (i = 0; i < E1000_VFTA_SIZE; i++)
+               E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, i, shadow_vfta->vfta[i]);
+}
+
+static void
+igb_vlan_hw_support_disable(struct rte_eth_dev *dev)
+{
+       struct e1000_hw *hw =
+               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint32_t ctrl;
+
+       /* Clear the VLAN Mode Enable bit. */
+       ctrl = E1000_READ_REG(hw, E1000_CTRL);
+       ctrl &= ~E1000_CTRL_VME;
+       E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+}
+
+static void
+igb_intr_disable(struct e1000_hw *hw)
+{
+       /* Mask every interrupt cause and flush so it takes effect now. */
+       E1000_WRITE_REG(hw, E1000_IMC, ~0);
+       E1000_WRITE_FLUSH(hw);
+}
+
+/**
+ * Enable the link-status-change interrupt mask and then enable the
+ * device interrupt.
+ *
+ * @param dev
+ *  Pointer to struct rte_eth_dev.
+ *
+ * @return
+ *  - On success, zero.
+ *  - On failure, a negative value.
+ */
+static int
+eth_igb_interrupt_setup(struct rte_eth_dev *dev)
+{
+       struct e1000_hw *hw =
+               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       /* Unmask only the Link Status Change cause. */
+       E1000_WRITE_REG(hw, E1000_IMS, E1000_ICR_LSC);
+       E1000_WRITE_FLUSH(hw);
+       /* Enable delivery of the device interrupt at the EAL level. */
+       rte_intr_enable(&(dev->pci_dev->intr_handle));
+
+       return 0;
+}
+
+/*
+ * It reads ICR and gets interrupt causes, check it and set a bit flag
+ * to update link status.
+ *
+ * @param dev
+ *  Pointer to struct rte_eth_dev.
+ *
+ * @return
+ *  - On success, zero.
+ *  - On failure, a negative value.
+ */
+static int
+eth_igb_interrupt_get_status(struct rte_eth_dev *dev)
+{
+       struct e1000_hw *hw =
+               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct e1000_interrupt *intr =
+               E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+       uint32_t cause;
+
+       /* ICR is read-on-clear: this read also acknowledges the causes. */
+       cause = E1000_READ_REG(hw, E1000_ICR);
+
+       /* Record that a deferred link-status refresh is required. */
+       if (cause & E1000_ICR_LSC)
+               intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
+
+       return 0;
+}
+
+/*
+ * It executes link_update after knowing an interrupt is present.
+ *
+ * @param dev
+ *  Pointer to struct rte_eth_dev.
+ *
+ * @return
+ *  - On success, zero.
+ *  - On failure, a negative value.
+ */
+static int
+eth_igb_interrupt_action(struct rte_eth_dev *dev)
+{
+       struct e1000_hw *hw =
+               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct e1000_interrupt *intr =
+               E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+       uint32_t tctl, rctl;
+       struct rte_eth_link link;
+       int ret;
+
+       /* Nothing to do unless a link-status refresh was requested. */
+       if (!(intr->flags & E1000_FLAG_NEED_LINK_UPDATE))
+               return -1;
+
+       /* Consume the flag and re-enable the interrupt line. */
+       intr->flags &= ~E1000_FLAG_NEED_LINK_UPDATE;
+       rte_intr_enable(&(dev->pci_dev->intr_handle));
+
+       /* set get_link_status to check register later */
+       hw->mac.get_link_status = 1;
+       ret = eth_igb_link_update(dev, 0);
+
+       /* check if link has changed; -1 from link_update means unchanged */
+       if (ret < 0)
+               return 0;
+
+       memset(&link, 0, sizeof(link));
+       rte_igb_dev_atomic_read_link_status(dev, &link);
+       if (link.link_status) {
+               PMD_INIT_LOG(INFO,
+                       " Port %d: Link Up - speed %u Mbps - %s\n",
+                       dev->data->port_id, (unsigned)link.link_speed,
+                       link.link_duplex == ETH_LINK_FULL_DUPLEX ?
+                               "full-duplex" : "half-duplex");
+       } else {
+               PMD_INIT_LOG(INFO, " Port %d: Link Down\n",
+                                       dev->data->port_id);
+       }
+       PMD_INIT_LOG(INFO, "PCI Address: %04d:%02d:%02d:%d",
+                               dev->pci_dev->addr.domain,
+                               dev->pci_dev->addr.bus,
+                               dev->pci_dev->addr.devid,
+                               dev->pci_dev->addr.function);
+       /* Enable Tx/Rx on link up, disable them on link down. */
+       tctl = E1000_READ_REG(hw, E1000_TCTL);
+       rctl = E1000_READ_REG(hw, E1000_RCTL);
+       if (link.link_status) {
+               /* enable Tx/Rx */
+               tctl |= E1000_TCTL_EN;
+               rctl |= E1000_RCTL_EN;
+       } else {
+               /* disable Tx/Rx */
+               tctl &= ~E1000_TCTL_EN;
+               rctl &= ~E1000_RCTL_EN;
+       }
+       E1000_WRITE_REG(hw, E1000_TCTL, tctl);
+       E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+       E1000_WRITE_FLUSH(hw);
+
+       return 0;
+}
+
+/**
+ * Interrupt handler which shall be registered at first.
+ *
+ * @param handle
+ *  Pointer to interrupt handle.
+ * @param param
+ *  The address of parameter (struct rte_eth_dev *) registered before.
+ *
+ * @return
+ *  void
+ */
+static void
+eth_igb_interrupt_handler(struct rte_intr_handle *handle, void *param)
+{
+       struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+
+       /* Acknowledge causes, act on them, then notify registered
+        * link-status-change callbacks. */
+       eth_igb_interrupt_get_status(dev);
+       eth_igb_interrupt_action(dev);
+       _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
+}
+
+static int
+eth_igb_led_on(struct rte_eth_dev *dev)
+{
+       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       /* Light the port LED; -ENOTSUP when the hardware cannot. */
+       return (e1000_led_on(hw) == E1000_SUCCESS ? 0 : -ENOTSUP);
+}
+
+static int
+eth_igb_led_off(struct rte_eth_dev *dev)
+{
+       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       /* Turn the port LED off; -ENOTSUP when the hardware cannot. */
+       return (e1000_led_off(hw) == E1000_SUCCESS ? 0 : -ENOTSUP);
+}
+
+static int
+eth_igb_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+       struct e1000_hw *hw;
+       int err;
+       /* Maps rte_eth_fc_mode values to the shared-code enum; depends on
+        * both enums declaring the modes in the same order. */
+       enum e1000_fc_mode rte_fcmode_2_e1000_fcmode[] = {
+               e1000_fc_none,
+               e1000_fc_rx_pause,
+               e1000_fc_tx_pause,
+               e1000_fc_full
+       };
+       uint32_t rx_buf_size;
+       uint32_t max_high_water;
+
+       hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       rx_buf_size = igb_get_rx_buffer_size(hw);
+       PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x \n", rx_buf_size);
+
+       /* At least reserve one Ethernet frame for watermark */
+       /* NOTE(review): underflows if rx_buf_size < ETHER_MAX_LEN -- confirm
+        * the minimum buffer size across supported MAC types. */
+       max_high_water = rx_buf_size - ETHER_MAX_LEN;
+       if ((fc_conf->high_water > max_high_water) ||
+               (fc_conf->high_water < fc_conf->low_water)) {
+               PMD_INIT_LOG(ERR, "e1000 incorrect high/low water value \n");
+               PMD_INIT_LOG(ERR, "high water must <=  0x%x \n", max_high_water);
+               return (-EINVAL);
+       }
+
+       /* NOTE(review): fc_conf->mode indexes the table above without a
+        * bounds check; verify callers can never pass a value >= 4. */
+       hw->fc.requested_mode = rte_fcmode_2_e1000_fcmode[fc_conf->mode];
+       hw->fc.pause_time     = fc_conf->pause_time;
+       hw->fc.high_water     = fc_conf->high_water;
+       hw->fc.low_water      = fc_conf->low_water;
+       hw->fc.send_xon       = fc_conf->send_xon;
+
+       /* Apply the new settings by renegotiating the link. */
+       err = e1000_setup_link_generic(hw);
+       if (err == E1000_SUCCESS) {
+               return 0;
+       }
+
+       PMD_INIT_LOG(ERR, "e1000_setup_link_generic = 0x%x \n", err);
+       return (-EIO);
+}
+
+static void
+eth_igb_rar_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
+               uint32_t index, __rte_unused uint32_t pool)
+{
+       struct e1000_hw *hw =
+               E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       /* Program the Receive Address Register at the requested slot. */
+       e1000_rar_set(hw, mac_addr->addr_bytes, index);
+}
+
+static void
+eth_igb_rar_clear(struct rte_eth_dev *dev, uint32_t index)
+{
+       uint8_t null_mac[ETHER_ADDR_LEN] = { 0 };
+       struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       /* Writing the all-zero address disables the slot. */
+       e1000_rar_set(hw, null_mac, index);
+}
diff --git a/lib/librte_pmd_igb/e1000_ethdev.h b/lib/librte_pmd_igb/e1000_ethdev.h
new file mode 100644 (file)
index 0000000..201866b
--- /dev/null
@@ -0,0 +1,117 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#ifndef _E1000_ETHDEV_H_
+#define _E1000_ETHDEV_H_
+
+/* Set in e1000_interrupt.flags when an LSC interrupt requires a deferred
+ * link-status update. */
+#define E1000_FLAG_NEED_LINK_UPDATE (uint32_t)(1 << 0)
+
+/*
+ * Defines that were not part of e1000_hw.h as they are not used by the FreeBSD
+ * driver.
+ */
+#define E1000_ADVTXD_POPTS_TXSM     0x00000200 /* L4 Checksum offload request */
+#define E1000_ADVTXD_POPTS_IXSM     0x00000100 /* IP Checksum offload request */
+#define E1000_ADVTXD_TUCMD_L4T_RSV  0x00001800 /* L4 Packet TYPE of Reserved */
+#define E1000_RXD_STAT_TMST         0x10000    /* Timestamped Packet indication */
+#define E1000_RXD_ERR_CKSUM_BIT     29
+#define E1000_RXD_ERR_CKSUM_MSK     3
+#define E1000_ADVTXD_MACLEN_SHIFT   9          /* Bit shift for l2_len */
+
+/* Number of 32-bit words in the VLAN Filter Table Array. */
+#define E1000_VFTA_SIZE 128
+
+/* structure for interrupt relative data */
+struct e1000_interrupt {
+       uint32_t flags;  /* bitmask of E1000_FLAG_* values */
+};
+
+/* local vfta copy, used to restore the hardware table after reset */
+struct e1000_vfta {
+       uint32_t vfta[E1000_VFTA_SIZE];
+};
+
+/*
+ * Structure to store private data for each driver instance (for each port).
+ */
+struct e1000_adapter {
+       struct e1000_hw         hw;           /* shared-code hardware state */
+       struct e1000_hw_stats   stats;        /* accumulated HW statistics */
+       struct e1000_interrupt  intr;         /* interrupt bookkeeping */
+       struct e1000_vfta       shadow_vfta;  /* software copy of the VFTA */
+};
+
+/* Accessors mapping a dev_private pointer to the embedded sub-structures. */
+#define E1000_DEV_PRIVATE_TO_HW(adapter) \
+       (&((struct e1000_adapter *)adapter)->hw)
+
+#define E1000_DEV_PRIVATE_TO_STATS(adapter) \
+       (&((struct e1000_adapter *)adapter)->stats)
+
+#define E1000_DEV_PRIVATE_TO_INTR(adapter) \
+       (&((struct e1000_adapter *)adapter)->intr)
+
+#define E1000_DEV_PRIVATE_TO_VFTA(adapter) \
+       (&((struct e1000_adapter *)adapter)->shadow_vfta)
+
+/*
+ * RX/TX function prototypes (implemented in e1000_rxtx.c)
+ */
+int igb_dev_tx_queue_alloc(struct rte_eth_dev *dev, uint16_t nb_queues);
+int igb_dev_rx_queue_alloc(struct rte_eth_dev *dev, uint16_t nb_queues);
+void igb_dev_clear_queues(struct rte_eth_dev *dev);
+
+int eth_igb_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
+               uint16_t nb_rx_desc, unsigned int socket_id,
+               const struct rte_eth_rxconf *rx_conf,
+               struct rte_mempool *mb_pool);
+
+int eth_igb_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
+               uint16_t nb_tx_desc, unsigned int socket_id,
+               const struct rte_eth_txconf *tx_conf);
+
+int eth_igb_rx_init(struct rte_eth_dev *dev);
+
+void eth_igb_tx_init(struct rte_eth_dev *dev);
+
+uint16_t eth_igb_xmit_pkts(struct igb_tx_queue *txq, struct rte_mbuf **tx_pkts,
+               uint16_t nb_pkts);
+
+uint16_t eth_igb_recv_pkts(struct igb_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+               uint16_t nb_pkts);
+
+/* Variant of eth_igb_recv_pkts that reassembles multi-descriptor frames. */
+uint16_t eth_igb_recv_scattered_pkts(struct igb_rx_queue *rxq,
+               struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+
+#endif /* _E1000_ETHDEV_H_ */
diff --git a/lib/librte_pmd_igb/e1000_logs.h b/lib/librte_pmd_igb/e1000_logs.h
new file mode 100644 (file)
index 0000000..e0c50b5
--- /dev/null
@@ -0,0 +1,74 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#ifndef _E1000_LOGS_H_
+#define _E1000_LOGS_H_
+
+/* Each PMD_*_LOG macro below expands to an RTE_LOG call prefixed with the
+ * enclosing function name when the matching RTE_LIBRTE_IGB_DEBUG_* build
+ * option is defined, and compiles to nothing otherwise. */
+
+/* Init/configure-path logging. */
+#ifdef RTE_LIBRTE_IGB_DEBUG_INIT
+#define PMD_INIT_LOG(level, fmt, args...) \
+       RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_INIT_LOG(level, fmt, args...) do { } while(0)
+#endif
+
+/* Receive-path logging. */
+#ifdef RTE_LIBRTE_IGB_DEBUG_RX
+#define PMD_RX_LOG(level, fmt, args...) \
+       RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_RX_LOG(level, fmt, args...) do { } while(0)
+#endif
+
+/* Transmit-path logging. */
+#ifdef RTE_LIBRTE_IGB_DEBUG_TX
+#define PMD_TX_LOG(level, fmt, args...) \
+       RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_TX_LOG(level, fmt, args...) do { } while(0)
+#endif
+
+/* Transmit descriptor-reclaim logging. */
+#ifdef RTE_LIBRTE_IGB_DEBUG_TX_FREE
+#define PMD_TX_FREE_LOG(level, fmt, args...) \
+       RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_TX_FREE_LOG(level, fmt, args...) do { } while(0)
+#endif
+
+/* General driver logging. */
+#ifdef RTE_LIBRTE_IGB_DEBUG_DRIVER
+#define PMD_DRV_LOG(level, fmt, args...) \
+       RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_DRV_LOG(level, fmt, args...) do { } while(0)
+#endif
+
+#endif /* _E1000_LOGS_H_ */
diff --git a/lib/librte_pmd_igb/e1000_rxtx.c b/lib/librte_pmd_igb/e1000_rxtx.c
new file mode 100644 (file)
index 0000000..a891d12
--- /dev/null
@@ -0,0 +1,1859 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <sys/queue.h>
+
+#include <endian.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <stdint.h>
+#include <stdarg.h>
+#include <inttypes.h>
+
+#include <rte_interrupts.h>
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_memzone.h>
+#include <rte_launch.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_ring.h>
+#include <rte_mempool.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_prefetch.h>
+#include <rte_udp.h>
+#include <rte_tcp.h>
+#include <rte_sctp.h>
+#include <rte_string_fns.h>
+
+#include "e1000_logs.h"
+#include "igb/e1000_api.h"
+#include "e1000_ethdev.h"
+
+static inline struct rte_mbuf *
+rte_rxmbuf_alloc(struct rte_mempool *mp)
+{
+       struct rte_mbuf *m;
+
+       m = __rte_mbuf_raw_alloc(mp);
+       __rte_mbuf_sanity_check_raw(m, RTE_MBUF_PKT, 0);
+       return (m);
+}
+
+#define RTE_MBUF_DATA_DMA_ADDR(mb) \
+       (uint64_t) ((mb)->buf_physaddr +                   \
+                       (uint64_t) ((char *)((mb)->pkt.data) -     \
+                               (char *)(mb)->buf_addr))
+
+#define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
+       (uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
+
+/**
+ * Structure associated with each descriptor of the RX ring of a RX queue.
+ */
+struct igb_rx_entry {
+       struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
+};
+
+/**
+ * Structure associated with each descriptor of the TX ring of a TX queue.
+ */
+struct igb_tx_entry {
+       struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
+       uint16_t next_id; /**< Index of next descriptor in ring. */
+       uint16_t last_id; /**< Index of last scattered descriptor. */
+};
+
+/**
+ * Structure associated with each RX queue.
+ */
+struct igb_rx_queue {
+       struct rte_mempool  *mb_pool;   /**< mbuf pool to populate RX ring. */
+       volatile union e1000_adv_rx_desc *rx_ring; /**< RX ring virtual address. */
+       uint64_t            rx_ring_phys_addr; /**< RX ring DMA address. */
+       volatile uint32_t   *rdt_reg_addr; /**< RDT register address. */
+       struct igb_rx_entry *sw_ring;   /**< address of RX software ring. */
+       struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
+       struct rte_mbuf *pkt_last_seg;  /**< Last segment of current packet. */
+       uint16_t            nb_rx_desc; /**< number of RX descriptors. */
+       uint16_t            rx_tail;    /**< current value of RDT register. */
+       uint16_t            nb_rx_hold; /**< number of held free RX desc. */
+       uint16_t            rx_free_thresh; /**< max free RX desc to hold. */
+       uint16_t            queue_id;   /**< RX queue index. */
+       uint8_t             port_id;    /**< Device port identifier. */
+       uint8_t             pthresh;    /**< Prefetch threshold register. */
+       uint8_t             hthresh;    /**< Host threshold register. */
+       uint8_t             wthresh;    /**< Write-back threshold register. */
+       uint8_t             crc_len;    /**< 0 if CRC stripped, 4 otherwise. */
+};
+
+/**
+ * Hardware context number
+ */
+enum igb_advctx_num {
+       IGB_CTX_0    = 0, /**< CTX0    */
+       IGB_CTX_1    = 1, /**< CTX1    */
+       IGB_CTX_NUM  = 2, /**< CTX NUM */
+};
+
+/**
+ * Structure to check if a new context needs to be built
+ */
+struct igb_advctx_info {
+       uint16_t flags;           /**< ol_flags related to context build. */
+       uint32_t cmp_mask;        /**< compare mask for vlan_macip_lens */
+       uint32_t vlan_macip_lens; /**< vlan, mac.ip length. */
+};
+
+/**
+ * Structure associated with each TX queue.
+ */
+struct igb_tx_queue {
+       volatile union e1000_adv_tx_desc *tx_ring; /**< TX ring address */
+       uint64_t               tx_ring_phys_addr; /**< TX ring DMA address. */
+       struct igb_tx_entry    *sw_ring; /**< virtual address of SW ring. */
+       volatile uint32_t      *tdt_reg_addr; /**< Address of TDT register. */
+       uint32_t               txd_type;      /**< Device-specific TXD type */
+       uint16_t               nb_tx_desc;    /**< number of TX descriptors. */
+       uint16_t               tx_tail;  /**< Current value of TDT register. */
+       uint16_t               tx_head;  /**< Index of first used TX descriptor. */
+       uint16_t               queue_id; /**< TX queue index. */
+       uint8_t                port_id;  /**< Device port identifier. */
+       uint8_t                pthresh;  /**< Prefetch threshold register. */
+       uint8_t                hthresh;  /**< Host threshold register. */
+       uint8_t                wthresh;  /**< Write-back threshold register. */
+       uint32_t               ctx_curr; /**< Current used hardware descriptor. */
+       uint32_t               ctx_start;/**< Start context position for transmit queue. */
+       struct igb_advctx_info ctx_cache[IGB_CTX_NUM];  /**< Hardware context history.*/
+};
+
+#if 1
+#define RTE_PMD_USE_PREFETCH
+#endif
+
+#ifdef RTE_PMD_USE_PREFETCH
+#define rte_igb_prefetch(p)    rte_prefetch0(p)
+#else
+#define rte_igb_prefetch(p)    do {} while(0)
+#endif
+
+#ifdef RTE_PMD_PACKET_PREFETCH
+#define rte_packet_prefetch(p) rte_prefetch1(p)
+#else
+#define rte_packet_prefetch(p) do {} while(0)
+#endif
+
+/*********************************************************************
+ *
+ *  TX function
+ *
+ **********************************************************************/
+
+/*
+ * Advanced context descriptors are almost the same between igb/ixgbe
+ * This is a separate function, looking for optimization opportunity here
+ * Rework required to go with the pre-defined values.
+ */
+
+static inline void
+igbe_set_xmit_ctx(struct igb_tx_queue* txq,
+               volatile struct e1000_adv_tx_context_desc *ctx_txd,
+               uint16_t ol_flags, uint32_t vlan_macip_lens)
+{
+       uint32_t type_tucmd_mlhl;
+       uint32_t mss_l4len_idx;
+       uint32_t ctx_idx, ctx_curr;
+       uint32_t cmp_mask;
+
+       ctx_curr = txq->ctx_curr;
+       ctx_idx = ctx_curr + txq->ctx_start;
+
+       cmp_mask = 0;
+       type_tucmd_mlhl = 0;
+
+       if (ol_flags & PKT_TX_VLAN_PKT) {
+               cmp_mask |= TX_VLAN_CMP_MASK;
+       }
+
+       if (ol_flags & PKT_TX_IP_CKSUM) {
+               type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4;
+               cmp_mask |= TX_MAC_LEN_CMP_MASK;
+       }
+
+       /* Specify which HW CTX to upload. */
+       mss_l4len_idx = (ctx_idx << E1000_ADVTXD_IDX_SHIFT);
+       switch (ol_flags & PKT_TX_L4_MASK) {
+       case PKT_TX_UDP_CKSUM:
+               type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP |
+                               E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
+               mss_l4len_idx |= sizeof(struct udp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
+               cmp_mask |= TX_MACIP_LEN_CMP_MASK;
+               break;
+       case PKT_TX_TCP_CKSUM:
+               type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP |
+                               E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
+               mss_l4len_idx |= sizeof(struct tcp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
+               cmp_mask |= TX_MACIP_LEN_CMP_MASK;
+               break;
+       case PKT_TX_SCTP_CKSUM:
+               type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP |
+                               E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
+               mss_l4len_idx |= sizeof(struct sctp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
+               cmp_mask |= TX_MACIP_LEN_CMP_MASK;
+               break;
+       default:
+               type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_RSV |
+                               E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
+               break;
+       }
+
+       txq->ctx_cache[ctx_curr].flags           = ol_flags;
+       txq->ctx_cache[ctx_curr].cmp_mask        = cmp_mask;
+       txq->ctx_cache[ctx_curr].vlan_macip_lens = vlan_macip_lens & cmp_mask;
+
+       ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
+       ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
+       ctx_txd->mss_l4len_idx   = rte_cpu_to_le_32(mss_l4len_idx);
+       ctx_txd->seqnum_seed     = 0;
+}
+
+/*
+ * Check which hardware context can be used. Use the existing match
+ * or create a new context descriptor.
+ */
+static inline uint32_t
+what_advctx_update(struct igb_tx_queue *txq, uint16_t flags,
+               uint32_t vlan_macip_lens)
+{
+       /* If match with the current context */
+       if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
+               (txq->ctx_cache[txq->ctx_curr].vlan_macip_lens ==
+               (txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) {
+                       return txq->ctx_curr;
+       }
+
+       /* If match with the second context */
+       txq->ctx_curr ^= 1;
+       if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
+               (txq->ctx_cache[txq->ctx_curr].vlan_macip_lens ==
+               (txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) {
+                       return txq->ctx_curr;
+       }
+
+       /* Mismatch, use the previous context */
+       return (IGB_CTX_NUM);
+}
+
+static inline uint32_t
+tx_desc_cksum_flags_to_olinfo(uint16_t ol_flags)
+{
+       static const uint32_t l4_olinfo[2] = {0, E1000_ADVTXD_POPTS_TXSM};
+       static const uint32_t l3_olinfo[2] = {0, E1000_ADVTXD_POPTS_IXSM};
+       uint32_t tmp;
+
+       tmp  = l4_olinfo[(ol_flags & PKT_TX_L4_MASK)  != PKT_TX_L4_NO_CKSUM];
+       tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0];
+       return tmp;
+}
+
+static inline uint32_t
+tx_desc_vlan_flags_to_cmdtype(uint16_t ol_flags)
+{
+       static uint32_t vlan_cmd[2] = {0, E1000_ADVTXD_DCMD_VLE};
+       return vlan_cmd[(ol_flags & PKT_TX_VLAN_PKT) != 0];
+}
+
+uint16_t
+eth_igb_xmit_pkts(struct igb_tx_queue *txq, struct rte_mbuf **tx_pkts,
+              uint16_t nb_pkts)
+{
+       struct igb_tx_entry *sw_ring;
+       struct igb_tx_entry *txe, *txn;
+       volatile union e1000_adv_tx_desc *txr;
+       volatile union e1000_adv_tx_desc *txd;
+       struct rte_mbuf     *tx_pkt;
+       struct rte_mbuf     *m_seg;
+       uint64_t buf_dma_addr;
+       uint32_t olinfo_status;
+       uint32_t cmd_type_len;
+       uint32_t pkt_len;
+       uint16_t slen;
+       uint16_t ol_flags;
+       uint16_t tx_end;
+       uint16_t tx_id;
+       uint16_t tx_last;
+       uint16_t nb_tx;
+       uint16_t tx_ol_req;
+       uint32_t new_ctx;
+       uint32_t ctx;
+       uint32_t vlan_macip_lens;
+
+       sw_ring = txq->sw_ring;
+       txr     = txq->tx_ring;
+       tx_id   = txq->tx_tail;
+       txe = &sw_ring[tx_id];
+
+       for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
+               tx_pkt = *tx_pkts++;
+               pkt_len = tx_pkt->pkt.pkt_len;
+
+               RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
+
+               /*
+                * The number of descriptors that must be allocated for a
+                * packet is the number of segments of that packet, plus 1
+                * Context Descriptor for the VLAN Tag Identifier, if any.
+                * Determine the last TX descriptor to allocate in the TX ring
+                * for the packet, starting from the current position (tx_id)
+                * in the ring.
+                */
+               tx_last = (uint16_t) (tx_id + tx_pkt->pkt.nb_segs - 1);
+
+               ol_flags = tx_pkt->ol_flags;
+               vlan_macip_lens = (tx_pkt->pkt.vlan_tci << 16) | (tx_pkt->pkt.l2_len << E1000_ADVTXD_MACLEN_SHIFT) | tx_pkt->pkt.l3_len;
+               tx_ol_req = (ol_flags & PKT_TX_OFFLOAD_MASK);
+
+               /* If a Context Descriptor needs to be built. */
+               if (tx_ol_req) {
+                       ctx = what_advctx_update(txq, tx_ol_req,vlan_macip_lens);
+                       /* Only allocate context descriptor if required*/
+                       new_ctx = (ctx == IGB_CTX_NUM);
+                       ctx = txq->ctx_curr;
+                       tx_last = (uint16_t) (tx_last + new_ctx);
+               }
+               if (tx_last >= txq->nb_tx_desc)
+                       tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
+
+               PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
+                          " tx_first=%u tx_last=%u\n",
+                          (unsigned) txq->port_id,
+                          (unsigned) txq->queue_id,
+                          (unsigned) pkt_len,
+                          (unsigned) tx_id,
+                          (unsigned) tx_last);
+
+               /*
+                * Check if there are enough free descriptors in the TX ring
+                * to transmit the next packet.
+                * This operation is based on the two following rules:
+                *
+                *   1- Only check that the last needed TX descriptor can be
+                *      allocated (by construction, if that descriptor is free,
+                *      all intermediate ones are also free).
+                *
+                *      For this purpose, the index of the last TX descriptor
+                *      used for a packet (the "last descriptor" of a packet)
+                *      is recorded in the TX entries (the last one included)
+                *      that are associated with all TX descriptors allocated
+                *      for that packet.
+                *
+                *   2- Avoid to allocate the last free TX descriptor of the
+                *      ring, in order to never set the TDT register with the
+                *      same value stored in parallel by the NIC in the TDH
+                *      register, which makes the TX engine of the NIC enter
+                *      in a deadlock situation.
+                *
+                *      By extension, avoid to allocate a free descriptor that
+                *      belongs to the last set of free descriptors allocated
+                *      to the same packet previously transmitted.
+                */
+
+               /*
+                * The "last descriptor" of the previously sent packet, if any,
+                * which used the last descriptor to allocate.
+                */
+               tx_end = sw_ring[tx_last].last_id;
+
+               /*
+                * The next descriptor following that "last descriptor" in the
+                * ring.
+                */
+               tx_end = sw_ring[tx_end].next_id;
+
+               /*
+                * The "last descriptor" associated with that next descriptor.
+                */
+               tx_end = sw_ring[tx_end].last_id;
+
+               /*
+                * Check that this descriptor is free.
+                */
+               if (! (txr[tx_end].wb.status & E1000_TXD_STAT_DD)) {
+                       if (nb_tx == 0)
+                               return (0);
+                       goto end_of_tx;
+               }
+
+               /*
+                * Set common flags of all TX Data Descriptors.
+                *
+                * The following bits must be set in all Data Descriptors:
+                *   - E1000_ADVTXD_DTYP_DATA
+                *   - E1000_ADVTXD_DCMD_DEXT
+                *
+                * The following bits must be set in the first Data Descriptor
+                * and are ignored in the other ones:
+                *   - E1000_ADVTXD_DCMD_IFCS
+                *   - E1000_ADVTXD_MAC_1588
+                *   - E1000_ADVTXD_DCMD_VLE
+                *
+                * The following bits must only be set in the last Data
+                * Descriptor:
+                *   - E1000_TXD_CMD_EOP
+                *
+                * The following bits can be set in any Data Descriptor, but
+                * are only set in the last Data Descriptor:
+                *   - E1000_TXD_CMD_RS
+                */
+               cmd_type_len = txq->txd_type |
+                       E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT;
+               olinfo_status = (pkt_len << E1000_ADVTXD_PAYLEN_SHIFT);
+#if defined(RTE_LIBRTE_IEEE1588)
+               if (ol_flags & PKT_TX_IEEE1588_TMST)
+                       cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
+#endif
+               if (tx_ol_req) {
+                       /* Setup TX Advanced context descriptor if required */
+                       if (new_ctx) {
+                               volatile struct e1000_adv_tx_context_desc *
+                                   ctx_txd;
+
+                               ctx_txd = (volatile struct
+                                   e1000_adv_tx_context_desc *)
+                                   &txr[tx_id];
+
+                               txn = &sw_ring[txe->next_id];
+                               RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
+
+                               if (txe->mbuf != NULL) {
+                                       rte_pktmbuf_free_seg(txe->mbuf);
+                                       txe->mbuf = NULL;
+                               }
+
+                               igbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
+                                   vlan_macip_lens);
+
+                               txe->last_id = tx_last;
+                               tx_id = txe->next_id;
+                               txe = txn;
+                       }
+
+                       /* Setup the TX Advanced Data Descriptor */
+                       cmd_type_len  |= tx_desc_vlan_flags_to_cmdtype(ol_flags);
+                       olinfo_status |= tx_desc_cksum_flags_to_olinfo(ol_flags);
+                       olinfo_status |= (ctx << E1000_ADVTXD_IDX_SHIFT);
+               }
+
+               m_seg = tx_pkt;
+               do {
+                       txn = &sw_ring[txe->next_id];
+                       txd = &txr[tx_id];
+
+                       if (txe->mbuf != NULL)
+                               rte_pktmbuf_free_seg(txe->mbuf);
+                       txe->mbuf = m_seg;
+
+                       /*
+                        * Set up transmit descriptor.
+                        */
+                       slen = (uint16_t) m_seg->pkt.data_len;
+                       buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
+                       txd->read.buffer_addr =
+                               rte_cpu_to_le_64(buf_dma_addr);
+                       txd->read.cmd_type_len =
+                               rte_cpu_to_le_32(cmd_type_len | slen);
+                       txd->read.olinfo_status =
+                               rte_cpu_to_le_32(olinfo_status);
+                       txe->last_id = tx_last;
+                       tx_id = txe->next_id;
+                       txe = txn;
+                       m_seg = m_seg->pkt.next;
+               } while (m_seg != NULL);
+
+               /*
+                * The last packet data descriptor needs End Of Packet (EOP)
+                * and Report Status (RS).
+                */
+               txd->read.cmd_type_len |=
+                       rte_cpu_to_le_32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
+       }
+ end_of_tx:
+       rte_wmb();
+
+       /*
+        * Set the Transmit Descriptor Tail (TDT).
+        */
+       E1000_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id);
+       PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
+                  (unsigned) txq->port_id, (unsigned) txq->queue_id,
+                  (unsigned) tx_id, (unsigned) nb_tx);
+       txq->tx_tail = tx_id;
+
+       return (nb_tx);
+}
+
+/*********************************************************************
+ *
+ *  RX functions
+ *
+ **********************************************************************/
+static inline uint16_t
+rx_desc_hlen_type_rss_to_pkt_flags(uint32_t hl_tp_rs)
+{
+       uint16_t pkt_flags;
+
+       static uint16_t ip_pkt_types_map[16] = {
+               0, PKT_RX_IPV4_HDR, PKT_RX_IPV4_HDR_EXT, PKT_RX_IPV4_HDR_EXT,
+               PKT_RX_IPV6_HDR, 0, 0, 0,
+               PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
+               PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
+       };
+
+#if defined(RTE_LIBRTE_IEEE1588)
+       static uint32_t ip_pkt_etqf_map[8] = {
+               0, 0, 0, PKT_RX_IEEE1588_PTP,
+               0, 0, 0, 0,
+       };
+
+       pkt_flags = (uint16_t) (hl_tp_rs & E1000_RXDADV_PKTTYPE_ETQF) ?
+                               ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07] :
+                               ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F];
+#else
+       pkt_flags = (uint16_t) (hl_tp_rs & E1000_RXDADV_PKTTYPE_ETQF) ? 0 :
+                               ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F];
+#endif
+       return pkt_flags | (uint16_t) (((hl_tp_rs & 0x0F) == 0) ? 0 :
+                                       PKT_RX_RSS_HASH);
+}
+
+static inline uint16_t
+rx_desc_status_to_pkt_flags(uint32_t rx_status)
+{
+       uint16_t pkt_flags;
+
+       /* Check if VLAN present */
+       pkt_flags = (uint16_t) (rx_status & E1000_RXD_STAT_VP) ? PKT_RX_VLAN_PKT : 0;
+
+#if defined(RTE_LIBRTE_IEEE1588)
+       if (rx_status & E1000_RXD_STAT_TMST)
+               pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST;
+#endif
+       return pkt_flags;
+}
+
+static inline uint16_t
+rx_desc_error_to_pkt_flags(uint32_t rx_status)
+{
+       /*
+        * Bit 30: IPE, IPv4 checksum error
+        * Bit 29: L4I, L4I integrity error
+        */
+
+       static uint16_t error_to_pkt_flags_map[4] = {
+               0,  PKT_RX_L4_CKSUM_BAD, PKT_RX_IP_CKSUM_BAD,
+               PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
+       };
+       return error_to_pkt_flags_map[(rx_status >>
+               E1000_RXD_ERR_CKSUM_BIT) & E1000_RXD_ERR_CKSUM_MSK];
+}
+
+uint16_t
+eth_igb_recv_pkts(struct igb_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+              uint16_t nb_pkts)
+{
+       volatile union e1000_adv_rx_desc *rx_ring;
+       volatile union e1000_adv_rx_desc *rxdp;
+       struct igb_rx_entry *sw_ring;
+       struct igb_rx_entry *rxe;
+       struct rte_mbuf *rxm;
+       struct rte_mbuf *nmb;
+       union e1000_adv_rx_desc rxd;
+       uint64_t dma_addr;
+       uint32_t staterr;
+       uint32_t hlen_type_rss;
+       uint16_t pkt_len;
+       uint16_t rx_id;
+       uint16_t nb_rx;
+       uint16_t nb_hold;
+       uint16_t pkt_flags;
+
+       nb_rx = 0;
+       nb_hold = 0;
+       rx_id = rxq->rx_tail;
+       rx_ring = rxq->rx_ring;
+       sw_ring = rxq->sw_ring;
+       while (nb_rx < nb_pkts) {
+               /*
+                * The order of operations here is important as the DD status
+                * bit must not be read after any other descriptor fields.
+                * rx_ring and rxdp are pointing to volatile data so the order
+                * of accesses cannot be reordered by the compiler. If they were
+                * not volatile, they could be reordered which could lead to
+                * using invalid descriptor fields when read from rxd.
+                */
+               rxdp = &rx_ring[rx_id];
+               staterr = rxdp->wb.upper.status_error;
+               if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
+                       break;
+               rxd = *rxdp;
+
+               /*
+                * End of packet.
+                *
+                * If the E1000_RXD_STAT_EOP flag is not set, the RX packet is
+                * likely to be invalid and to be dropped by the various
+                * validation checks performed by the network stack.
+                *
+                * Allocate a new mbuf to replenish the RX ring descriptor.
+                * If the allocation fails:
+                *    - arrange for that RX descriptor to be the first one
+                *      being parsed the next time the receive function is
+                *      invoked [on the same queue].
+                *
+                *    - Stop parsing the RX ring and return immediately.
+                *
+                * This policy does not drop the packet received in the RX
+                * descriptor for which the allocation of a new mbuf failed.
+                * Thus, it allows that packet to be later retrieved if
+                * mbufs have been freed in the meantime.
+                * As a side effect, holding RX descriptors instead of
+                * systematically giving them back to the NIC may lead to
+                * RX ring exhaustion situations.
+                * However, the NIC can gracefully prevent such situations
+                * from happening by sending specific "back-pressure" flow
+                * control frames to its peer(s).
+                */
+               PMD_RX_LOG(DEBUG, "\nport_id=%u queue_id=%u rx_id=%u "
+                          "staterr=0x%x pkt_len=%u\n",
+                          (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
+                          (unsigned) rx_id, (unsigned) staterr,
+                          (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
+
+               nmb = rte_rxmbuf_alloc(rxq->mb_pool);
+               if (nmb == NULL) {
+                       PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
+                                  "queue_id=%u\n", (unsigned) rxq->port_id,
+                                  (unsigned) rxq->queue_id);
+                       rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
+                       break;
+               }
+
+               nb_hold++;
+               rxe = &sw_ring[rx_id];
+               rx_id++;
+               if (rx_id == rxq->nb_rx_desc)
+                       rx_id = 0;
+
+               /* Prefetch next mbuf while processing current one. */
+               rte_igb_prefetch(sw_ring[rx_id].mbuf);
+
+               /*
+                * When next RX descriptor is on a cache-line boundary,
+                * prefetch the next 4 RX descriptors and the next 8 pointers
+                * to mbufs.
+                */
+               if ((rx_id & 0x3) == 0) {
+                       rte_igb_prefetch(&rx_ring[rx_id]);
+                       rte_igb_prefetch(&sw_ring[rx_id]);
+               }
+
+               rxm = rxe->mbuf;
+               rxe->mbuf = nmb;
+               dma_addr =
+                       rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
+               rxdp->read.hdr_addr = dma_addr;
+               rxdp->read.pkt_addr = dma_addr;
+
+               /*
+                * Initialize the returned mbuf.
+                * 1) setup generic mbuf fields:
+                *    - number of segments,
+                *    - next segment,
+                *    - packet length,
+                *    - RX port identifier.
+                * 2) integrate hardware offload data, if any:
+                *    - RSS flag & hash,
+                *    - IP checksum flag,
+                *    - VLAN TCI, if any,
+                *    - error flags.
+                */
+               pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
+                                     rxq->crc_len);
+               rxm->pkt.data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
+               rte_packet_prefetch(rxm->pkt.data);
+               rxm->pkt.nb_segs = 1;
+               rxm->pkt.next = NULL;
+               rxm->pkt.pkt_len = pkt_len;
+               rxm->pkt.data_len = pkt_len;
+               rxm->pkt.in_port = rxq->port_id;
+
+               rxm->pkt.hash.rss = rxd.wb.lower.hi_dword.rss;
+               hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
+               /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
+               rxm->pkt.vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
+
+               pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
+               pkt_flags = (pkt_flags |
+                                       rx_desc_status_to_pkt_flags(staterr));
+               pkt_flags = (pkt_flags |
+                                       rx_desc_error_to_pkt_flags(staterr));
+               rxm->ol_flags = pkt_flags;
+
+               /*
+                * Store the mbuf address into the next entry of the array
+                * of returned packets.
+                */
+               rx_pkts[nb_rx++] = rxm;
+       }
+       rxq->rx_tail = rx_id;
+
+       /*
+        * If the number of free RX descriptors is greater than the RX free
+        * threshold of the queue, advance the Receive Descriptor Tail (RDT)
+        * register.
+        * Update the RDT with the value of the last processed RX descriptor
+        * minus 1, to guarantee that the RDT register is never equal to the
+        * RDH register, which creates a "full" ring situation from the
+        * hardware point of view...
+        */
+       nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
+       if (nb_hold > rxq->rx_free_thresh) {
+               PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
+                          "nb_hold=%u nb_rx=%u\n",
+                          (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
+                          (unsigned) rx_id, (unsigned) nb_hold,
+                          (unsigned) nb_rx);
+               rx_id = (uint16_t) ((rx_id == 0) ?
+                                    (rxq->nb_rx_desc - 1) : (rx_id - 1));
+               E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
+               nb_hold = 0;
+       }
+       rxq->nb_rx_hold = nb_hold;
+       return (nb_rx);
+}
+
+uint16_t
+eth_igb_recv_scattered_pkts(struct igb_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+                        uint16_t nb_pkts)
+{
+       volatile union e1000_adv_rx_desc *rx_ring;
+       volatile union e1000_adv_rx_desc *rxdp;
+       struct igb_rx_entry *sw_ring;
+       struct igb_rx_entry *rxe;
+       struct rte_mbuf *first_seg;
+       struct rte_mbuf *last_seg;
+       struct rte_mbuf *rxm;
+       struct rte_mbuf *nmb;
+       union e1000_adv_rx_desc rxd;
+       uint64_t dma; /* Physical address of mbuf data buffer */
+       uint32_t staterr;
+       uint32_t hlen_type_rss;
+       uint16_t rx_id;
+       uint16_t nb_rx;
+       uint16_t nb_hold;
+       uint16_t data_len;
+       uint16_t pkt_flags;
+
+       nb_rx = 0;
+       nb_hold = 0;
+       rx_id = rxq->rx_tail;
+       rx_ring = rxq->rx_ring;
+       sw_ring = rxq->sw_ring;
+
+       /*
+        * Retrieve RX context of current packet, if any.
+        */
+       first_seg = rxq->pkt_first_seg;
+       last_seg = rxq->pkt_last_seg;
+
+       while (nb_rx < nb_pkts) {
+       next_desc:
+               /*
+                * The order of operations here is important as the DD status
+                * bit must not be read after any other descriptor fields.
+                * rx_ring and rxdp are pointing to volatile data so the order
+                * of accesses cannot be reordered by the compiler. If they were
+                * not volatile, they could be reordered which could lead to
+                * using invalid descriptor fields when read from rxd.
+                */
+               rxdp = &rx_ring[rx_id];
+               staterr = rxdp->wb.upper.status_error;
+               if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
+                       break;
+               rxd = *rxdp;
+
+               /*
+                * Descriptor done.
+                *
+                * Allocate a new mbuf to replenish the RX ring descriptor.
+                * If the allocation fails:
+                *    - arrange for that RX descriptor to be the first one
+                *      being parsed the next time the receive function is
+                *      invoked [on the same queue].
+                *
+                *    - Stop parsing the RX ring and return immediately.
+                *
+                * This policy does not drop the packet received in the RX
+                * descriptor for which the allocation of a new mbuf failed.
+                * Thus, it allows that packet to be later retrieved if
+                * mbuf have been freed in the mean time.
+                * As a side effect, holding RX descriptors instead of
+                * systematically giving them back to the NIC may lead to
+                * RX ring exhaustion situations.
+                * However, the NIC can gracefully prevent such situations
+                * to happen by sending specific "back-pressure" flow control
+                * frames to its peer(s).
+                */
+               PMD_RX_LOG(DEBUG, "\nport_id=%u queue_id=%u rx_id=%u "
+                          "staterr=0x%x data_len=%u\n",
+                          (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
+                          (unsigned) rx_id, (unsigned) staterr,
+                          (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
+
+               nmb = rte_rxmbuf_alloc(rxq->mb_pool);
+               if (nmb == NULL) {
+                       PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
+                                  "queue_id=%u\n", (unsigned) rxq->port_id,
+                                  (unsigned) rxq->queue_id);
+                       rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
+                       break;
+               }
+
+               nb_hold++;
+               rxe = &sw_ring[rx_id];
+               rx_id++;
+               if (rx_id == rxq->nb_rx_desc)
+                       rx_id = 0;
+
+               /* Prefetch next mbuf while processing current one. */
+               rte_igb_prefetch(sw_ring[rx_id].mbuf);
+
+               /*
+                * When next RX descriptor is on a cache-line boundary,
+                * prefetch the next 4 RX descriptors and the next 8 pointers
+                * to mbufs.
+                */
+               if ((rx_id & 0x3) == 0) {
+                       rte_igb_prefetch(&rx_ring[rx_id]);
+                       rte_igb_prefetch(&sw_ring[rx_id]);
+               }
+
+               /*
+                * Update RX descriptor with the physical address of the new
+                * data buffer of the new allocated mbuf.
+                */
+               rxm = rxe->mbuf;
+               rxe->mbuf = nmb;
+               dma = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
+               rxdp->read.pkt_addr = dma;
+               rxdp->read.hdr_addr = dma;
+
+               /*
+                * Set data length & data buffer address of mbuf.
+                */
+               data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
+               rxm->pkt.data_len = data_len;
+               rxm->pkt.data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
+
+               /*
+                * If this is the first buffer of the received packet,
+                * set the pointer to the first mbuf of the packet and
+                * initialize its context.
+                * Otherwise, update the total length and the number of segments
+                * of the current scattered packet, and update the pointer to
+                * the last mbuf of the current packet.
+                */
+               if (first_seg == NULL) {
+                       first_seg = rxm;
+                       first_seg->pkt.pkt_len = data_len;
+                       first_seg->pkt.nb_segs = 1;
+               } else {
+                       first_seg->pkt.pkt_len += data_len;
+                       first_seg->pkt.nb_segs++;
+                       last_seg->pkt.next = rxm;
+               }
+
+               /*
+                * If this is not the last buffer of the received packet,
+                * update the pointer to the last mbuf of the current scattered
+                * packet and continue to parse the RX ring.
+                */
+               if (! (staterr & E1000_RXD_STAT_EOP)) {
+                       last_seg = rxm;
+                       goto next_desc;
+               }
+
+               /*
+                * This is the last buffer of the received packet.
+                * If the CRC is not stripped by the hardware:
+                *   - Subtract the CRC length from the total packet length.
+                *   - If the last buffer only contains the whole CRC or a part
+                *     of it, free the mbuf associated to the last buffer.
+                *     If part of the CRC is also contained in the previous
+                *     mbuf, subtract the length of that CRC part from the
+                *     data length of the previous mbuf.
+                */
+               rxm->pkt.next = NULL;
+               if (unlikely(rxq->crc_len > 0)) {
+                       first_seg->pkt.pkt_len -= ETHER_CRC_LEN;
+                       if (data_len <= ETHER_CRC_LEN) {
+                               rte_pktmbuf_free_seg(rxm);
+                               first_seg->pkt.nb_segs--;
+                               last_seg->pkt.data_len = (uint16_t)
+                                       (last_seg->pkt.data_len -
+                                        (ETHER_CRC_LEN - data_len));
+                               last_seg->pkt.next = NULL;
+                       } else
+                               rxm->pkt.data_len =
+                                       (uint16_t) (data_len - ETHER_CRC_LEN);
+               }
+
+               /*
+                * Initialize the first mbuf of the returned packet:
+                *    - RX port identifier,
+                *    - hardware offload data, if any:
+                *      - RSS flag & hash,
+                *      - IP checksum flag,
+                *      - VLAN TCI, if any,
+                *      - error flags.
+                */
+               first_seg->pkt.in_port = rxq->port_id;
+               first_seg->pkt.hash.rss = rxd.wb.lower.hi_dword.rss;
+
+               /*
+                * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
+                * set in the pkt_flags field.
+                */
+               first_seg->pkt.vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
+               hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
+               pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
+               pkt_flags = (pkt_flags | rx_desc_status_to_pkt_flags(staterr));
+               pkt_flags = (pkt_flags | rx_desc_error_to_pkt_flags(staterr));
+               first_seg->ol_flags = pkt_flags;
+
+               /* Prefetch data of first segment, if configured to do so. */
+               rte_packet_prefetch(first_seg->pkt.data);
+
+               /*
+                * Store the mbuf address into the next entry of the array
+                * of returned packets.
+                */
+               rx_pkts[nb_rx++] = first_seg;
+
+               /*
+                * Setup receipt context for a new packet.
+                */
+               first_seg = NULL;
+       }
+
+       /*
+        * Record index of the next RX descriptor to probe.
+        */
+       rxq->rx_tail = rx_id;
+
+       /*
+        * Save receive context.
+        */
+       rxq->pkt_first_seg = first_seg;
+       rxq->pkt_last_seg = last_seg;
+
+       /*
+        * If the number of free RX descriptors is greater than the RX free
+        * threshold of the queue, advance the Receive Descriptor Tail (RDT)
+        * register.
+        * Update the RDT with the value of the last processed RX descriptor
+        * minus 1, to guarantee that the RDT register is never equal to the
+        * RDH register, which creates a "full" ring situtation from the
+        * hardware point of view...
+        */
+       nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
+       if (nb_hold > rxq->rx_free_thresh) {
+               PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
+                          "nb_hold=%u nb_rx=%u\n",
+                          (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
+                          (unsigned) rx_id, (unsigned) nb_hold,
+                          (unsigned) nb_rx);
+               rx_id = (uint16_t) ((rx_id == 0) ?
+                                    (rxq->nb_rx_desc - 1) : (rx_id - 1));
+               E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
+               nb_hold = 0;
+       }
+       rxq->nb_rx_hold = nb_hold;
+       return (nb_rx);
+}
+
+/*
+ * Rings setup and release.
+ *
+ * TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be
+ * multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary.
+ * This will also optimize cache line size effect.
+ * H/W supports up to cache line size 128.
+ */
+#define IGB_ALIGN 128
+
+/*
+ * Maximum number of Ring Descriptors.
+ *
+ * Since RDLEN/TDLEN should be multiple of 128bytes, the number of ring
+ * descriptors should meet the following condition:
+ *      (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
+ */
+#define IGB_MIN_RING_DESC 32
+#define IGB_MAX_RING_DESC 4096
+
+static const struct rte_memzone *
+ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
+                     uint16_t queue_id, uint32_t ring_size, int socket_id)
+{
+       char z_name[RTE_MEMZONE_NAMESIZE];
+       const struct rte_memzone *mz;
+
+       rte_snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
+                       dev->driver->pci_drv.name, ring_name,
+                               dev->data->port_id, queue_id);
+       mz = rte_memzone_lookup(z_name);
+       if (mz)
+               return mz;
+
+       return rte_memzone_reserve_aligned(z_name, (uint64_t)ring_size,
+                       socket_id, 0, IGB_ALIGN);
+}
+
+static void
+igb_tx_queue_release_mbufs(struct igb_tx_queue *txq)
+{
+       unsigned i;
+
+       if (txq->sw_ring != NULL) {
+               for (i = 0; i < txq->nb_tx_desc; i++) {
+                       if (txq->sw_ring[i].mbuf != NULL) {
+                               rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
+                               txq->sw_ring[i].mbuf = NULL;
+                       }
+               }
+       }
+}
+
+static void
+igb_tx_queue_release(struct igb_tx_queue *txq)
+{
+       igb_tx_queue_release_mbufs(txq);
+        rte_free(txq->sw_ring);
+        rte_free(txq);
+}
+
+int
+igb_dev_tx_queue_alloc(struct rte_eth_dev *dev, uint16_t nb_queues)
+{
+       uint16_t i, old_nb_queues = dev->data->nb_tx_queues;
+       struct igb_tx_queue **txq;
+
+       if (dev->data->tx_queues == NULL) {
+               dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
+                               sizeof(struct igb_tx_queue *) * nb_queues,
+                                                       CACHE_LINE_SIZE);
+               if (dev->data->tx_queues == NULL) {
+                       dev->data->nb_tx_queues = 0;
+                       return -ENOMEM;
+               }
+       } else {
+               if (nb_queues < old_nb_queues)
+                       for (i = nb_queues; i < old_nb_queues; i++)
+                               igb_tx_queue_release(dev->data->tx_queues[i]);
+
+               if (nb_queues != old_nb_queues) {
+                       txq = rte_realloc(dev->data->tx_queues,
+                               sizeof(struct igb_tx_queue *) * nb_queues,
+                                                       CACHE_LINE_SIZE);
+                       if (txq == NULL)
+                               return -ENOMEM;
+                       else
+                               dev->data->tx_queues = txq;
+                       if (nb_queues > old_nb_queues)
+                               memset(&(txq[old_nb_queues]), 0,
+                                       sizeof(struct igb_tx_queue *) *
+                                       (nb_queues - old_nb_queues));
+               }
+       }
+       dev->data->nb_tx_queues = nb_queues;
+
+       return 0;
+}
+
+static void
+igb_reset_tx_queue_stat(struct igb_tx_queue *txq)
+{
+       txq->tx_head = 0;
+       txq->tx_tail = 0;
+       txq->ctx_curr = 0;
+       memset((void*)&txq->ctx_cache, 0,
+               IGB_CTX_NUM * sizeof(struct igb_advctx_info));
+}
+
+static void
+igb_reset_tx_queue(struct igb_tx_queue *txq, struct rte_eth_dev *dev)
+{
+       struct igb_tx_entry *txe = txq->sw_ring;
+       uint32_t size;
+       uint16_t i, prev;
+       struct e1000_hw *hw;
+
+       hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       size = sizeof(union e1000_adv_tx_desc) * txq->nb_tx_desc;
+       /* Zero out HW ring memory */
+       for (i = 0; i < size; i++) {
+               ((volatile char *)txq->tx_ring)[i] = 0;
+       }
+
+       /* Initialize ring entries */
+       prev = txq->nb_tx_desc - 1;
+       for (i = 0; i < txq->nb_tx_desc; i++) {
+               volatile union e1000_adv_tx_desc *txd = &(txq->tx_ring[i]);
+
+               txd->wb.status = E1000_TXD_STAT_DD;
+               txe[i].mbuf = NULL;
+               txe[i].last_id = i;
+               txe[prev].next_id = i;
+               prev = i;
+       }
+
+       txq->txd_type = E1000_ADVTXD_DTYP_DATA;
+       /* 82575 specific, each tx queue will use 2 hw contexts */
+       if (hw->mac.type == e1000_82575)
+               txq->ctx_start = txq->queue_id * IGB_CTX_NUM;
+
+       igb_reset_tx_queue_stat(txq);
+}
+
+int
+eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
+                        uint16_t queue_idx,
+                        uint16_t nb_desc,
+                        unsigned int socket_id,
+                        const struct rte_eth_txconf *tx_conf)
+{
+       const struct rte_memzone *tz;
+       struct igb_tx_queue *txq;
+       struct e1000_hw     *hw;
+       uint32_t size;
+
+       hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       /*
+        * Validate number of transmit descriptors.
+        * It must not exceed hardware maximum, and must be multiple
+        * of IGB_ALIGN.
+        */
+       if (((nb_desc * sizeof(union e1000_adv_tx_desc)) % IGB_ALIGN) != 0 ||
+           (nb_desc > IGB_MAX_RING_DESC) || (nb_desc < IGB_MIN_RING_DESC)) {
+               return -EINVAL;
+       }
+
+       /*
+        * The tx_free_thresh and tx_rs_thresh values are not used in the 1G
+        * driver.
+        */
+       if (tx_conf->tx_free_thresh != 0)
+               RTE_LOG(WARNING, PMD,
+                       "The tx_free_thresh parameter is not "
+                       "used for the 1G driver.");
+       if (tx_conf->tx_rs_thresh != 0)
+               RTE_LOG(WARNING, PMD,
+                       "The tx_rs_thresh parameter is not "
+                       "used for the 1G driver.");
+       if (tx_conf->tx_thresh.wthresh == 0)
+               RTE_LOG(WARNING, PMD,
+                       "To improve 1G driver performance, consider setting "
+                       "the TX WTHRESH value to 4, 8, or 16.");
+
+       /* Free memory prior to re-allocation if needed */
+       if (dev->data->tx_queues[queue_idx] != NULL)
+               igb_tx_queue_release(dev->data->tx_queues[queue_idx]);
+
+       /* First allocate the tx queue data structure */
+       txq = rte_zmalloc("ethdev TX queue", sizeof(struct igb_tx_queue),
+                                                       CACHE_LINE_SIZE);
+       if (txq == NULL)
+               return (-ENOMEM);
+
+       /*
+        * Allocate TX ring hardware descriptors. A memzone large enough to
+        * handle the maximum ring size is allocated in order to allow for
+        * resizing in later calls to the queue setup function.
+        */
+       size = sizeof(union e1000_adv_tx_desc) * IGB_MAX_RING_DESC;
+       tz = ring_dma_zone_reserve(dev, "tx_ring", queue_idx,
+                                       size, socket_id);
+       if (tz == NULL) {
+               igb_tx_queue_release(txq);
+               return (-ENOMEM);
+       }
+
+       txq->nb_tx_desc = nb_desc;
+       txq->pthresh = tx_conf->tx_thresh.pthresh;
+       txq->hthresh = tx_conf->tx_thresh.hthresh;
+       txq->wthresh = tx_conf->tx_thresh.wthresh;
+       txq->queue_id = queue_idx;
+       txq->port_id = dev->data->port_id;
+
+       txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(queue_idx));
+       txq->tx_ring_phys_addr = (uint64_t) tz->phys_addr;
+       txq->tx_ring = (union e1000_adv_tx_desc *) tz->addr;
+
+       size = sizeof(union e1000_adv_tx_desc) * nb_desc;
+
+       /* Allocate software ring */
+       txq->sw_ring = rte_zmalloc("txq->sw_ring",
+                                  sizeof(struct igb_tx_entry) * nb_desc,
+                                  CACHE_LINE_SIZE);
+       if (txq->sw_ring == NULL) {
+               igb_tx_queue_release(txq);
+               return (-ENOMEM);
+       }
+       PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n",
+                    txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
+
+       igb_reset_tx_queue(txq, dev);
+       dev->tx_pkt_burst = eth_igb_xmit_pkts;
+       dev->data->tx_queues[queue_idx] = txq;
+
+       return (0);
+}
+
+static void
+igb_rx_queue_release_mbufs(struct igb_rx_queue *rxq)
+{
+       unsigned i;
+
+       if (rxq->sw_ring != NULL) {
+               for (i = 0; i < rxq->nb_rx_desc; i++) {
+                       if (rxq->sw_ring[i].mbuf != NULL) {
+                               rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
+                               rxq->sw_ring[i].mbuf = NULL;
+                       }
+               }
+       }
+}
+
+static void
+igb_rx_queue_release(struct igb_rx_queue *rxq)
+{
+       igb_rx_queue_release_mbufs(rxq);
+       rte_free(rxq->sw_ring);
+       rte_free(rxq);
+}
+
+int
+igb_dev_rx_queue_alloc(struct rte_eth_dev *dev, uint16_t nb_queues)
+{
+       uint16_t i, old_nb_queues = dev->data->nb_rx_queues;
+       struct igb_rx_queue **rxq;
+
+       if (dev->data->rx_queues == NULL) {
+               dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
+                               sizeof(struct igb_rx_queue *) * nb_queues,
+                                                       CACHE_LINE_SIZE);
+               if (dev->data->rx_queues == NULL) {
+                       dev->data->nb_rx_queues = 0;
+                       return -ENOMEM;
+               }
+       } else {
+               for (i = nb_queues; i < old_nb_queues; i++) {
+                       igb_rx_queue_release(dev->data->rx_queues[i]);
+                       dev->data->rx_queues[i] = NULL;
+               }
+               if (nb_queues != old_nb_queues) {
+                       rxq = rte_realloc(dev->data->rx_queues,
+                               sizeof(struct igb_rx_queue *) * nb_queues,
+                                                       CACHE_LINE_SIZE);
+                       if (rxq == NULL)
+                               return -ENOMEM;
+                       else
+                               dev->data->rx_queues = rxq;
+                       if (nb_queues > old_nb_queues)
+                               memset(&(rxq[old_nb_queues]), 0,
+                                       sizeof(struct igb_rx_queue *) *
+                                       (nb_queues - old_nb_queues));
+               }
+       }
+       dev->data->nb_rx_queues = nb_queues;
+
+       return 0;
+}
+
+static void
+igb_reset_rx_queue(struct igb_rx_queue *rxq)
+{
+       unsigned size;
+       unsigned i;
+
+       /* Zero out HW ring memory */
+       size = sizeof(union e1000_adv_rx_desc) * rxq->nb_rx_desc;
+       for (i = 0; i < size; i++) {
+               ((volatile char *)rxq->rx_ring)[i] = 0;
+       }
+
+       rxq->rx_tail = 0;
+       rxq->pkt_first_seg = NULL;
+       rxq->pkt_last_seg = NULL;
+}
+
+int
+eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
+                        uint16_t queue_idx,
+                        uint16_t nb_desc,
+                        unsigned int socket_id,
+                        const struct rte_eth_rxconf *rx_conf,
+                        struct rte_mempool *mp)
+{
+       const struct rte_memzone *rz;
+       struct igb_rx_queue *rxq;
+       struct e1000_hw     *hw;
+       unsigned int size;
+
+       hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       /*
+        * Validate number of receive descriptors.
+        * It must not exceed hardware maximum, and must be multiple
+        * of IGB_ALIGN.
+        */
+       if (((nb_desc * sizeof(union e1000_adv_rx_desc)) % IGB_ALIGN) != 0 ||
+           (nb_desc > IGB_MAX_RING_DESC) || (nb_desc < IGB_MIN_RING_DESC)) {
+               return (-EINVAL);
+       }
+
+       /* Free memory prior to re-allocation if needed */
+       if (dev->data->rx_queues[queue_idx] != NULL) {
+               igb_rx_queue_release(dev->data->rx_queues[queue_idx]);
+               dev->data->rx_queues[queue_idx] = NULL;
+       }
+
+       /* First allocate the RX queue data structure. */
+       rxq = rte_zmalloc("ethdev RX queue", sizeof(struct igb_rx_queue),
+                         CACHE_LINE_SIZE);
+       if (rxq == NULL)
+               return (-ENOMEM);
+       rxq->mb_pool = mp;
+       rxq->nb_rx_desc = nb_desc;
+       rxq->pthresh = rx_conf->rx_thresh.pthresh;
+       rxq->hthresh = rx_conf->rx_thresh.hthresh;
+       rxq->wthresh = rx_conf->rx_thresh.wthresh;
+       rxq->rx_free_thresh = rx_conf->rx_free_thresh;
+       rxq->queue_id = queue_idx;
+       rxq->port_id = dev->data->port_id;
+       rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 :
+                                 ETHER_CRC_LEN);
+
+       /*
+        *  Allocate RX ring hardware descriptors. A memzone large enough to
+        *  handle the maximum ring size is allocated in order to allow for
+        *  resizing in later calls to the queue setup function.
+        */
+       size = sizeof(union e1000_adv_rx_desc) * IGB_MAX_RING_DESC;
+       rz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx, size, socket_id);
+       if (rz == NULL) {
+               igb_rx_queue_release(rxq);
+               return (-ENOMEM);
+       }
+       rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(queue_idx));
+       rxq->rx_ring_phys_addr = (uint64_t) rz->phys_addr;
+       rxq->rx_ring = (union e1000_adv_rx_desc *) rz->addr;
+
+       /* Allocate software ring. */
+       rxq->sw_ring = rte_zmalloc("rxq->sw_ring",
+                                  sizeof(struct igb_rx_entry) * nb_desc,
+                                  CACHE_LINE_SIZE);
+       if (rxq->sw_ring == NULL) {
+               igb_rx_queue_release(rxq);
+               return (-ENOMEM);
+       }
+       PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n",
+                    rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
+
+       dev->data->rx_queues[queue_idx] = rxq;
+       igb_reset_rx_queue(rxq);
+
+       return 0;
+}
+
+void
+igb_dev_clear_queues(struct rte_eth_dev *dev)
+{
+       uint16_t i;
+       struct igb_tx_queue *txq;
+       struct igb_rx_queue *rxq;
+
+       for (i = 0; i < dev->data->nb_tx_queues; i++) {
+               txq = dev->data->tx_queues[i];
+               igb_tx_queue_release_mbufs(txq);
+               igb_reset_tx_queue(txq, dev);
+       }
+
+       for (i = 0; i < dev->data->nb_rx_queues; i++) {
+               rxq = dev->data->rx_queues[i];
+               igb_rx_queue_release_mbufs(rxq);
+               igb_reset_rx_queue(rxq);
+       }
+}
+
+/**
+ * Receive Side Scaling (RSS).
+ * See section 7.1.1.7 in the following document:
+ *     "Intel 82576 GbE Controller Datasheet" - Revision 2.45 October 2009
+ *
+ * Principles:
+ * The source and destination IP addresses of the IP header and the source and
+ * destination ports of TCP/UDP headers, if any, of received packets are hashed
+ * against a configurable random key to compute a 32-bit RSS hash result.
+ * The seven (7) LSBs of the 32-bit hash result are used as an index into a
+ * 128-entry redirection table (RETA).  Each entry of the RETA provides a 3-bit
+ * RSS output index which is used as the RX queue index where to store the
+ * received packets.
+ * The following output is supplied in the RX write-back descriptor:
+ *     - 32-bit result of the Microsoft RSS hash function,
+ *     - 4-bit RSS type field.
+ */
+
+/*
+ * RSS random key supplied in section 7.1.1.7.3 of the Intel 82576 datasheet.
+ * Used as the default key.
+ */
+static uint8_t rss_intel_key[40] = {
+       0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
+       0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
+       0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
+       0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
+       0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
+};
+
+static void
+igb_rss_disable(struct rte_eth_dev *dev)
+{
+       struct e1000_hw *hw;
+       uint32_t mrqc;
+
+       hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       mrqc = E1000_READ_REG(hw, E1000_MRQC);
+       mrqc &= ~E1000_MRQC_ENABLE_MASK;
+       E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
+}
+
+static void
+igb_rss_configure(struct rte_eth_dev *dev)
+{
+       struct e1000_hw *hw;
+       uint8_t *hash_key;
+       uint32_t rss_key;
+       uint32_t mrqc;
+       uint32_t shift;
+       uint16_t rss_hf;
+       uint16_t i;
+
+       hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
+       if (rss_hf == 0) /* Disable RSS. */ {
+               igb_rss_disable(dev);
+               return;
+       }
+       hash_key = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key;
+       if (hash_key == NULL)
+               hash_key = rss_intel_key; /* Default hash key. */
+
+       /* Fill in RSS hash key. */
+       for (i = 0; i < 10; i++) {
+               rss_key  = hash_key[(i * 4)];
+               rss_key |= hash_key[(i * 4) + 1] << 8;
+               rss_key |= hash_key[(i * 4) + 2] << 16;
+               rss_key |= hash_key[(i * 4) + 3] << 24;
+               E1000_WRITE_REG_ARRAY(hw, E1000_RSSRK(0), i, rss_key);
+       }
+
+       /* Fill in redirection table. */
+       shift = (hw->mac.type == e1000_82575) ? 6 : 0;
+       for (i = 0; i < 128; i++) {
+               union e1000_reta {
+                       uint32_t dword;
+                       uint8_t  bytes[4];
+               } reta;
+               uint8_t q_idx;
+
+               q_idx = (uint8_t) ((dev->data->nb_rx_queues > 1) ?
+                                  i % dev->data->nb_rx_queues : 0);
+               reta.bytes[i & 3] = (uint8_t) (q_idx << shift);
+               if ((i & 3) == 3)
+                       E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword);
+       }
+
+       /* Set configured hashing functions in MRQC register. */
+       mrqc = E1000_MRQC_ENABLE_RSS_4Q; /* RSS enabled. */
+       if (rss_hf & ETH_RSS_IPV4)
+               mrqc |= E1000_MRQC_RSS_FIELD_IPV4;
+       if (rss_hf & ETH_RSS_IPV4_TCP)
+               mrqc |= E1000_MRQC_RSS_FIELD_IPV4_TCP;
+       if (rss_hf & ETH_RSS_IPV6)
+               mrqc |= E1000_MRQC_RSS_FIELD_IPV6;
+       if (rss_hf & ETH_RSS_IPV6_EX)
+               mrqc |= E1000_MRQC_RSS_FIELD_IPV6_EX;
+       if (rss_hf & ETH_RSS_IPV6_TCP)
+               mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP;
+       if (rss_hf & ETH_RSS_IPV6_TCP_EX)
+               mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
+       if (rss_hf & ETH_RSS_IPV4_UDP)
+               mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
+       if (rss_hf & ETH_RSS_IPV6_UDP)
+               mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
+       if (rss_hf & ETH_RSS_IPV6_UDP_EX)
+               mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP_EX;
+       E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
+}
+
+/*********************************************************************
+ *
+ *  Enable receive unit.
+ *
+ **********************************************************************/
+
+static int
+igb_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
+{
+       struct igb_rx_entry *rxe = rxq->sw_ring;
+       uint64_t dma_addr;
+       unsigned i;
+
+       /* Initialize software ring entries. */
+       for (i = 0; i < rxq->nb_rx_desc; i++) {
+               volatile union e1000_adv_rx_desc *rxd;
+               struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool);
+
+               if (mbuf == NULL) {
+                       PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
+                               "queue_id=%hu\n", rxq->queue_id);
+                       igb_rx_queue_release(rxq);
+                       return (-ENOMEM);
+               }
+               dma_addr =
+                       rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
+               rxd = &rxq->rx_ring[i];
+               rxd->read.hdr_addr = dma_addr;
+               rxd->read.pkt_addr = dma_addr;
+               rxe[i].mbuf = mbuf;
+       }
+
+       return 0;
+}
+
+/*
+ * eth_igb_rx_init - Configure and enable the receive unit of the port.
+ *
+ * Disables receives, then for every RX queue: allocates ring mbufs,
+ * programs the descriptor ring base/length, SRRCTL buffer size and
+ * RXDCTL thresholds.  Selects the plain or scattered RX burst function,
+ * configures RSS, L3/L4 checksum offload and CRC stripping, re-enables
+ * receives and finally posts the head/tail pointers.
+ *
+ * Returns 0 on success, or a negative errno propagated from
+ * igb_alloc_rx_queue_mbufs() (mbuf allocation failure); in that case all
+ * queues are torn down via igb_dev_clear_queues().
+ */
+int
+eth_igb_rx_init(struct rte_eth_dev *dev)
+{
+       struct e1000_hw     *hw;
+       struct igb_rx_queue *rxq;
+       struct rte_pktmbuf_pool_private *mbp_priv;
+       uint32_t rctl;
+       uint32_t rxcsum;
+       uint32_t srrctl;
+       uint16_t buf_size;
+       uint16_t rctl_bsize;
+       uint16_t i;
+       int ret;
+
+       hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       srrctl = 0;
+
+       /*
+        * Make sure receives are disabled while setting
+        * up the descriptor ring.
+        */
+       rctl = E1000_READ_REG(hw, E1000_RCTL);
+       E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
+
+       /*
+        * Configure support of jumbo frames, if any.
+        */
+       if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
+               rctl |= E1000_RCTL_LPE;
+
+               /* Set maximum packet length. */
+               E1000_WRITE_REG(hw, E1000_RLPML,
+                               dev->data->dev_conf.rxmode.max_rx_pkt_len);
+       } else
+               rctl &= ~E1000_RCTL_LPE;
+
+       /* Configure and enable each RX queue. */
+       rctl_bsize = 0;
+       /* Default to the non-scattered burst function; it may be
+        * downgraded to the scattered variant per-queue below. */
+       dev->rx_pkt_burst = eth_igb_recv_pkts;
+       for (i = 0; i < dev->data->nb_rx_queues; i++) {
+               uint64_t bus_addr;
+               uint32_t rxdctl;
+
+               rxq = dev->data->rx_queues[i];
+
+               /* Allocate buffers for descriptor rings and set up queue */
+               ret = igb_alloc_rx_queue_mbufs(rxq);
+               if (ret) {
+                       igb_dev_clear_queues(dev);
+                       return ret;
+               }
+
+               /*
+                * Reset crc_len in case it was changed after queue setup by a
+                *  call to configure
+                */
+               rxq->crc_len =
+                       (uint8_t)(dev->data->dev_conf.rxmode.hw_strip_crc ?
+                                                       0 : ETHER_CRC_LEN);
+
+               /* Ring length is in bytes; the base address is split
+                * into its high and low 32-bit halves. */
+               bus_addr = rxq->rx_ring_phys_addr;
+               E1000_WRITE_REG(hw, E1000_RDLEN(i),
+                               rxq->nb_rx_desc *
+                               sizeof(union e1000_adv_rx_desc));
+               E1000_WRITE_REG(hw, E1000_RDBAH(i),
+                               (uint32_t)(bus_addr >> 32));
+               E1000_WRITE_REG(hw, E1000_RDBAL(i), (uint32_t)bus_addr);
+
+               srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
+
+               /*
+                * Configure RX buffer size.
+                */
+               /* The mempool private area lives immediately after the
+                * rte_mempool structure itself. */
+               mbp_priv = (struct rte_pktmbuf_pool_private *)
+                       ((char *)rxq->mb_pool + sizeof(struct rte_mempool));
+               buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size -
+                                      RTE_PKTMBUF_HEADROOM);
+               if (buf_size >= 1024) {
+                       /*
+                        * Configure the BSIZEPACKET field of the SRRCTL
+                        * register of the queue.
+                        * Value is in 1 KB resolution, from 1 KB to 127 KB.
+                        * If this field is equal to 0b, then RCTL.BSIZE
+                        * determines the RX packet buffer size.
+                        */
+                       srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
+                                  E1000_SRRCTL_BSIZEPKT_MASK);
+                       /* Round buf_size down to what the HW will really
+                        * use (multiple of 1 KB). */
+                       buf_size = (uint16_t) ((srrctl &
+                                               E1000_SRRCTL_BSIZEPKT_MASK) <<
+                                              E1000_SRRCTL_BSIZEPKT_SHIFT);
+
+                       if (dev->data->dev_conf.rxmode.max_rx_pkt_len > buf_size){
+                               dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
+                               dev->data->scattered_rx = 1;
+                       }
+               } else {
+                       /*
+                        * Use BSIZE field of the device RCTL register.
+                        */
+                       /* Track the smallest sub-1KB buffer across queues;
+                        * RCTL.BSIZE is a device-wide setting. */
+                       if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
+                               rctl_bsize = buf_size;
+                       dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
+                       dev->data->scattered_rx = 1;
+               }
+
+               E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl);
+
+               /* Enable this RX queue. */
+               rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
+               rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
+               /* Clear bits 19:0 (the three 5-bit threshold fields at
+                * offsets 0, 8 and 16) before writing them. */
+               rxdctl &= 0xFFF00000;
+               rxdctl |= (rxq->pthresh & 0x1F);
+               rxdctl |= ((rxq->hthresh & 0x1F) << 8);
+               rxdctl |= ((rxq->wthresh & 0x1F) << 16);
+               E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
+       }
+
+       /*
+        * Setup BSIZE field of RCTL register, if needed.
+        * Buffer sizes >= 1024 are not [supposed to be] setup in the RCTL
+        * register, since the code above configures the SRRCTL register of
+        * the RX queue in such a case.
+        * All configurable sizes are:
+        * 16384: rctl |= (E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX);
+        *  8192: rctl |= (E1000_RCTL_SZ_8192  | E1000_RCTL_BSEX);
+        *  4096: rctl |= (E1000_RCTL_SZ_4096  | E1000_RCTL_BSEX);
+        *  2048: rctl |= E1000_RCTL_SZ_2048;
+        *  1024: rctl |= E1000_RCTL_SZ_1024;
+        *   512: rctl |= E1000_RCTL_SZ_512;
+        *   256: rctl |= E1000_RCTL_SZ_256;
+        */
+       if (rctl_bsize > 0) {
+               if (rctl_bsize >= 512) /* 512 <= buf_size < 1024 - use 512 */
+                       rctl |= E1000_RCTL_SZ_512;
+               else /* 256 <= buf_size < 512 - use 256 */
+                       rctl |= E1000_RCTL_SZ_256;
+       }
+
+       /*
+        * Configure RSS if device configured with multiple RX queues.
+        */
+       if (dev->data->nb_rx_queues > 1)
+               igb_rss_configure(dev);
+       else
+               igb_rss_disable(dev);
+
+       /*
+        * Setup the Checksum Register.
+        * Receive Full-Packet Checksum Offload is mutually exclusive with RSS.
+        */
+       rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
+       rxcsum |= E1000_RXCSUM_PCSD;
+
+       /* Enable both L3/L4 rx checksum offload */
+       if (dev->data->dev_conf.rxmode.hw_ip_checksum)
+               rxcsum |= (E1000_RXCSUM_IPOFL  | E1000_RXCSUM_TUOFL);
+       else
+               rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
+       E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
+
+       /* Setup the Receive Control Register. */
+       if (dev->data->dev_conf.rxmode.hw_strip_crc) {
+               rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
+
+               /* set STRCRC bit in all queues for Powerville */
+               if (hw->mac.type == e1000_i350) {
+                       for (i = 0; i < dev->data->nb_rx_queues; i++) {
+                               uint32_t dvmolr = E1000_READ_REG(hw, E1000_DVMOLR(i));
+                               dvmolr |= E1000_DVMOLR_STRCRC;
+                               E1000_WRITE_REG(hw, E1000_DVMOLR(i), dvmolr);
+                       }
+               }
+
+       } else {
+               rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
+
+               /* clear STRCRC bit in all queues for Powerville */
+               if (hw->mac.type == e1000_i350) {
+                       for (i = 0; i < dev->data->nb_rx_queues; i++) {
+                               uint32_t dvmolr = E1000_READ_REG(hw, E1000_DVMOLR(i));
+                               dvmolr &= ~E1000_DVMOLR_STRCRC;
+                               E1000_WRITE_REG(hw, E1000_DVMOLR(i), dvmolr);
+                       }
+               }
+       }
+
+       /* Program the multicast-offset field from the MAC filter type. */
+       rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
+       rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
+               E1000_RCTL_RDMTS_HALF |
+               (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
+
+       /* Make sure VLAN Filters are off. */
+       rctl &= ~E1000_RCTL_VFE;
+       /* Don't store bad packets. */
+       rctl &= ~E1000_RCTL_SBP;
+
+       /* Enable Receives. */
+       E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+
+       /*
+        * Setup the HW Rx Head and Tail Descriptor Pointers.
+        * This needs to be done after enable.
+        */
+       for (i = 0; i < dev->data->nb_rx_queues; i++) {
+               rxq = dev->data->rx_queues[i];
+               E1000_WRITE_REG(hw, E1000_RDH(i), 0);
+               E1000_WRITE_REG(hw, E1000_RDT(i), rxq->nb_rx_desc - 1);
+       }
+
+       return 0;
+}
+
+/*********************************************************************
+ *
+ *  Enable transmit unit.
+ *
+ *  Programs base address, length and threshold registers for every TX
+ *  queue of @dev, resets the head/tail pointers, then enables the
+ *  transmitter through TCTL.  No return value.
+ *
+ **********************************************************************/
+void
+eth_igb_tx_init(struct rte_eth_dev *dev)
+{
+       struct e1000_hw     *hw;
+       struct igb_tx_queue *txq;
+       uint32_t tctl;
+       uint32_t txdctl;
+       uint16_t i;
+
+       hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       /* Setup the Base and Length of the Tx Descriptor Rings. */
+       for (i = 0; i < dev->data->nb_tx_queues; i++) {
+               uint64_t bus_addr;
+               txq = dev->data->tx_queues[i];
+               bus_addr = txq->tx_ring_phys_addr;
+
+               /* Length is in bytes; base is split into 32-bit halves. */
+               E1000_WRITE_REG(hw, E1000_TDLEN(i),
+                               txq->nb_tx_desc *
+                               sizeof(union e1000_adv_tx_desc));
+               E1000_WRITE_REG(hw, E1000_TDBAH(i),
+                               (uint32_t)(bus_addr >> 32));
+               E1000_WRITE_REG(hw, E1000_TDBAL(i), (uint32_t)bus_addr);
+
+               /* Setup the HW Tx Head and Tail descriptor pointers. */
+               E1000_WRITE_REG(hw, E1000_TDT(i), 0);
+               E1000_WRITE_REG(hw, E1000_TDH(i), 0);
+
+               /* Setup Transmit threshold registers. */
+               /* pthresh/hthresh/wthresh occupy 5-bit fields at bit
+                * offsets 0, 8 and 16 of TXDCTL. */
+               txdctl = E1000_READ_REG(hw, E1000_TXDCTL(i));
+               txdctl |= txq->pthresh & 0x1F;
+               txdctl |= ((txq->hthresh & 0x1F) << 8);
+               txdctl |= ((txq->wthresh & 0x1F) << 16);
+               txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
+               E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);
+       }
+
+       /* Program the Transmit Control Register. */
+       tctl = E1000_READ_REG(hw, E1000_TCTL);
+       tctl &= ~E1000_TCTL_CT;
+       tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
+                (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
+
+       e1000_config_collision_dist(hw);
+
+       /* This write will effectively turn on the transmit unit. */
+       E1000_WRITE_REG(hw, E1000_TCTL, tctl);
+}
+
diff --git a/lib/librte_pmd_igb/igb/README b/lib/librte_pmd_igb/igb/README
new file mode 100644 (file)
index 0000000..5a5658e
--- /dev/null
@@ -0,0 +1,74 @@
+..
+  BSD LICENSE
+
+  Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions 
+  are met:
+
+    * Redistributions of source code must retain the above copyright 
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in 
+      the documentation and/or other materials provided with the 
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived 
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ version: DPDK.L.1.2.3-3
+
+Intel® IGB driver
+=================
+
+This directory contains code from the Intel® Network Adapter Driver for 82575/6
+and 82580-based Gigabit Network Connections under FreeBSD, version 2.2.3,
+dated 04/25/2011. This code is available from
+`http://downloadmirror.intel.com/15815/eng/igb-2.2.3.tar.gz`
+
+This driver is valid for the product(s) listed below:
+
+* Intel® 82575EB Gigabit Ethernet Controller
+* Intel® 82576 Gigabit Ethernet Controller
+* Intel® 82580EB Gigabit Ethernet Controller
+* Intel® Ethernet Controller I350
+* Intel® Ethernet Server Adapter I340-F4
+* Intel® Ethernet Server Adapter I340-T4
+* Intel® Ethernet Server Adapter I350-F2
+* Intel® Ethernet Server Adapter I350-F4
+* Intel® Ethernet Server Adapter I350-T2
+* Intel® Ethernet Server Adapter I350-T4
+* Intel® Gigabit EF Dual Port Server Adapter
+* Intel® Gigabit ET Dual Port Server Adapter
+* Intel® Gigabit ET Quad Port Server Adapter
+* Intel® Gigabit ET2 Quad Port Server Adapter
+* Intel® Gigabit VT Quad Port Server Adapter
+
+
+Updating driver
+===============
+
+The following modifications have been made to this code to integrate it with the
+Intel® DPDK:
+
+
+e1000_osdep.h and e1000_osdep.c
+-------------------------------
+
+The OS dependency layer has been extensively modified to support the drivers in
+the Intel® DPDK environment. It is expected that these files will not need to be
+changed on updating the driver.
diff --git a/lib/librte_pmd_igb/igb/e1000_82575.c b/lib/librte_pmd_igb/igb/e1000_82575.c
new file mode 100644 (file)
index 0000000..b2f1fca
--- /dev/null
@@ -0,0 +1,2429 @@
+/******************************************************************************
+
+  Copyright (c) 2001-2011, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+/*
+ * 82575EB Gigabit Network Connection
+ * 82575EB Gigabit Backplane Connection
+ * 82575GB Gigabit Network Connection
+ * 82576 Gigabit Network Connection
+ * 82576 Quad Port Gigabit Mezzanine Adapter
+ */
+
+#include "e1000_api.h"
+
+static s32  e1000_init_phy_params_82575(struct e1000_hw *hw);
+static s32  e1000_init_mac_params_82575(struct e1000_hw *hw);
+static s32  e1000_acquire_phy_82575(struct e1000_hw *hw);
+static void e1000_release_phy_82575(struct e1000_hw *hw);
+static s32  e1000_acquire_nvm_82575(struct e1000_hw *hw);
+static void e1000_release_nvm_82575(struct e1000_hw *hw);
+static s32  e1000_check_for_link_82575(struct e1000_hw *hw);
+static s32  e1000_get_cfg_done_82575(struct e1000_hw *hw);
+static s32  e1000_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed,
+                                         u16 *duplex);
+static s32  e1000_init_hw_82575(struct e1000_hw *hw);
+static s32  e1000_phy_hw_reset_sgmii_82575(struct e1000_hw *hw);
+static s32  e1000_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
+                                           u16 *data);
+static s32  e1000_reset_hw_82575(struct e1000_hw *hw);
+static s32  e1000_reset_hw_82580(struct e1000_hw *hw);
+static s32  e1000_read_phy_reg_82580(struct e1000_hw *hw,
+                                    u32 offset, u16 *data);
+static s32  e1000_write_phy_reg_82580(struct e1000_hw *hw,
+                                     u32 offset, u16 data);
+static s32  e1000_set_d0_lplu_state_82580(struct e1000_hw *hw,
+                                          bool active);
+static s32  e1000_set_d3_lplu_state_82580(struct e1000_hw *hw,
+                                          bool active);
+static s32  e1000_set_d0_lplu_state_82575(struct e1000_hw *hw,
+                                          bool active);
+static s32  e1000_setup_copper_link_82575(struct e1000_hw *hw);
+static s32  e1000_setup_serdes_link_82575(struct e1000_hw *hw);
+static s32  e1000_valid_led_default_82575(struct e1000_hw *hw, u16 *data);
+static s32  e1000_write_phy_reg_sgmii_82575(struct e1000_hw *hw,
+                                            u32 offset, u16 data);
+static void e1000_clear_hw_cntrs_82575(struct e1000_hw *hw);
+static s32  e1000_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask);
+static s32  e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw,
+                                                 u16 *speed, u16 *duplex);
+static s32  e1000_get_phy_id_82575(struct e1000_hw *hw);
+static void e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask);
+static bool e1000_sgmii_active_82575(struct e1000_hw *hw);
+static s32  e1000_reset_init_script_82575(struct e1000_hw *hw);
+static s32  e1000_read_mac_addr_82575(struct e1000_hw *hw);
+static void e1000_config_collision_dist_82575(struct e1000_hw *hw);
+static void e1000_power_down_phy_copper_82575(struct e1000_hw *hw);
+static void e1000_shutdown_serdes_link_82575(struct e1000_hw *hw);
+static void e1000_power_up_serdes_link_82575(struct e1000_hw *hw);
+static s32 e1000_set_pcie_completion_timeout(struct e1000_hw *hw);
+static s32 e1000_reset_mdicnfg_82580(struct e1000_hw *hw);
+static s32 e1000_validate_nvm_checksum_82580(struct e1000_hw *hw);
+static s32 e1000_update_nvm_checksum_82580(struct e1000_hw *hw);
+static s32 e1000_update_nvm_checksum_with_offset(struct e1000_hw *hw,
+                                               u16 offset);
+static s32 e1000_validate_nvm_checksum_with_offset(struct e1000_hw *hw,
+                                               u16 offset);
+static s32 e1000_validate_nvm_checksum_i350(struct e1000_hw *hw);
+static s32 e1000_update_nvm_checksum_i350(struct e1000_hw *hw);
+
+/* 82580 RX packet-buffer size lookup table, indexed by the RXPBS field
+ * value.  Presumably sizes are in KB — confirm against the 82580
+ * datasheet RXPBS register description. */
+static const u16 e1000_82580_rxpbs_table[] =
+       { 36, 72, 144, 1, 2, 4, 8, 16,
+         35, 70, 140 };
+/* Number of entries in e1000_82580_rxpbs_table. */
+#define E1000_82580_RXPBS_TABLE_SIZE \
+       (sizeof(e1000_82580_rxpbs_table)/sizeof(u16))
+
+
+/**
+ *  e1000_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO
+ *  @hw: pointer to the HW structure
+ *
+ *  The shared I2C pins can either drive the I2C bus or an external MDIO
+ *  interface — the two uses are mutually exclusive.  Reads the MAC-type
+ *  specific register (MDIC on 82575/82576, MDICNFG on 82580/i350) and
+ *  reports whether external MDIO is selected.  Unknown MAC types report
+ *  FALSE.
+ **/
+static bool e1000_sgmii_uses_mdio_82575(struct e1000_hw *hw)
+{
+       bool has_ext_mdio = FALSE;
+       u32 val;
+
+       DEBUGFUNC("e1000_sgmii_uses_mdio_82575");
+
+       if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576) {
+               val = E1000_READ_REG(hw, E1000_MDIC);
+               has_ext_mdio = !!(val & E1000_MDIC_DEST);
+       } else if (hw->mac.type == e1000_82580 || hw->mac.type == e1000_i350) {
+               val = E1000_READ_REG(hw, E1000_MDICNFG);
+               has_ext_mdio = !!(val & E1000_MDICNFG_EXT_MDIO);
+       }
+
+       return has_ext_mdio;
+}
+
+/**
+ *  e1000_init_phy_params_82575 - Init PHY func ptrs.
+ *  @hw: pointer to the HW structure
+ *
+ *  Chooses PHY access/reset handlers based on the link mode (SGMII over
+ *  I2C, SGMII over external MDIO, or plain MDIO), reads the PHY id and
+ *  installs the id-specific function pointers.
+ *
+ *  Returns E1000_SUCCESS, the status from e1000_get_phy_id_82575(), or
+ *  -E1000_ERR_PHY for an unrecognised PHY id.
+ **/
+static s32 e1000_init_phy_params_82575(struct e1000_hw *hw)
+{
+       struct e1000_phy_info *phy = &hw->phy;
+       s32 ret_val = E1000_SUCCESS;
+       u32 ctrl_ext;
+
+       DEBUGFUNC("e1000_init_phy_params_82575");
+
+       /* Non-copper media has no PHY to manage. */
+       if (hw->phy.media_type != e1000_media_type_copper) {
+               phy->type = e1000_phy_none;
+               goto out;
+       }
+
+       phy->ops.power_up   = e1000_power_up_phy_copper;
+       phy->ops.power_down = e1000_power_down_phy_copper_82575;
+
+       phy->autoneg_mask           = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+       phy->reset_delay_us         = 100;
+
+       phy->ops.acquire            = e1000_acquire_phy_82575;
+       phy->ops.check_reset_block  = e1000_check_reset_block_generic;
+       phy->ops.commit             = e1000_phy_sw_reset_generic;
+       phy->ops.get_cfg_done       = e1000_get_cfg_done_82575;
+       phy->ops.release            = e1000_release_phy_82575;
+
+       ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+
+       /* SGMII PHYs are reached over the I2C interface; enable or
+        * disable it accordingly. */
+       if (e1000_sgmii_active_82575(hw)) {
+               phy->ops.reset      = e1000_phy_hw_reset_sgmii_82575;
+               ctrl_ext |= E1000_CTRL_I2C_ENA;
+       } else {
+               phy->ops.reset      = e1000_phy_hw_reset_generic;
+               ctrl_ext &= ~E1000_CTRL_I2C_ENA;
+       }
+
+       E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+       e1000_reset_mdicnfg_82580(hw);
+
+       /* Select register accessors: SGMII/I2C, 82580-style MDIO, or IGP. */
+       if (e1000_sgmii_active_82575(hw) && !e1000_sgmii_uses_mdio_82575(hw)) {
+               phy->ops.read_reg   = e1000_read_phy_reg_sgmii_82575;
+               phy->ops.write_reg  = e1000_write_phy_reg_sgmii_82575;
+       } else if (hw->mac.type >= e1000_82580) {
+               phy->ops.read_reg   = e1000_read_phy_reg_82580;
+               phy->ops.write_reg  = e1000_write_phy_reg_82580;
+       } else {
+               phy->ops.read_reg   = e1000_read_phy_reg_igp;
+               phy->ops.write_reg  = e1000_write_phy_reg_igp;
+       }
+
+       /* Set phy->phy_addr and phy->id. */
+       /* NOTE(review): ret_val from e1000_get_phy_id_82575() is not
+        * checked before the switch below; on failure phy->id is likely
+        * invalid and falls through to -E1000_ERR_PHY — confirm intended. */
+       ret_val = e1000_get_phy_id_82575(hw);
+
+       /* Verify phy id and set remaining function pointers */
+       switch (phy->id) {
+       case I347AT4_E_PHY_ID:
+       case M88E1112_E_PHY_ID:
+       case M88E1340M_E_PHY_ID:
+       case M88E1111_I_PHY_ID:
+               phy->type                   = e1000_phy_m88;
+               phy->ops.check_polarity     = e1000_check_polarity_m88;
+               phy->ops.get_info           = e1000_get_phy_info_m88;
+               /* Newer M88 variants use the gen2 cable-length routine. */
+               if (phy->id == I347AT4_E_PHY_ID ||
+                   phy->id == M88E1112_E_PHY_ID ||
+                   phy->id == M88E1340M_E_PHY_ID)
+                       phy->ops.get_cable_length = e1000_get_cable_length_m88_gen2;
+               else
+                       phy->ops.get_cable_length = e1000_get_cable_length_m88;
+               phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
+               break;
+       case IGP03E1000_E_PHY_ID:
+       case IGP04E1000_E_PHY_ID:
+               phy->type                   = e1000_phy_igp_3;
+               phy->ops.check_polarity     = e1000_check_polarity_igp;
+               phy->ops.get_info           = e1000_get_phy_info_igp;
+               phy->ops.get_cable_length   = e1000_get_cable_length_igp_2;
+               phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_igp;
+               phy->ops.set_d0_lplu_state  = e1000_set_d0_lplu_state_82575;
+               phy->ops.set_d3_lplu_state  = e1000_set_d3_lplu_state_generic;
+               break;
+       case I82580_I_PHY_ID:
+       case I350_I_PHY_ID:
+               phy->type                   = e1000_phy_82580;
+               phy->ops.check_polarity     = e1000_check_polarity_82577;
+               phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_82577;
+               phy->ops.get_cable_length   = e1000_get_cable_length_82577;
+               phy->ops.get_info           = e1000_get_phy_info_82577;
+               phy->ops.set_d0_lplu_state  = e1000_set_d0_lplu_state_82580;
+               phy->ops.set_d3_lplu_state  = e1000_set_d3_lplu_state_82580;
+               break;
+       default:
+               ret_val = -E1000_ERR_PHY;
+               goto out;
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_init_nvm_params_82575 - Init NVM func ptrs.
+ *  @hw: pointer to the HW structure
+ *
+ *  Derives the EEPROM word/page/address geometry from the EECD register
+ *  (or an explicit override) and installs SPI read/write handlers plus
+ *  MAC-type specific checksum routines.  Always returns E1000_SUCCESS.
+ **/
+s32 e1000_init_nvm_params_82575(struct e1000_hw *hw)
+{
+       struct e1000_nvm_info *nvm = &hw->nvm;
+       u32 eecd = E1000_READ_REG(hw, E1000_EECD);
+       u16 size;
+
+       DEBUGFUNC("e1000_init_nvm_params_82575");
+
+       size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
+                    E1000_EECD_SIZE_EX_SHIFT);
+       /*
+        * Added to a constant, "size" becomes the left-shift value
+        * for setting word_size.
+        */
+       size += NVM_WORD_SIZE_BASE_SHIFT;
+
+       nvm->word_size = 1 << size;
+       nvm->opcode_bits        = 8;
+       nvm->delay_usec         = 1;
+       switch (nvm->override) {
+       case e1000_nvm_override_spi_large:
+               nvm->page_size    = 32;
+               nvm->address_bits = 16;
+               break;
+       case e1000_nvm_override_spi_small:
+               nvm->page_size    = 8;
+               nvm->address_bits = 8;
+               break;
+       default:
+               /* No override: infer geometry from the EECD address-bits flag. */
+               nvm->page_size    = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
+               nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8;
+               break;
+       }
+
+       nvm->type = e1000_nvm_eeprom_spi;
+
+       /* Parts with 1 << 15 words use a larger page size. */
+       if (nvm->word_size == (1 << 15))
+               nvm->page_size = 128;
+
+       /* Function Pointers */
+       nvm->ops.acquire    = e1000_acquire_nvm_82575;
+       nvm->ops.release    = e1000_release_nvm_82575;
+       /* Presumably EERD cannot address >= 32k-word parts, which fall
+        * back to direct SPI reads — confirm against the datasheet. */
+       if (nvm->word_size < (1 << 15))
+               nvm->ops.read    = e1000_read_nvm_eerd;
+       else
+               nvm->ops.read    = e1000_read_nvm_spi;
+
+       nvm->ops.write              = e1000_write_nvm_spi;
+       nvm->ops.validate           = e1000_validate_nvm_checksum_generic;
+       nvm->ops.update             = e1000_update_nvm_checksum_generic;
+       nvm->ops.valid_led_default  = e1000_valid_led_default_82575;
+
+       /* override generic family function pointers for specific descendants */
+       switch (hw->mac.type) {
+       case e1000_82580:
+               nvm->ops.validate = e1000_validate_nvm_checksum_82580;
+               nvm->ops.update = e1000_update_nvm_checksum_82580;
+               break;
+       case e1000_i350:
+               nvm->ops.validate = e1000_validate_nvm_checksum_i350;
+               nvm->ops.update = e1000_update_nvm_checksum_i350;
+               break;
+       default:
+               break;
+       }
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_init_mac_params_82575 - Init MAC func ptrs.
+ *  @hw: pointer to the HW structure
+ *
+ *  Determines the media type from the CTRL_EXT link-mode bits, sizes the
+ *  MTA/UTA/RAR tables per MAC type, and installs every generic or
+ *  82575-specific MAC operation, finishing by setting the LAN id.
+ *  Always returns E1000_SUCCESS.
+ *
+ *  Fixes vs. original: the mis-indented else body of the reset_hw
+ *  selection (it sat at column 0, appearing detached from its else) and
+ *  the broken alignment of the media-type comment block.  Logic is
+ *  unchanged.
+ **/
+static s32 e1000_init_mac_params_82575(struct e1000_hw *hw)
+{
+       struct e1000_mac_info *mac = &hw->mac;
+       struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
+       u32 ctrl_ext = 0;
+
+       DEBUGFUNC("e1000_init_mac_params_82575");
+
+       /*
+        * Set media type.
+        * The 82575 uses bits 22:23 for link mode.  The mode can be changed
+        * based on the EEPROM.  We cannot rely upon device ID.  There
+        * is no distinguishable difference between fiber and internal
+        * SerDes mode on the 82575.  There can be an external PHY attached
+        * on the SGMII interface.  For this, we'll set sgmii_active to TRUE.
+        */
+       hw->phy.media_type = e1000_media_type_copper;
+       dev_spec->sgmii_active = FALSE;
+
+       ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+       switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
+       case E1000_CTRL_EXT_LINK_MODE_SGMII:
+               dev_spec->sgmii_active = TRUE;
+               break;
+       case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
+       case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES:
+               hw->phy.media_type = e1000_media_type_internal_serdes;
+               break;
+       default:
+               break;
+       }
+
+       /* Set mta register count */
+       mac->mta_reg_count = 128;
+       /* Set uta register count (the 82575 has no UTA registers) */
+       mac->uta_reg_count = (hw->mac.type == e1000_82575) ? 0 : 128;
+       /* Set rar entry count */
+       mac->rar_entry_count = E1000_RAR_ENTRIES_82575;
+       if (mac->type == e1000_82576)
+               mac->rar_entry_count = E1000_RAR_ENTRIES_82576;
+       if (mac->type == e1000_82580)
+               mac->rar_entry_count = E1000_RAR_ENTRIES_82580;
+       if (mac->type == e1000_i350) {
+               mac->rar_entry_count = E1000_RAR_ENTRIES_I350;
+               /* Enable EEE default settings for i350 */
+               dev_spec->eee_disable = FALSE;
+       }
+
+       /* Set if part includes ASF firmware */
+       mac->asf_firmware_present = TRUE;
+       /* FWSM register */
+       mac->has_fwsm = TRUE;
+       /* ARC supported; valid only if manageability features are enabled. */
+       mac->arc_subsystem_valid =
+               (E1000_READ_REG(hw, E1000_FWSM) & E1000_FWSM_MODE_MASK)
+                       ? TRUE : FALSE;
+
+       /* Function pointers */
+
+       /* bus type/speed/width */
+       mac->ops.get_bus_info = e1000_get_bus_info_pcie_generic;
+       /* reset: 82580 and later use their own handler */
+       if (mac->type >= e1000_82580)
+               mac->ops.reset_hw = e1000_reset_hw_82580;
+       else
+               mac->ops.reset_hw = e1000_reset_hw_82575;
+       /* hw initialization */
+       mac->ops.init_hw = e1000_init_hw_82575;
+       /* link setup */
+       mac->ops.setup_link = e1000_setup_link_generic;
+       /* physical interface link setup */
+       mac->ops.setup_physical_interface =
+               (hw->phy.media_type == e1000_media_type_copper)
+                       ? e1000_setup_copper_link_82575
+                       : e1000_setup_serdes_link_82575;
+       /* physical interface shutdown */
+       mac->ops.shutdown_serdes = e1000_shutdown_serdes_link_82575;
+       /* physical interface power up */
+       mac->ops.power_up_serdes = e1000_power_up_serdes_link_82575;
+       /* check for link */
+       mac->ops.check_for_link = e1000_check_for_link_82575;
+       /* receive address register setting */
+       mac->ops.rar_set = e1000_rar_set_generic;
+       /* read mac address */
+       mac->ops.read_mac_addr = e1000_read_mac_addr_82575;
+       /* configure collision distance */
+       mac->ops.config_collision_dist = e1000_config_collision_dist_82575;
+       /* multicast address update */
+       mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_generic;
+       /* writing VFTA */
+       mac->ops.write_vfta = e1000_write_vfta_generic;
+       /* clearing VFTA */
+       mac->ops.clear_vfta = e1000_clear_vfta_generic;
+       /* ID LED init */
+       mac->ops.id_led_init = e1000_id_led_init_generic;
+       /* blink LED */
+       mac->ops.blink_led = e1000_blink_led_generic;
+       /* setup LED */
+       mac->ops.setup_led = e1000_setup_led_generic;
+       /* cleanup LED */
+       mac->ops.cleanup_led = e1000_cleanup_led_generic;
+       /* turn on/off LED */
+       mac->ops.led_on = e1000_led_on_generic;
+       mac->ops.led_off = e1000_led_off_generic;
+       /* clear hardware counters */
+       mac->ops.clear_hw_cntrs = e1000_clear_hw_cntrs_82575;
+       /* link info */
+       mac->ops.get_link_up_info = e1000_get_link_up_info_82575;
+
+       /* set lan id for port to determine which phy lock to use */
+       hw->mac.ops.set_lan_id(hw);
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_init_function_pointers_82575 - Init func ptrs.
+ *  @hw: pointer to the HW structure
+ *
+ *  Registers the 82575-family parameter-initialisation entry points for
+ *  the PHY, NVM, MAC and mailbox sub-blocks; each hook is invoked later
+ *  during device bring-up.
+ **/
+void e1000_init_function_pointers_82575(struct e1000_hw *hw)
+{
+       DEBUGFUNC("e1000_init_function_pointers_82575");
+
+       hw->phy.ops.init_params = e1000_init_phy_params_82575;
+       hw->nvm.ops.init_params = e1000_init_nvm_params_82575;
+       hw->mac.ops.init_params = e1000_init_mac_params_82575;
+       hw->mbx.ops.init_params = e1000_init_mbx_params_pf;
+}
+
+/**
+ *  e1000_acquire_phy_82575 - Acquire rights to access PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  Takes the software/firmware semaphore for the PHY belonging to this
+ *  PCI function; function 0 (and any unknown function) uses PHY0's
+ *  semaphore.
+ **/
+static s32 e1000_acquire_phy_82575(struct e1000_hw *hw)
+{
+       u16 sem_mask;
+
+       DEBUGFUNC("e1000_acquire_phy_82575");
+
+       switch (hw->bus.func) {
+       case E1000_FUNC_1:
+               sem_mask = E1000_SWFW_PHY1_SM;
+               break;
+       case E1000_FUNC_2:
+               sem_mask = E1000_SWFW_PHY2_SM;
+               break;
+       case E1000_FUNC_3:
+               sem_mask = E1000_SWFW_PHY3_SM;
+               break;
+       default:
+               sem_mask = E1000_SWFW_PHY0_SM;
+               break;
+       }
+
+       return e1000_acquire_swfw_sync_82575(hw, sem_mask);
+}
+
+/**
+ *  e1000_release_phy_82575 - Release rights to access PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  A wrapper to release access rights to the correct PHY.
+ **/
+static void e1000_release_phy_82575(struct e1000_hw *hw)
+{
+       u16 mask = E1000_SWFW_PHY0_SM;
+
+       DEBUGFUNC("e1000_release_phy_82575");
+
+       if (hw->bus.func == E1000_FUNC_1)
+               mask = E1000_SWFW_PHY1_SM;
+       else if (hw->bus.func == E1000_FUNC_2)
+               mask = E1000_SWFW_PHY2_SM;
+       else if (hw->bus.func == E1000_FUNC_3)
+               mask = E1000_SWFW_PHY3_SM;
+
+       e1000_release_swfw_sync_82575(hw, mask);
+}
+
+/**
+ *  e1000_read_phy_reg_sgmii_82575 - Read PHY register using sgmii
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Reads the PHY register at offset using the serial gigabit media independent
+ *  interface and stores the retrieved information in data.
+ **/
+static s32 e1000_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
+                                          u16 *data)
+{
+       s32 ret_val = -E1000_ERR_PARAM;
+
+       DEBUGFUNC("e1000_read_phy_reg_sgmii_82575");
+
+       if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
+               DEBUGOUT1("PHY Address %u is out of range\n", offset);
+               goto out;
+       }
+
+       ret_val = hw->phy.ops.acquire(hw);
+       if (ret_val)
+               goto out;
+
+       ret_val = e1000_read_phy_reg_i2c(hw, offset, data);
+
+       hw->phy.ops.release(hw);
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_write_phy_reg_sgmii_82575 - Write PHY register using sgmii
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *
+ *  Writes the data to PHY register at the offset using the serial gigabit
+ *  media independent interface.
+ **/
+static s32 e1000_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
+                                           u16 data)
+{
+       s32 ret_val = -E1000_ERR_PARAM;
+
+       DEBUGFUNC("e1000_write_phy_reg_sgmii_82575");
+
+       if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
+               DEBUGOUT1("PHY Address %d is out of range\n", offset);
+               goto out;
+       }
+
+       ret_val = hw->phy.ops.acquire(hw);
+       if (ret_val)
+               goto out;
+
+       ret_val = e1000_write_phy_reg_i2c(hw, offset, data);
+
+       hw->phy.ops.release(hw);
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_get_phy_id_82575 - Retrieve PHY addr and id
+ *  @hw: pointer to the HW structure
+ *
+ *  Retrieves the PHY address and ID for both PHY's which do and do not use
+ *  sgmi interface.
+ **/
+static s32 e1000_get_phy_id_82575(struct e1000_hw *hw)
+{
+       struct e1000_phy_info *phy = &hw->phy;
+       s32  ret_val = E1000_SUCCESS;
+       u16 phy_id;
+       u32 ctrl_ext;
+       u32 mdic;
+
+       DEBUGFUNC("e1000_get_phy_id_82575");
+
+       /*
+        * For SGMII PHYs, we try the list of possible addresses until
+        * we find one that works.  For non-SGMII PHYs
+        * (e.g. integrated copper PHYs), an address of 1 should
+        * work.  The result of this function should mean phy->phy_addr
+        * and phy->id are set correctly.
+        */
+       if (!e1000_sgmii_active_82575(hw)) {
+               phy->addr = 1;
+               ret_val = e1000_get_phy_id(hw);
+               goto out;
+       }
+
+       if (e1000_sgmii_uses_mdio_82575(hw)) {
+               switch (hw->mac.type) {
+               case e1000_82575:
+               case e1000_82576:
+                       mdic = E1000_READ_REG(hw, E1000_MDIC);
+                       mdic &= E1000_MDIC_PHY_MASK;
+                       phy->addr = mdic >> E1000_MDIC_PHY_SHIFT;
+                       break;
+               case e1000_82580:
+               case e1000_i350:
+                       mdic = E1000_READ_REG(hw, E1000_MDICNFG);
+                       mdic &= E1000_MDICNFG_PHY_MASK;
+                       phy->addr = mdic >> E1000_MDICNFG_PHY_SHIFT;
+                       break;
+               default:
+                       ret_val = -E1000_ERR_PHY;
+                       goto out;
+                       break;
+               }
+               ret_val = e1000_get_phy_id(hw);
+               goto out;
+       }
+
+       /* Power on sgmii phy if it is disabled */
+       ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+       E1000_WRITE_REG(hw, E1000_CTRL_EXT,
+                       ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA);
+       E1000_WRITE_FLUSH(hw);
+       msec_delay(300);
+
+       /*
+        * The address field in the I2CCMD register is 3 bits and 0 is invalid.
+        * Therefore, we need to test 1-7
+        */
+       for (phy->addr = 1; phy->addr < 8; phy->addr++) {
+               ret_val = e1000_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id);
+               if (ret_val == E1000_SUCCESS) {
+                       DEBUGOUT2("Vendor ID 0x%08X read at address %u\n",
+                                 phy_id,
+                                 phy->addr);
+                       /*
+                        * At the time of this writing, The M88 part is
+                        * the only supported SGMII PHY product.
+                        */
+                       if (phy_id == M88_VENDOR)
+                               break;
+               } else {
+                       DEBUGOUT1("PHY address %u was unreadable\n",
+                                 phy->addr);
+               }
+       }
+
+       /* A valid PHY type couldn't be found. */
+       if (phy->addr == 8) {
+               phy->addr = 0;
+               ret_val = -E1000_ERR_PHY;
+       } else {
+               ret_val = e1000_get_phy_id(hw);
+       }
+
+       /* restore previous sfp cage power state */
+       E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_phy_hw_reset_sgmii_82575 - Performs a PHY reset
+ *  @hw: pointer to the HW structure
+ *
+ *  Resets the PHY using the serial gigabit media independent interface.
+ **/
+static s32 e1000_phy_hw_reset_sgmii_82575(struct e1000_hw *hw)
+{
+       s32 ret_val = E1000_SUCCESS;
+
+       DEBUGFUNC("e1000_phy_hw_reset_sgmii_82575");
+
+       /*
+        * This isn't a TRUE "hard" reset, but is the only reset
+        * available to us at this time.
+        */
+
+       DEBUGOUT("Soft resetting SGMII attached PHY...\n");
+
+       if (!(hw->phy.ops.write_reg))
+               goto out;
+
+       /*
+        * SFP documentation requires the following to configure the SPF module
+        * to work on SGMII.  No further documentation is given.
+        */
+       ret_val = hw->phy.ops.write_reg(hw, 0x1B, 0x8084);
+       if (ret_val)
+               goto out;
+
+       ret_val = hw->phy.ops.commit(hw);
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_set_d0_lplu_state_82575 - Set Low Power Linkup D0 state
+ *  @hw: pointer to the HW structure
+ *  @active: TRUE to enable LPLU, FALSE to disable
+ *
+ *  Sets the LPLU D0 state according to the active flag.  When
+ *  activating LPLU this function also disables smart speed
+ *  and vice versa.  LPLU will not be activated unless the
+ *  device autonegotiation advertisement meets standards of
+ *  either 10 or 10/100 or 10/100/1000 at all duplexes.
+ *  This is a function pointer entry point only called by
+ *  PHY setup routines.
+ **/
+static s32 e1000_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active)
+{
+       struct e1000_phy_info *phy = &hw->phy;
+       s32 ret_val = E1000_SUCCESS;
+       u16 data;
+
+       DEBUGFUNC("e1000_set_d0_lplu_state_82575");
+
+       if (!(hw->phy.ops.read_reg))
+               goto out;
+
+       ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data);
+       if (ret_val)
+               goto out;
+
+       if (active) {
+               data |= IGP02E1000_PM_D0_LPLU;
+               ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
+                                            data);
+               if (ret_val)
+                       goto out;
+
+               /* When LPLU is enabled, we should disable SmartSpeed */
+               ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+                                           &data);
+               data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+               ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+                                            data);
+               if (ret_val)
+                       goto out;
+       } else {
+               data &= ~IGP02E1000_PM_D0_LPLU;
+               ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
+                                            data);
+               /*
+                * LPLU and SmartSpeed are mutually exclusive.  LPLU is used
+                * during Dx states where the power conservation is most
+                * important.  During driver activity we should enable
+                * SmartSpeed, so performance is maintained.
+                */
+               if (phy->smart_speed == e1000_smart_speed_on) {
+                       ret_val = phy->ops.read_reg(hw,
+                                                   IGP01E1000_PHY_PORT_CONFIG,
+                                                   &data);
+                       if (ret_val)
+                               goto out;
+
+                       data |= IGP01E1000_PSCFR_SMART_SPEED;
+                       ret_val = phy->ops.write_reg(hw,
+                                                    IGP01E1000_PHY_PORT_CONFIG,
+                                                    data);
+                       if (ret_val)
+                               goto out;
+               } else if (phy->smart_speed == e1000_smart_speed_off) {
+                       ret_val = phy->ops.read_reg(hw,
+                                                   IGP01E1000_PHY_PORT_CONFIG,
+                                                   &data);
+                       if (ret_val)
+                               goto out;
+
+                       data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+                       ret_val = phy->ops.write_reg(hw,
+                                                    IGP01E1000_PHY_PORT_CONFIG,
+                                                    data);
+                       if (ret_val)
+                               goto out;
+               }
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_set_d0_lplu_state_82580 - Set Low Power Linkup D0 state
+ *  @hw: pointer to the HW structure
+ *  @active: TRUE to enable LPLU, FALSE to disable
+ *
+ *  Sets the LPLU D0 state according to the active flag.  When
+ *  activating LPLU this function also disables smart speed
+ *  and vice versa.  LPLU will not be activated unless the
+ *  device autonegotiation advertisement meets standards of
+ *  either 10 or 10/100 or 10/100/1000 at all duplexes.
+ *  This is a function pointer entry point only called by
+ *  PHY setup routines.
+ **/
+static s32 e1000_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active)
+{
+       struct e1000_phy_info *phy = &hw->phy;
+       s32 ret_val = E1000_SUCCESS;
+       u16 data;
+
+       DEBUGFUNC("e1000_set_d0_lplu_state_82580");
+
+       data = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
+
+       if (active) {
+               data |= E1000_82580_PM_D0_LPLU;
+
+               /* When LPLU is enabled, we should disable SmartSpeed */
+               data &= ~E1000_82580_PM_SPD;
+       } else {
+               data &= ~E1000_82580_PM_D0_LPLU;
+
+               /*
+                * LPLU and SmartSpeed are mutually exclusive.  LPLU is used
+                * during Dx states where the power conservation is most
+                * important.  During driver activity we should enable
+                * SmartSpeed, so performance is maintained.
+                */
+               if (phy->smart_speed == e1000_smart_speed_on) {
+                       data |= E1000_82580_PM_SPD;
+               } else if (phy->smart_speed == e1000_smart_speed_off) {
+                       data &= ~E1000_82580_PM_SPD;
+               }
+       }
+
+       E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, data);
+       return ret_val;
+}
+
+/**
+ *  e1000_set_d3_lplu_state_82580 - Sets low power link up state for D3
+ *  @hw: pointer to the HW structure
+ *  @active: boolean used to enable/disable lplu
+ *
+ *  Success returns 0, Failure returns 1
+ *
+ *  The low power link up (lplu) state is set to the power management level D3
+ *  and SmartSpeed is disabled when active is TRUE, else clear lplu for D3
+ *  and enable Smartspeed.  LPLU and Smartspeed are mutually exclusive.  LPLU
+ *  is used during Dx states where the power conservation is most important.
+ *  During driver activity, SmartSpeed should be enabled so performance is
+ *  maintained.
+ **/
+s32 e1000_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active)
+{
+       struct e1000_phy_info *phy = &hw->phy;
+       s32 ret_val = E1000_SUCCESS;
+       u16 data;
+
+       DEBUGFUNC("e1000_set_d3_lplu_state_82580");
+
+       data = E1000_READ_REG(hw, E1000_82580_PHY_POWER_MGMT);
+
+       if (!active) {
+               data &= ~E1000_82580_PM_D3_LPLU;
+               /*
+                * LPLU and SmartSpeed are mutually exclusive.  LPLU is used
+                * during Dx states where the power conservation is most
+                * important.  During driver activity we should enable
+                * SmartSpeed, so performance is maintained.
+                */
+               if (phy->smart_speed == e1000_smart_speed_on) {
+                       data |= E1000_82580_PM_SPD;
+               } else if (phy->smart_speed == e1000_smart_speed_off) {
+                       data &= ~E1000_82580_PM_SPD;
+               }
+       } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
+                  (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
+                  (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
+               data |= E1000_82580_PM_D3_LPLU;
+               /* When LPLU is enabled, we should disable SmartSpeed */
+               data &= ~E1000_82580_PM_SPD;
+       }
+
+       E1000_WRITE_REG(hw, E1000_82580_PHY_POWER_MGMT, data);
+       return ret_val;
+}
+
+/**
+ *  e1000_acquire_nvm_82575 - Request for access to EEPROM
+ *  @hw: pointer to the HW structure
+ *
+ *  Acquire the necessary semaphores for exclusive access to the EEPROM.
+ *  Set the EEPROM access request bit and wait for EEPROM access grant bit.
+ *  Return successful if access grant bit set, else clear the request for
+ *  EEPROM access and return -E1000_ERR_NVM (-1).
+ **/
+static s32 e1000_acquire_nvm_82575(struct e1000_hw *hw)
+{
+       s32 ret_val;
+
+       DEBUGFUNC("e1000_acquire_nvm_82575");
+
+       ret_val = e1000_acquire_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
+       if (ret_val)
+               goto out;
+
+       /*
+        * Check if there is some access
+        * error this access may hook on
+        */
+       if (hw->mac.type == e1000_i350) {
+               u32 eecd = E1000_READ_REG(hw, E1000_EECD);
+               if (eecd & (E1000_EECD_BLOCKED | E1000_EECD_ABORT |
+                   E1000_EECD_TIMEOUT)) {
+                       /* Clear all access error flags */
+                       E1000_WRITE_REG(hw, E1000_EECD, eecd |
+                                       E1000_EECD_ERROR_CLR);
+                       DEBUGOUT("Nvm bit banging access error"
+                               " detected and cleared.\n");
+               }
+       }
+       if (hw->mac.type == e1000_82580) {
+               u32 eecd = E1000_READ_REG(hw, E1000_EECD);
+               if (eecd & E1000_EECD_BLOCKED) {
+                       /* Clear access error flag */
+                       E1000_WRITE_REG(hw, E1000_EECD, eecd |
+                                       E1000_EECD_BLOCKED);
+                       DEBUGOUT("Nvm bit banging access"
+                               " error detected and cleared.\n");
+               }
+       }
+
+       ret_val = e1000_acquire_nvm_generic(hw);
+       if (ret_val)
+               e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_release_nvm_82575 - Release exclusive access to EEPROM
+ *  @hw: pointer to the HW structure
+ *
+ *  Stop any current commands to the EEPROM and clear the EEPROM request bit,
+ *  then release the semaphores acquired.
+ **/
+static void e1000_release_nvm_82575(struct e1000_hw *hw)
+{
+       DEBUGFUNC("e1000_release_nvm_82575");
+
+       e1000_release_swfw_sync_82575(hw, E1000_SWFW_EEP_SM);
+}
+
+/**
+ *  e1000_acquire_swfw_sync_82575 - Acquire SW/FW semaphore
+ *  @hw: pointer to the HW structure
+ *  @mask: specifies which semaphore to acquire
+ *
+ *  Acquire the SW/FW semaphore to access the PHY or NVM.  The mask
+ *  will also specify which port we're acquiring the lock for.
+ **/
+static s32 e1000_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
+{
+       u32 swfw_sync;
+       u32 swmask = mask;
+       u32 fwmask = mask << 16;
+       s32 ret_val = E1000_SUCCESS;
+       s32 i = 0, timeout = 200; /* FIXME: find real value to use here */
+
+       DEBUGFUNC("e1000_acquire_swfw_sync_82575");
+
+       while (i < timeout) {
+               if (e1000_get_hw_semaphore_generic(hw)) {
+                       ret_val = -E1000_ERR_SWFW_SYNC;
+                       goto out;
+               }
+
+               swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
+               if (!(swfw_sync & (fwmask | swmask)))
+                       break;
+
+               /*
+                * Firmware currently using resource (fwmask)
+                * or other software thread using resource (swmask)
+                */
+               e1000_put_hw_semaphore_generic(hw);
+               msec_delay_irq(5);
+               i++;
+       }
+
+       if (i == timeout) {
+               DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
+               ret_val = -E1000_ERR_SWFW_SYNC;
+               goto out;
+       }
+
+       swfw_sync |= swmask;
+       E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
+
+       e1000_put_hw_semaphore_generic(hw);
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_release_swfw_sync_82575 - Release SW/FW semaphore
+ *  @hw: pointer to the HW structure
+ *  @mask: specifies which semaphore to acquire
+ *
+ *  Release the SW/FW semaphore used to access the PHY or NVM.  The mask
+ *  will also specify which port we're releasing the lock for.
+ **/
+static void e1000_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
+{
+       u32 swfw_sync;
+
+       DEBUGFUNC("e1000_release_swfw_sync_82575");
+
+       while (e1000_get_hw_semaphore_generic(hw) != E1000_SUCCESS);
+       /* Empty */
+
+       swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
+       swfw_sync &= ~mask;
+       E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
+
+       e1000_put_hw_semaphore_generic(hw);
+}
+
+/**
+ *  e1000_get_cfg_done_82575 - Read config done bit
+ *  @hw: pointer to the HW structure
+ *
+ *  Read the management control register for the config done bit for
+ *  completion status.  NOTE: silicon which is EEPROM-less will fail trying
+ *  to read the config done bit, so an error is *ONLY* logged and returns
+ *  E1000_SUCCESS.  If we were to return with error, EEPROM-less silicon
+ *  would not be able to be reset or change link.
+ **/
+static s32 e1000_get_cfg_done_82575(struct e1000_hw *hw)
+{
+       s32 timeout = PHY_CFG_TIMEOUT;
+       s32 ret_val = E1000_SUCCESS;
+       u32 mask = E1000_NVM_CFG_DONE_PORT_0;
+
+       DEBUGFUNC("e1000_get_cfg_done_82575");
+
+       if (hw->bus.func == E1000_FUNC_1)
+               mask = E1000_NVM_CFG_DONE_PORT_1;
+       else if (hw->bus.func == E1000_FUNC_2)
+               mask = E1000_NVM_CFG_DONE_PORT_2;
+       else if (hw->bus.func == E1000_FUNC_3)
+               mask = E1000_NVM_CFG_DONE_PORT_3;
+       while (timeout) {
+               if (E1000_READ_REG(hw, E1000_EEMNGCTL) & mask)
+                       break;
+               msec_delay(1);
+               timeout--;
+       }
+       if (!timeout)
+               DEBUGOUT("MNG configuration cycle has not completed.\n");
+
+       /* If EEPROM is not marked present, init the PHY manually */
+       if (((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0) &&
+           (hw->phy.type == e1000_phy_igp_3))
+               e1000_phy_init_script_igp3(hw);
+
+       return ret_val;
+}
+
+/**
+ *  e1000_get_link_up_info_82575 - Get link speed/duplex info
+ *  @hw: pointer to the HW structure
+ *  @speed: stores the current speed
+ *  @duplex: stores the current duplex
+ *
+ *  This is a wrapper function, if using the serial gigabit media independent
+ *  interface, use PCS to retrieve the link speed and duplex information.
+ *  Otherwise, use the generic function to get the link speed and duplex info.
+ **/
+static s32 e1000_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed,
+                                        u16 *duplex)
+{
+       s32 ret_val;
+
+       DEBUGFUNC("e1000_get_link_up_info_82575");
+
+       if (hw->phy.media_type != e1000_media_type_copper)
+               ret_val = e1000_get_pcs_speed_and_duplex_82575(hw, speed,
+                                                              duplex);
+       else
+               ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed,
+                                                                   duplex);
+
+       return ret_val;
+}
+
+/**
+ *  e1000_check_for_link_82575 - Check for link
+ *  @hw: pointer to the HW structure
+ *
+ *  If sgmii is enabled, then use the pcs register to determine link, otherwise
+ *  use the generic interface for determining link.
+ **/
+static s32 e1000_check_for_link_82575(struct e1000_hw *hw)
+{
+       s32 ret_val;
+       u16 speed, duplex;
+
+       DEBUGFUNC("e1000_check_for_link_82575");
+
+       if (hw->phy.media_type != e1000_media_type_copper) {
+               ret_val = e1000_get_pcs_speed_and_duplex_82575(hw, &speed,
+                                                              &duplex);
+               /*
+                * Use this flag to determine if link needs to be checked or
+                * not.  If we have link clear the flag so that we do not
+                * continue to check for link.
+                */
+               hw->mac.get_link_status = !hw->mac.serdes_has_link;
+       } else {
+               ret_val = e1000_check_for_copper_link_generic(hw);
+       }
+
+       return ret_val;
+}
+
+/**
+ *  e1000_power_up_serdes_link_82575 - Power up the serdes link after shutdown
+ *  @hw: pointer to the HW structure
+ **/
+static void e1000_power_up_serdes_link_82575(struct e1000_hw *hw)
+{
+       u32 reg;
+
+       DEBUGFUNC("e1000_power_up_serdes_link_82575");
+
+       if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
+           !e1000_sgmii_active_82575(hw))
+               return;
+
+       /* Enable PCS to turn on link */
+       reg = E1000_READ_REG(hw, E1000_PCS_CFG0);
+       reg |= E1000_PCS_CFG_PCS_EN;
+       E1000_WRITE_REG(hw, E1000_PCS_CFG0, reg);
+
+       /* Power up the laser */
+       reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
+       reg &= ~E1000_CTRL_EXT_SDP3_DATA;
+       E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
+
+       /* flush the write to verify completion */
+       E1000_WRITE_FLUSH(hw);
+       msec_delay(1);
+}
+
+/**
+ *  e1000_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex
+ *  @hw: pointer to the HW structure
+ *  @speed: stores the current speed
+ *  @duplex: stores the current duplex
+ *
+ *  Using the physical coding sub-layer (PCS), retrieve the current speed and
+ *  duplex, then store the values in the pointers provided.
+ **/
+static s32 e1000_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw,
+                                                u16 *speed, u16 *duplex)
+{
+       struct e1000_mac_info *mac = &hw->mac;
+       u32 pcs;
+
+       DEBUGFUNC("e1000_get_pcs_speed_and_duplex_82575");
+
+       /* Set up defaults for the return values of this function */
+       mac->serdes_has_link = FALSE;
+       *speed = 0;
+       *duplex = 0;
+
+       /*
+        * Read the PCS Status register for link state. For non-copper mode,
+        * the status register is not accurate. The PCS status register is
+        * used instead.
+        */
+       pcs = E1000_READ_REG(hw, E1000_PCS_LSTAT);
+
+       /*
+        * The link up bit determines when link is up on autoneg. The sync ok
+        * gets set once both sides sync up and agree upon link. Stable link
+        * can be determined by checking for both link up and link sync ok
+        */
+       if ((pcs & E1000_PCS_LSTS_LINK_OK) && (pcs & E1000_PCS_LSTS_SYNK_OK)) {
+               mac->serdes_has_link = TRUE;
+
+               /* Detect and store PCS speed */
+               if (pcs & E1000_PCS_LSTS_SPEED_1000) {
+                       *speed = SPEED_1000;
+               } else if (pcs & E1000_PCS_LSTS_SPEED_100) {
+                       *speed = SPEED_100;
+               } else {
+                       *speed = SPEED_10;
+               }
+
+               /* Detect and store PCS duplex */
+               if (pcs & E1000_PCS_LSTS_DUPLEX_FULL) {
+                       *duplex = FULL_DUPLEX;
+               } else {
+                       *duplex = HALF_DUPLEX;
+               }
+       }
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_shutdown_serdes_link_82575 - Remove link during power down
+ *  @hw: pointer to the HW structure
+ *
+ *  In the case of serdes shut down sfp and PCS on driver unload
+ *  when management pass thru is not enabled.
+ **/
+void e1000_shutdown_serdes_link_82575(struct e1000_hw *hw)
+{
+       u32 reg;
+
+       DEBUGFUNC("e1000_shutdown_serdes_link_82575");
+
+       if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
+           !e1000_sgmii_active_82575(hw))
+               return;
+
+       if (!e1000_enable_mng_pass_thru(hw)) {
+               /* Disable PCS to turn off link */
+               reg = E1000_READ_REG(hw, E1000_PCS_CFG0);
+               reg &= ~E1000_PCS_CFG_PCS_EN;
+               E1000_WRITE_REG(hw, E1000_PCS_CFG0, reg);
+
+               /* shutdown the laser */
+               reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
+               reg |= E1000_CTRL_EXT_SDP3_DATA;
+               E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
+
+               /* flush the write to verify completion */
+               E1000_WRITE_FLUSH(hw);
+               msec_delay(1);
+       }
+
+       return;
+}
+
+/**
+ *  e1000_reset_hw_82575 - Reset hardware
+ *  @hw: pointer to the HW structure
+ *
+ *  This resets the hardware into a known state.
+ **/
+static s32 e1000_reset_hw_82575(struct e1000_hw *hw)
+{
+       u32 ctrl;
+       s32 ret_val;
+
+       DEBUGFUNC("e1000_reset_hw_82575");
+
+       /*
+        * Prevent the PCI-E bus from sticking if there is no TLP connection
+        * on the last TLP read/write transaction when MAC is reset.
+        */
+       ret_val = e1000_disable_pcie_master_generic(hw);
+       if (ret_val) {
+               DEBUGOUT("PCI-E Master disable polling has failed.\n");
+       }
+
+       /* set the completion timeout for interface */
+       ret_val = e1000_set_pcie_completion_timeout(hw);
+       if (ret_val) {
+               DEBUGOUT("PCI-E Set completion timeout has failed.\n");
+       }
+
+       DEBUGOUT("Masking off all interrupts\n");
+       E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
+
+       E1000_WRITE_REG(hw, E1000_RCTL, 0);
+       E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
+       E1000_WRITE_FLUSH(hw);
+
+       msec_delay(10);
+
+       ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+       DEBUGOUT("Issuing a global reset to MAC\n");
+       E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST);
+
+       ret_val = e1000_get_auto_rd_done_generic(hw);
+       if (ret_val) {
+               /*
+                * When auto config read does not complete, do not
+                * return with an error. This can happen in situations
+                * where there is no eeprom and prevents getting link.
+                */
+               DEBUGOUT("Auto Read Done did not complete\n");
+       }
+
+       /* If EEPROM is not present, run manual init scripts */
+       if ((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0)
+               e1000_reset_init_script_82575(hw);
+
+       /* Clear any pending interrupt events. */
+       E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
+       E1000_READ_REG(hw, E1000_ICR);
+
+       /* Install any alternate MAC address into RAR0 */
+       ret_val = e1000_check_alt_mac_addr_generic(hw);
+
+       return ret_val;
+}
+
+/**
+ *  e1000_init_hw_82575 - Initialize hardware
+ *  @hw: pointer to the HW structure
+ *
+ *  This inits the hardware readying it for operation.
+ **/
+static s32 e1000_init_hw_82575(struct e1000_hw *hw)
+{
+       struct e1000_mac_info *mac = &hw->mac;
+       s32 ret_val;
+       u16 i, rar_count = mac->rar_entry_count;
+
+       DEBUGFUNC("e1000_init_hw_82575");
+
+       /* Initialize identification LED */
+       ret_val = mac->ops.id_led_init(hw);
+       if (ret_val) {
+               DEBUGOUT("Error initializing identification LED\n");
+               /* This is not fatal and we should not stop init due to this */
+       }
+
+       /* Disabling VLAN filtering */
+       DEBUGOUT("Initializing the IEEE VLAN\n");
+       mac->ops.clear_vfta(hw);
+
+       /* Setup the receive address */
+       e1000_init_rx_addrs_generic(hw, rar_count);
+
+       /* Zero out the Multicast HASH table */
+       DEBUGOUT("Zeroing the MTA\n");
+       for (i = 0; i < mac->mta_reg_count; i++)
+               E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
+
+       /* Zero out the Unicast HASH table */
+       DEBUGOUT("Zeroing the UTA\n");
+       for (i = 0; i < mac->uta_reg_count; i++)
+               E1000_WRITE_REG_ARRAY(hw, E1000_UTA, i, 0);
+
+       /* Setup link and flow control */
+       ret_val = mac->ops.setup_link(hw);
+
+       /*
+        * Clear all of the statistics registers (clear on read).  It is
+        * important that we do this after we have tried to establish link
+        * because the symbol error count will increment wildly if there
+        * is no link.
+        */
+       e1000_clear_hw_cntrs_82575(hw);
+
+       return ret_val;
+}
+
+/**
+ *  e1000_setup_copper_link_82575 - Configure copper link settings
+ *  @hw: pointer to the HW structure
+ *
+ *  Configures the link for auto-neg or forced speed and duplex.  Then we check
+ *  for link, once link is established calls to configure collision distance
+ *  and flow control are called.
+ **/
+static s32 e1000_setup_copper_link_82575(struct e1000_hw *hw)
+{
+       u32 ctrl;
+       s32  ret_val;
+
+       DEBUGFUNC("e1000_setup_copper_link_82575");
+
+       ctrl = E1000_READ_REG(hw, E1000_CTRL);
+       ctrl |= E1000_CTRL_SLU;
+       ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+       E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+       ret_val = e1000_setup_serdes_link_82575(hw);
+       if (ret_val)
+               goto out;
+
+       if (e1000_sgmii_active_82575(hw) && !hw->phy.reset_disable) {
+               /* allow time for SFP cage time to power up phy */
+               msec_delay(300);
+
+               ret_val = hw->phy.ops.reset(hw);
+               if (ret_val) {
+                       DEBUGOUT("Error resetting the PHY.\n");
+                       goto out;
+               }
+       }
+       switch (hw->phy.type) {
+       case e1000_phy_m88:
+               if (hw->phy.id == I347AT4_E_PHY_ID ||
+                   hw->phy.id == M88E1112_E_PHY_ID ||
+                   hw->phy.id == M88E1340M_E_PHY_ID)
+                       ret_val = e1000_copper_link_setup_m88_gen2(hw);
+               else
+                       ret_val = e1000_copper_link_setup_m88(hw);
+               break;
+       case e1000_phy_igp_3:
+               ret_val = e1000_copper_link_setup_igp(hw);
+               break;
+       case e1000_phy_82580:
+               ret_val = e1000_copper_link_setup_82577(hw);
+               break;
+       default:
+               ret_val = -E1000_ERR_PHY;
+               break;
+       }
+
+       if (ret_val)
+               goto out;
+
+       ret_val = e1000_setup_copper_link_generic(hw);
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_setup_serdes_link_82575 - Setup link for serdes
+ *  @hw: pointer to the HW structure
+ *
+ *  Configure the physical coding sub-layer (PCS) link.  The PCS link is
+ *  used on copper connections where the serialized gigabit media independent
+ *  interface (sgmii), or serdes fiber is being used.  Configures the link
+ *  for auto-negotiation or forces speed/duplex.
+ *
+ *  Returns E1000_SUCCESS; a no-op for media types other than internal
+ *  serdes / active SGMII.
+ **/
+static s32 e1000_setup_serdes_link_82575(struct e1000_hw *hw)
+{
+       u32 ctrl_ext, ctrl_reg, reg;
+       bool pcs_autoneg;
+
+       DEBUGFUNC("e1000_setup_serdes_link_82575");
+
+       /* Only serdes and SGMII links have a PCS to configure */
+       if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
+           !e1000_sgmii_active_82575(hw))
+               return E1000_SUCCESS;
+
+       /*
+        * On the 82575, SerDes loopback mode persists until it is
+        * explicitly turned off or a power cycle is performed.  A read to
+        * the register does not indicate its status.  Therefore, we ensure
+        * loopback mode is disabled during initialization.
+        */
+       E1000_WRITE_REG(hw, E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);
+
+       /* power on the sfp cage if present */
+       ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+       ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA;
+       E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+
+       ctrl_reg = E1000_READ_REG(hw, E1000_CTRL);
+       ctrl_reg |= E1000_CTRL_SLU;
+
+       /* set both sw defined pins on 82575/82576*/
+       if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576)
+               ctrl_reg |= E1000_CTRL_SWDPIN0 | E1000_CTRL_SWDPIN1;
+
+       reg = E1000_READ_REG(hw, E1000_PCS_LCTL);
+
+       /* default pcs_autoneg to the same setting as mac autoneg */
+       pcs_autoneg = hw->mac.autoneg;
+
+       /* ctrl_ext still holds the value read above (SDP3 bit cleared) */
+       switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
+       case E1000_CTRL_EXT_LINK_MODE_SGMII:
+               /* sgmii mode lets the phy handle forcing speed/duplex */
+               pcs_autoneg = TRUE;
+               /* autoneg time out should be disabled for SGMII mode */
+               reg &= ~(E1000_PCS_LCTL_AN_TIMEOUT);
+               break;
+       case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
+               /* disable PCS autoneg and support parallel detect only */
+               pcs_autoneg = FALSE;
+               /* fall through to default case */
+       default:
+               /*
+                * non-SGMII modes only supports a speed of 1000/Full for the
+                * link so it is best to just force the MAC and let the pcs
+                * link either autoneg or be forced to 1000/Full
+                */
+               ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD |
+                           E1000_CTRL_FD | E1000_CTRL_FRCDPX;
+
+               /* set speed of 1000/Full if speed/duplex is forced */
+               reg |= E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL;
+               break;
+       }
+
+       E1000_WRITE_REG(hw, E1000_CTRL, ctrl_reg);
+
+       /*
+        * New SerDes mode allows for forcing speed or autonegotiating speed
+        * at 1gb. Autoneg should be default set by most drivers. This is the
+        * mode that will be compatible with older link partners and switches.
+        * However, both are supported by the hardware and some drivers/tools.
+        */
+       reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP |
+                E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK);
+
+       /*
+        * We force flow control to prevent the CTRL register values from being
+        * overwritten by the autonegotiated flow control values
+        */
+       reg |= E1000_PCS_LCTL_FORCE_FCTRL;
+
+       if (pcs_autoneg) {
+               /* Set PCS register for autoneg */
+               reg |= E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */
+                      E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */
+               DEBUGOUT1("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg);
+       } else {
+               /* Set PCS register for forced link */
+               reg |= E1000_PCS_LCTL_FSD;        /* Force Speed */
+               DEBUGOUT1("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg);
+       }
+
+       E1000_WRITE_REG(hw, E1000_PCS_LCTL, reg);
+
+       /* SGMII flow control is negotiated by the PHY; otherwise force it */
+       if (!e1000_sgmii_active_82575(hw))
+               e1000_force_mac_fc_generic(hw);
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_valid_led_default_82575 - Verify a valid default LED config
+ *  @hw: pointer to the HW structure
+ *  @data: pointer to the NVM (EEPROM)
+ *
+ *  Reads the default LED configuration word from the EEPROM.  When the
+ *  stored value is one of the reserved patterns, substitutes a sane
+ *  media-type specific default instead.
+ **/
+static s32 e1000_valid_led_default_82575(struct e1000_hw *hw, u16 *data)
+{
+       s32 ret_val;
+
+       DEBUGFUNC("e1000_valid_led_default_82575");
+
+       ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
+       if (ret_val) {
+               DEBUGOUT("NVM Read Error\n");
+               return ret_val;
+       }
+
+       /* Anything other than the reserved patterns is accepted as-is */
+       if (*data != ID_LED_RESERVED_0000 && *data != ID_LED_RESERVED_FFFF)
+               return ret_val;
+
+       /* Reserved value found - fall back to a media-appropriate default */
+       if (hw->phy.media_type == e1000_media_type_internal_serdes)
+               *data = ID_LED_DEFAULT_82575_SERDES;
+       else
+               *data = ID_LED_DEFAULT;
+
+       return ret_val;
+}
+
+/**
+ *  e1000_sgmii_active_82575 - Return sgmii state
+ *  @hw: pointer to the HW structure
+ *
+ *  82575 silicon has a serialized gigabit media independent interface (sgmii)
+ *  which can be enabled for use in the embedded applications.  Simply
+ *  return the current state of the sgmii interface.
+ **/
+static bool e1000_sgmii_active_82575(struct e1000_hw *hw)
+{
+       return hw->dev_spec._82575.sgmii_active;
+}
+
+/**
+ *  e1000_reset_init_script_82575 - Inits HW defaults after reset
+ *  @hw: pointer to the HW structure
+ *
+ *  Inits recommended HW defaults after a reset when there is no EEPROM
+ *  detected. This is only for the 82575.
+ *
+ *  NOTE(review): the offset/value pairs below are opaque hardware tuning
+ *  constants; presumably taken from Intel's 82575 init recommendations --
+ *  confirm against the 82575 datasheet before altering.
+ **/
+static s32 e1000_reset_init_script_82575(struct e1000_hw* hw)
+{
+       DEBUGFUNC("e1000_reset_init_script_82575");
+
+       if (hw->mac.type == e1000_82575) {
+               DEBUGOUT("Running reset init script for 82575\n");
+               /* SerDes configuration via SERDESCTRL */
+               e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x00, 0x0C);
+               e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x01, 0x78);
+               e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x1B, 0x23);
+               e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x23, 0x15);
+
+               /* CCM configuration via CCMCTL register */
+               e1000_write_8bit_ctrl_reg_generic(hw, E1000_CCMCTL, 0x14, 0x00);
+               e1000_write_8bit_ctrl_reg_generic(hw, E1000_CCMCTL, 0x10, 0x00);
+
+               /* PCIe lanes configuration */
+               e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x00, 0xEC);
+               e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x61, 0xDF);
+               e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x34, 0x05);
+               e1000_write_8bit_ctrl_reg_generic(hw, E1000_GIOCTL, 0x2F, 0x81);
+
+               /* PCIe PLL Configuration */
+               e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x02, 0x47);
+               e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x14, 0x00);
+               e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCCTL, 0x10, 0x00);
+       }
+
+       /* Not an 82575: nothing to do, still report success */
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_read_mac_addr_82575 - Read device MAC address
+ *  @hw: pointer to the HW structure
+ *
+ *  An alternate MAC address, when present in the EEPROM, is placed in
+ *  RAR0 first so that it overrides the silicon-installed permanent
+ *  address; the MAC address is then read normally.
+ **/
+static s32 e1000_read_mac_addr_82575(struct e1000_hw *hw)
+{
+       s32 ret_val;
+
+       DEBUGFUNC("e1000_read_mac_addr_82575");
+
+       ret_val = e1000_check_alt_mac_addr_generic(hw);
+       if (ret_val)
+               return ret_val;
+
+       return e1000_read_mac_addr_generic(hw);
+}
+
+/**
+ *  e1000_config_collision_dist_82575 - Configure collision distance
+ *  @hw: pointer to the HW structure
+ *
+ *  Programs the default collision distance into TCTL_EXT during link
+ *  setup via a read-modify-write of the COLD field.
+ **/
+static void e1000_config_collision_dist_82575(struct e1000_hw *hw)
+{
+       u32 reg;
+
+       DEBUGFUNC("e1000_config_collision_dist_82575");
+
+       reg = E1000_READ_REG(hw, E1000_TCTL_EXT);
+       reg = (reg & ~E1000_TCTL_EXT_COLD) |
+             (E1000_COLLISION_DISTANCE << E1000_TCTL_EXT_COLD_SHIFT);
+       E1000_WRITE_REG(hw, E1000_TCTL_EXT, reg);
+       E1000_WRITE_FLUSH(hw);
+}
+
+/**
+ * e1000_power_down_phy_copper_82575 - Remove link during PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * In the case of a PHY power down to save power, or to turn off link during a
+ * driver unload, or wake on lan is not enabled, remove the link.
+ **/
+static void e1000_power_down_phy_copper_82575(struct e1000_hw *hw)
+{
+       struct e1000_phy_info *phy = &hw->phy;
+
+       /* Cannot decide safely without a check_reset_block op */
+       if (phy->ops.check_reset_block == NULL)
+               return;
+
+       /* Keep the PHY powered when either the management interface is
+        * enabled or a reset block is in effect; power down otherwise.
+        */
+       if (!e1000_enable_mng_pass_thru(hw) && !phy->ops.check_reset_block(hw))
+               e1000_power_down_phy_copper(hw);
+}
+
+/**
+ *  e1000_clear_hw_cntrs_82575 - Clear device specific hardware counters
+ *  @hw: pointer to the HW structure
+ *
+ *  Clears the hardware counters by reading the counter registers.
+ *  These counters are clear-on-read; the read values are discarded.
+ **/
+static void e1000_clear_hw_cntrs_82575(struct e1000_hw *hw)
+{
+       DEBUGFUNC("e1000_clear_hw_cntrs_82575");
+
+       /* Counters common to all e1000 parts */
+       e1000_clear_hw_cntrs_base_generic(hw);
+
+       /* Packet-size bucket counters, Rx (PRC*) and Tx (PTC*) */
+       E1000_READ_REG(hw, E1000_PRC64);
+       E1000_READ_REG(hw, E1000_PRC127);
+       E1000_READ_REG(hw, E1000_PRC255);
+       E1000_READ_REG(hw, E1000_PRC511);
+       E1000_READ_REG(hw, E1000_PRC1023);
+       E1000_READ_REG(hw, E1000_PRC1522);
+       E1000_READ_REG(hw, E1000_PTC64);
+       E1000_READ_REG(hw, E1000_PTC127);
+       E1000_READ_REG(hw, E1000_PTC255);
+       E1000_READ_REG(hw, E1000_PTC511);
+       E1000_READ_REG(hw, E1000_PTC1023);
+       E1000_READ_REG(hw, E1000_PTC1522);
+
+       /* Error counters */
+       E1000_READ_REG(hw, E1000_ALGNERRC);
+       E1000_READ_REG(hw, E1000_RXERRC);
+       E1000_READ_REG(hw, E1000_TNCRS);
+       E1000_READ_REG(hw, E1000_CEXTERR);
+       E1000_READ_REG(hw, E1000_TSCTC);
+       E1000_READ_REG(hw, E1000_TSCTFC);
+
+       /* Management packet counters */
+       E1000_READ_REG(hw, E1000_MGTPRC);
+       E1000_READ_REG(hw, E1000_MGTPDC);
+       E1000_READ_REG(hw, E1000_MGTPTC);
+
+       /* Interrupt assertion/cause counters */
+       E1000_READ_REG(hw, E1000_IAC);
+       E1000_READ_REG(hw, E1000_ICRXOC);
+
+       E1000_READ_REG(hw, E1000_ICRXPTC);
+       E1000_READ_REG(hw, E1000_ICRXATC);
+       E1000_READ_REG(hw, E1000_ICTXPTC);
+       E1000_READ_REG(hw, E1000_ICTXATC);
+       E1000_READ_REG(hw, E1000_ICTXQEC);
+       E1000_READ_REG(hw, E1000_ICTXQMTC);
+       E1000_READ_REG(hw, E1000_ICRXDMTC);
+
+       /* Host/switch traffic counters */
+       E1000_READ_REG(hw, E1000_CBTMPC);
+       E1000_READ_REG(hw, E1000_HTDPMC);
+       E1000_READ_REG(hw, E1000_CBRMPC);
+       E1000_READ_REG(hw, E1000_RPTHC);
+       E1000_READ_REG(hw, E1000_HGPTC);
+       E1000_READ_REG(hw, E1000_HTCBDPC);
+       E1000_READ_REG(hw, E1000_HGORCL);
+       E1000_READ_REG(hw, E1000_HGORCH);
+       E1000_READ_REG(hw, E1000_HGOTCL);
+       E1000_READ_REG(hw, E1000_HGOTCH);
+       E1000_READ_REG(hw, E1000_LENERRS);
+
+       /* This register should not be read in copper configurations */
+       if ((hw->phy.media_type == e1000_media_type_internal_serdes) ||
+           e1000_sgmii_active_82575(hw))
+               E1000_READ_REG(hw, E1000_SCVPC);
+}
+
+/**
+ *  e1000_rx_fifo_flush_82575 - Clean rx fifo after Rx enable
+ *  @hw: pointer to the HW structure
+ *
+ *  After rx enable if managability is enabled then there is likely some
+ *  bad data at the start of the fifo and possibly in the DMA fifo.  This
+ *  function clears the fifos and flushes any packets that came in as rx was
+ *  being enabled.
+ **/
+void e1000_rx_fifo_flush_82575(struct e1000_hw *hw)
+{
+       u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled;
+       int i, ms_wait;
+
+       /* NOTE: trace tag kept as the workaround's historical name */
+       DEBUGFUNC("e1000_rx_fifo_workaround_82575");
+       /* Only 82575 parts with manageability (TCO) receive need this */
+       if (hw->mac.type != e1000_82575 ||
+           !(E1000_READ_REG(hw, E1000_MANC) & E1000_MANC_RCV_TCO_EN))
+               return;
+
+       /* Disable all Rx queues, remembering their previous state */
+       for (i = 0; i < 4; i++) {
+               rxdctl[i] = E1000_READ_REG(hw, E1000_RXDCTL(i));
+               E1000_WRITE_REG(hw, E1000_RXDCTL(i),
+                               rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE);
+       }
+       /* Poll all queues to verify they have shut down */
+       for (ms_wait = 0; ms_wait < 10; ms_wait++) {
+               msec_delay(1);
+               rx_enabled = 0;
+               for (i = 0; i < 4; i++)
+                       rx_enabled |= E1000_READ_REG(hw, E1000_RXDCTL(i));
+               if (!(rx_enabled & E1000_RXDCTL_QUEUE_ENABLE))
+                       break;
+       }
+
+       /* Non-fatal: continue the flush even if queues never quiesced */
+       if (ms_wait == 10)
+               DEBUGOUT("Queue disable timed out after 10ms\n");
+
+       /* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all
+        * incoming packets are rejected.  Set enable and wait 2ms so that
+        * any packet that was coming in as RCTL.EN was set is flushed
+        */
+       rfctl = E1000_READ_REG(hw, E1000_RFCTL);
+       E1000_WRITE_REG(hw, E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF);
+
+       rlpml = E1000_READ_REG(hw, E1000_RLPML);
+       E1000_WRITE_REG(hw, E1000_RLPML, 0);
+
+       rctl = E1000_READ_REG(hw, E1000_RCTL);
+       temp_rctl = rctl & ~(E1000_RCTL_EN | E1000_RCTL_SBP);
+       temp_rctl |= E1000_RCTL_LPE;
+
+       E1000_WRITE_REG(hw, E1000_RCTL, temp_rctl);
+       E1000_WRITE_REG(hw, E1000_RCTL, temp_rctl | E1000_RCTL_EN);
+       E1000_WRITE_FLUSH(hw);
+       msec_delay(2);
+
+       /* Enable Rx queues that were previously enabled and restore our
+        * previous state
+        */
+       for (i = 0; i < 4; i++)
+               E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl[i]);
+       E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+       E1000_WRITE_FLUSH(hw);
+
+       E1000_WRITE_REG(hw, E1000_RLPML, rlpml);
+       E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);
+
+       /* Flush receive errors generated by workaround */
+       E1000_READ_REG(hw, E1000_ROC);
+       E1000_READ_REG(hw, E1000_RNBC);
+       E1000_READ_REG(hw, E1000_MPC);
+}
+
+/**
+ *  e1000_set_pcie_completion_timeout - set pci-e completion timeout
+ *  @hw: pointer to the HW structure
+ *
+ *  The defaults for 82575 and 82576 should be in the range of 50us to 50ms,
+ *  however the hardware default for these parts is 500us to 1ms which is less
+ *  than the 10ms recommended by the pci-e spec.  To address this we need to
+ *  increase the value to either 10ms to 200ms for capability version 1 config,
+ *  or 16ms to 55ms for version 2.
+ **/
+static s32 e1000_set_pcie_completion_timeout(struct e1000_hw *hw)
+{
+       u32 gcr = E1000_READ_REG(hw, E1000_GCR);
+       s32 ret_val = E1000_SUCCESS;
+       u16 pcie_devctl2;
+
+       /* only take action if timeout value is defaulted to 0 */
+       if (gcr & E1000_GCR_CMPL_TMOUT_MASK)
+               goto out;
+
+       /*
+        * if capababilities version is type 1 we can write the
+        * timeout of 10ms to 200ms through the GCR register
+        */
+       if (!(gcr & E1000_GCR_CAP_VER2)) {
+               gcr |= E1000_GCR_CMPL_TMOUT_10ms;
+               goto out;
+       }
+
+       /*
+        * for version 2 capabilities we need to write the config space
+        * directly in order to set the completion timeout value for
+        * 16ms to 55ms
+        */
+       ret_val = e1000_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
+                                         &pcie_devctl2);
+       if (ret_val)
+               goto out;
+
+       pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms;
+
+       ret_val = e1000_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
+                                          &pcie_devctl2);
+out:
+       /*
+        * Deliberately executed on ALL paths (including errors): resend
+        * must be disabled and any GCR timeout update committed.
+        */
+       /* disable completion timeout resend */
+       gcr &= ~E1000_GCR_CMPL_TMOUT_RESEND;
+
+       E1000_WRITE_REG(hw, E1000_GCR, gcr);
+       return ret_val;
+}
+
+/**
+ *  e1000_vmdq_set_anti_spoofing_pf - enable or disable anti-spoofing
+ *  @hw: pointer to the hardware struct
+ *  @enable: state to enter, either enabled or disabled
+ *  @pf: Physical Function pool - do not set anti-spoofing for the PF
+ *
+ *  enables/disables L2 switch anti-spoofing functionality.
+ *  82576 uses the DTXSWC register; i350 uses TXSWC.  Other MAC types
+ *  are left untouched.
+ **/
+void e1000_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf)
+{
+       u32 dtxswc;
+
+       switch (hw->mac.type) {
+       case e1000_82576:
+               dtxswc = E1000_READ_REG(hw, E1000_DTXSWC);
+               if (enable) {
+                       dtxswc |= (E1000_DTXSWC_MAC_SPOOF_MASK |
+                                  E1000_DTXSWC_VLAN_SPOOF_MASK);
+                       /* The PF can spoof - it has to in order to
+                        * support emulation mode NICs */
+                       dtxswc ^= (1 << pf | 1 << (pf + MAX_NUM_VFS));
+               } else {
+                       dtxswc &= ~(E1000_DTXSWC_MAC_SPOOF_MASK |
+                                   E1000_DTXSWC_VLAN_SPOOF_MASK);
+               }
+               E1000_WRITE_REG(hw, E1000_DTXSWC, dtxswc);
+               break;
+       case e1000_i350:
+               dtxswc = E1000_READ_REG(hw, E1000_TXSWC);
+               if (enable) {
+                       dtxswc |= (E1000_DTXSWC_MAC_SPOOF_MASK |
+                                  E1000_DTXSWC_VLAN_SPOOF_MASK);
+                       /* The PF can spoof - it has to in order to
+                        * support emulation mode NICs
+                        */
+                       dtxswc ^= (1 << pf | 1 << (pf + MAX_NUM_VFS));
+               } else {
+                       dtxswc &= ~(E1000_DTXSWC_MAC_SPOOF_MASK |
+                                   E1000_DTXSWC_VLAN_SPOOF_MASK);
+               }
+               E1000_WRITE_REG(hw, E1000_TXSWC, dtxswc);
+               /* break was missing here: the i350 case silently fell
+                * through into default.  Harmless today (default only
+                * breaks) but a latent bug if a new case is added.
+                */
+               break;
+       default:
+               break;
+       }
+}
+
+/**
+ *  e1000_vmdq_set_loopback_pf - enable or disable vmdq loopback
+ *  @hw: pointer to the hardware struct
+ *  @enable: state to enter, either enabled or disabled
+ *
+ *  enables/disables L2 switch loopback functionality.  82576 uses the
+ *  DTXSWC register, i350 uses TXSWC; the same loopback-enable bit is
+ *  toggled in either case.
+ **/
+void e1000_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable)
+{
+       u32 reg_val;
+
+       switch (hw->mac.type) {
+       case e1000_82576:
+               reg_val = E1000_READ_REG(hw, E1000_DTXSWC);
+               if (enable)
+                       reg_val |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
+               else
+                       reg_val &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
+               E1000_WRITE_REG(hw, E1000_DTXSWC, reg_val);
+               break;
+       case e1000_i350:
+               reg_val = E1000_READ_REG(hw, E1000_TXSWC);
+               if (enable)
+                       reg_val |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
+               else
+                       reg_val &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
+               E1000_WRITE_REG(hw, E1000_TXSWC, reg_val);
+               break;
+       default:
+               /* Currently no other hardware supports loopback */
+               break;
+       }
+}
+
+/**
+ *  e1000_vmdq_set_replication_pf - enable or disable vmdq replication
+ *  @hw: pointer to the hardware struct
+ *  @enable: state to enter, either enabled or disabled
+ *
+ *  enables/disables replication of packets across multiple pools by
+ *  toggling the VM replication-enable bit in VT_CTL.
+ **/
+void e1000_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
+{
+       u32 reg_val = E1000_READ_REG(hw, E1000_VT_CTL);
+
+       if (enable)
+               reg_val |= E1000_VT_CTL_VM_REPL_EN;
+       else
+               reg_val &= ~E1000_VT_CTL_VM_REPL_EN;
+
+       E1000_WRITE_REG(hw, E1000_VT_CTL, reg_val);
+}
+
+/**
+ *  e1000_read_phy_reg_82580 - Read 82580 MDI control register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Acquires the PHY semaphore, reads the MDI control register in the PHY
+ *  at @offset into @data, then releases the semaphore.
+ **/
+static s32 e1000_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+       s32 ret_val;
+
+       DEBUGFUNC("e1000_read_phy_reg_82580");
+
+       ret_val = hw->phy.ops.acquire(hw);
+       if (ret_val)
+               return ret_val;
+
+       ret_val = e1000_read_phy_reg_mdic(hw, offset, data);
+       hw->phy.ops.release(hw);
+
+       return ret_val;
+}
+
+/**
+ *  e1000_write_phy_reg_82580 - Write 82580 MDI control register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write to register at offset
+ *
+ *  Acquires the PHY semaphore, writes @data to the MDI control register
+ *  in the PHY at @offset, then releases the semaphore.
+ **/
+static s32 e1000_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data)
+{
+       s32 ret_val;
+
+       DEBUGFUNC("e1000_write_phy_reg_82580");
+
+       ret_val = hw->phy.ops.acquire(hw);
+       if (ret_val)
+               return ret_val;
+
+       ret_val = e1000_write_phy_reg_mdic(hw, offset, data);
+       hw->phy.ops.release(hw);
+
+       return ret_val;
+}
+
+/**
+ *  e1000_reset_mdicnfg_82580 - Reset MDICNFG destination and com_mdio bits
+ *  @hw: pointer to the HW structure
+ *
+ *  This resets the the MDICNFG.Destination and MDICNFG.Com_MDIO bits based on
+ *  the values found in the EEPROM.  This addresses an issue in which these
+ *  bits are not restored from EEPROM after reset.
+ **/
+static s32 e1000_reset_mdicnfg_82580(struct e1000_hw *hw)
+{
+       s32 ret_val = E1000_SUCCESS;
+       u32 mdicnfg;
+       u16 nvm_data = 0;
+
+       DEBUGFUNC("e1000_reset_mdicnfg_82580");
+
+       /* Only applies to 82580 parts in SGMII mode */
+       if (hw->mac.type != e1000_82580)
+               goto out;
+       if (!e1000_sgmii_active_82575(hw))
+               goto out;
+
+       /* Per-port init control word holds the desired MDIO settings */
+       ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
+                                  NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
+                                  &nvm_data);
+       if (ret_val) {
+               DEBUGOUT("NVM Read Error\n");
+               goto out;
+       }
+
+       mdicnfg = E1000_READ_REG(hw, E1000_MDICNFG);
+       /*
+        * Clear both bits before conditionally setting them so the
+        * register truly reflects the EEPROM: the previous code could
+        * only set bits and never clear a stale one.  (After a reset
+        * both bits default to 0, so this is also safe in that path.)
+        */
+       mdicnfg &= ~(E1000_MDICNFG_EXT_MDIO | E1000_MDICNFG_COM_MDIO);
+       if (nvm_data & NVM_WORD24_EXT_MDIO)
+               mdicnfg |= E1000_MDICNFG_EXT_MDIO;
+       if (nvm_data & NVM_WORD24_COM_MDIO)
+               mdicnfg |= E1000_MDICNFG_COM_MDIO;
+       E1000_WRITE_REG(hw, E1000_MDICNFG, mdicnfg);
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_reset_hw_82580 - Reset hardware
+ *  @hw: pointer to the HW structure
+ *
+ *  This resets function or entire device (all ports, etc.)
+ *  to a known state.  A global (whole-device) reset is only attempted
+ *  when requested via dev_spec AND the SW/FW sync semaphore can be
+ *  acquired; otherwise a per-port reset is issued.
+ **/
+static s32 e1000_reset_hw_82580(struct e1000_hw *hw)
+{
+       s32 ret_val = E1000_SUCCESS;
+       /* BH SW mailbox bit in SW_FW_SYNC */
+       u16 swmbsw_mask = E1000_SW_SYNCH_MB;
+       u32 ctrl;
+       bool global_device_reset = hw->dev_spec._82575.global_device_reset;
+
+       DEBUGFUNC("e1000_reset_hw_82580");
+
+       /* Consume the one-shot request flag */
+       hw->dev_spec._82575.global_device_reset = FALSE;
+
+       /* Get current control state. */
+       ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+       /*
+        * Prevent the PCI-E bus from sticking if there is no TLP connection
+        * on the last TLP read/write transaction when MAC is reset.
+        */
+       ret_val = e1000_disable_pcie_master_generic(hw);
+       if (ret_val)
+               DEBUGOUT("PCI-E Master disable polling has failed.\n");
+
+       DEBUGOUT("Masking off all interrupts\n");
+       E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
+       E1000_WRITE_REG(hw, E1000_RCTL, 0);
+       E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
+       E1000_WRITE_FLUSH(hw);
+
+       msec_delay(10);
+
+       /* Determine whether or not a global dev reset is requested */
+       if (global_device_reset &&
+               e1000_acquire_swfw_sync_82575(hw, swmbsw_mask))
+                       global_device_reset = FALSE;
+
+       /* Skip the global reset if another function already issued one
+        * (DEV_RST_SET still pending in STATUS); fall back to port reset.
+        */
+       if (global_device_reset &&
+               !(E1000_READ_REG(hw, E1000_STATUS) & E1000_STAT_DEV_RST_SET))
+               ctrl |= E1000_CTRL_DEV_RST;
+       else
+               ctrl |= E1000_CTRL_RST;
+
+       E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+       /* Add delay to insure DEV_RST has time to complete */
+       if (global_device_reset)
+               msec_delay(5);
+
+       ret_val = e1000_get_auto_rd_done_generic(hw);
+       if (ret_val) {
+               /*
+                * When auto config read does not complete, do not
+                * return with an error. This can happen in situations
+                * where there is no eeprom and prevents getting link.
+                */
+               DEBUGOUT("Auto Read Done did not complete\n");
+       }
+
+       /* If EEPROM is not present, run manual init scripts */
+       if ((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0)
+               e1000_reset_init_script_82575(hw);
+
+       /* clear global device reset status bit */
+       E1000_WRITE_REG(hw, E1000_STATUS, E1000_STAT_DEV_RST_SET);
+
+       /* Clear any pending interrupt events. */
+       E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
+       E1000_READ_REG(hw, E1000_ICR);
+
+       ret_val = e1000_reset_mdicnfg_82580(hw);
+       if (ret_val)
+               DEBUGOUT("Could not reset MDICNFG based on EEPROM\n");
+
+       /* Install any alternate MAC address into RAR0; its status is
+        * what this function ultimately returns.
+        */
+       ret_val = e1000_check_alt_mac_addr_generic(hw);
+
+       /* Release semaphore */
+       if (global_device_reset)
+               e1000_release_swfw_sync_82575(hw, swmbsw_mask);
+
+       return ret_val;
+}
+
+/**
+ *  e1000_rxpbs_adjust_82580 - adjust RXPBS value to reflect actual Rx PBA size
+ *  @data: data received by reading RXPBS register
+ *
+ *  The 82580 uses a table based approach for packet buffer allocation sizes.
+ *  This function converts the retrieved value into the correct table value
+ *     0x0 0x1 0x2 0x3 0x4 0x5 0x6 0x7
+ *  0x0 36  72 144   1   2   4   8  16
+ *  0x8 35  70 140 rsv rsv rsv rsv rsv
+ *  Out-of-range (reserved) indices map to 0.
+ */
+u16 e1000_rxpbs_adjust_82580(u32 data)
+{
+       if (data >= E1000_82580_RXPBS_TABLE_SIZE)
+               return 0;
+
+       return e1000_82580_rxpbs_table[data];
+}
+
+/**
+ *  e1000_validate_nvm_checksum_with_offset - Validate EEPROM
+ *  checksum
+ *  @hw: pointer to the HW structure
+ *  @offset: offset in words of the checksum protected region
+ *
+ *  Calculates the EEPROM checksum by reading/adding each word of the EEPROM
+ *  and then verifies that the sum of the EEPROM is equal to 0xBABA.
+ **/
+s32 e1000_validate_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset)
+{
+       u16 checksum = 0;
+       u16 word, nvm_data;
+       s32 ret_val;
+
+       DEBUGFUNC("e1000_validate_nvm_checksum_with_offset");
+
+       /* Sum every word in the protected region, including the stored
+        * checksum word itself; the grand total must equal NVM_SUM.
+        */
+       for (word = offset; word <= (NVM_CHECKSUM_REG + offset); word++) {
+               ret_val = hw->nvm.ops.read(hw, word, 1, &nvm_data);
+               if (ret_val) {
+                       DEBUGOUT("NVM Read Error\n");
+                       return ret_val;
+               }
+               checksum += nvm_data;
+       }
+
+       if (checksum != (u16) NVM_SUM) {
+               DEBUGOUT("NVM Checksum Invalid\n");
+               return -E1000_ERR_NVM;
+       }
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_update_nvm_checksum_with_offset - Update EEPROM
+ *  checksum
+ *  @hw: pointer to the HW structure
+ *  @offset: offset in words of the checksum protected region
+ *
+ *  Updates the EEPROM checksum by reading/adding each word of the EEPROM
+ *  up to the checksum.  Then calculates the EEPROM checksum and writes the
+ *  value to the EEPROM.
+ **/
+s32 e1000_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset)
+{
+       u16 checksum = 0;
+       u16 word, nvm_data;
+       s32 ret_val;
+
+       DEBUGFUNC("e1000_update_nvm_checksum_with_offset");
+
+       /* Sum all words up to (but not including) the checksum word */
+       for (word = offset; word < (NVM_CHECKSUM_REG + offset); word++) {
+               ret_val = hw->nvm.ops.read(hw, word, 1, &nvm_data);
+               if (ret_val) {
+                       DEBUGOUT("NVM Read Error while updating checksum.\n");
+                       return ret_val;
+               }
+               checksum += nvm_data;
+       }
+
+       /* Stored checksum is whatever makes the region sum to NVM_SUM */
+       checksum = (u16) NVM_SUM - checksum;
+       ret_val = hw->nvm.ops.write(hw, (NVM_CHECKSUM_REG + offset), 1,
+                               &checksum);
+       if (ret_val)
+               DEBUGOUT("NVM Write Error while updating checksum.\n");
+
+       return ret_val;
+}
+
+/**
+ *  e1000_validate_nvm_checksum_82580 - Validate EEPROM checksum
+ *  @hw: pointer to the HW structure
+ *
+ *  Calculates the EEPROM section checksum by reading/adding each word of
+ *  the EEPROM and then verifies that the sum of the EEPROM is
+ *  equal to 0xBABA.  When the NVM compatibility bit is set, the
+ *  per-port checksum is validated for all 4 LAN functions.
+ **/
+static s32 e1000_validate_nvm_checksum_82580(struct e1000_hw *hw)
+{
+       s32 ret_val = E1000_SUCCESS;
+       u16 eeprom_regions_count = 1;
+       u16 j, nvm_data;
+       u16 nvm_offset;
+
+       DEBUGFUNC("e1000_validate_nvm_checksum_82580");
+
+       ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
+       if (ret_val) {
+               DEBUGOUT("NVM Read Error\n");
+               goto out;
+       }
+
+       if (nvm_data & NVM_COMPATIBILITY_BIT_MASK) {
+               /* if checksums compatibility bit is set validate checksums
+                * for all 4 ports. */
+               eeprom_regions_count = 4;
+       }
+
+       /* Validate each region; stop at the first failure */
+       for (j = 0; j < eeprom_regions_count; j++) {
+               nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
+               ret_val = e1000_validate_nvm_checksum_with_offset(hw,
+                                                               nvm_offset);
+               if (ret_val != E1000_SUCCESS)
+                       goto out;
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_update_nvm_checksum_82580 - Update EEPROM checksum
+ *  @hw: pointer to the HW structure
+ *
+ *  Updates the EEPROM section checksums for all 4 ports by reading/adding
+ *  each word of the EEPROM up to the checksum.  Then calculates the EEPROM
+ *  checksum and writes the value to the EEPROM.  Also ensures the NVM
+ *  compatibility bit is set first so validation treats the image as
+ *  having per-port checksums.
+ **/
+static s32 e1000_update_nvm_checksum_82580(struct e1000_hw *hw)
+{
+       u16 nvm_data;
+       u16 port;
+       s32 ret_val;
+
+       DEBUGFUNC("e1000_update_nvm_checksum_82580");
+
+       ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
+       if (ret_val) {
+               DEBUGOUT("NVM Read Error while updating checksum"
+                       " compatibility bit.\n");
+               return ret_val;
+       }
+
+       if (!(nvm_data & NVM_COMPATIBILITY_BIT_MASK)) {
+               /* set compatibility bit to validate checksums appropriately */
+               nvm_data |= NVM_COMPATIBILITY_BIT_MASK;
+               ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1,
+                                       &nvm_data);
+               if (ret_val) {
+                       DEBUGOUT("NVM Write Error while updating checksum"
+                               " compatibility bit.\n");
+                       return ret_val;
+               }
+       }
+
+       /* Refresh the checksum of each of the 4 per-port regions */
+       for (port = 0; port < 4; port++) {
+               ret_val = e1000_update_nvm_checksum_with_offset(hw,
+                               NVM_82580_LAN_FUNC_OFFSET(port));
+               if (ret_val)
+                       return ret_val;
+       }
+
+       return ret_val;
+}
+
+/**
+ *  e1000_validate_nvm_checksum_i350 - Validate EEPROM checksum
+ *  @hw: pointer to the HW structure
+ *
+ *  Verifies the per-port EEPROM section checksum for each of the four
+ *  LAN functions by reading/adding each word of the section; each must
+ *  sum to 0xBABA.  Stops at the first failing section.
+ **/
+static s32 e1000_validate_nvm_checksum_i350(struct e1000_hw *hw)
+{
+       s32 ret_val = E1000_SUCCESS;
+       u16 port;
+
+       DEBUGFUNC("e1000_validate_nvm_checksum_i350");
+
+       for (port = 0; port < 4; port++) {
+               ret_val = e1000_validate_nvm_checksum_with_offset(hw,
+                               NVM_82580_LAN_FUNC_OFFSET(port));
+               if (ret_val != E1000_SUCCESS)
+                       break;
+       }
+
+       return ret_val;
+}
+
+/**
+ *  e1000_update_nvm_checksum_i350 - Update EEPROM checksum
+ *  @hw: pointer to the HW structure
+ *
+ *  Recomputes and writes the EEPROM section checksum for each of the
+ *  four LAN function regions by reading/adding each word of the section
+ *  up to the checksum word.  Stops at the first failing section.
+ **/
+static s32 e1000_update_nvm_checksum_i350(struct e1000_hw *hw)
+{
+       s32 ret_val = E1000_SUCCESS;
+       u16 port;
+
+       DEBUGFUNC("e1000_update_nvm_checksum_i350");
+
+       for (port = 0; port < 4; port++) {
+               ret_val = e1000_update_nvm_checksum_with_offset(hw,
+                               NVM_82580_LAN_FUNC_OFFSET(port));
+               if (ret_val != E1000_SUCCESS)
+                       break;
+       }
+
+       return ret_val;
+}
+
+/**
+ *  e1000_set_eee_i350 - Enable/disable EEE support
+ *  @hw: pointer to the HW structure
+ *
+ *  Enable/disable EEE (Energy Efficient Ethernet) based on the
+ *  eee_disable setting in the dev_spec structure.  Does nothing unless
+ *  the MAC is an i350 with the link mode bits clear (internal PHY).
+ *
+ **/
+s32 e1000_set_eee_i350(struct e1000_hw *hw)
+{
+       s32 ret_val = E1000_SUCCESS;
+       u32 ipcnfg, eeer, ctrl_ext;
+
+       DEBUGFUNC("e1000_set_eee_i350");
+
+       ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+       /* EEE applies only to i350 with the default (copper) link mode */
+       if ((hw->mac.type != e1000_i350) ||
+           (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK))
+               goto out;
+       ipcnfg = E1000_READ_REG(hw, E1000_IPCNFG);
+       eeer = E1000_READ_REG(hw, E1000_EEER);
+
+       /* enable or disable per user setting */
+       if (!(hw->dev_spec._82575.eee_disable)) {
+               ipcnfg |= (E1000_IPCNFG_EEE_1G_AN |
+                          E1000_IPCNFG_EEE_100M_AN);
+               eeer |= (E1000_EEER_TX_LPI_EN |
+                        E1000_EEER_RX_LPI_EN |
+                        E1000_EEER_LPI_FC);
+       } else {
+               ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN |
+                           E1000_IPCNFG_EEE_100M_AN);
+               eeer &= ~(E1000_EEER_TX_LPI_EN |
+                         E1000_EEER_RX_LPI_EN |
+                         E1000_EEER_LPI_FC);
+       }
+       E1000_WRITE_REG(hw, E1000_IPCNFG, ipcnfg);
+       E1000_WRITE_REG(hw, E1000_EEER, eeer);
+       /* read back the registers, presumably to post the writes --
+        * NOTE(review): confirm the hardware requires this flush */
+       E1000_READ_REG(hw, E1000_IPCNFG);
+       E1000_READ_REG(hw, E1000_EEER);
+out:
+
+       return ret_val;
+}
diff --git a/lib/librte_pmd_igb/igb/e1000_82575.h b/lib/librte_pmd_igb/igb/e1000_82575.h
new file mode 100644 (file)
index 0000000..415756e
--- /dev/null
@@ -0,0 +1,487 @@
+/******************************************************************************
+
+  Copyright (c) 2001-2011, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#ifndef _E1000_82575_H_
+#define _E1000_82575_H_
+
+#define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \
+                                     (ID_LED_DEF1_DEF2 <<  8) | \
+                                     (ID_LED_DEF1_DEF2 <<  4) | \
+                                     (ID_LED_OFF1_ON2))
+/*
+ * Receive Address Register Count
+ * Number of high/low register pairs in the RAR.  The RAR (Receive Address
+ * Registers) holds the directed and multicast addresses that we monitor.
+ * These entries are also used for MAC-based filtering.
+ */
+/*
+ * For 82576, there are an additional set of RARs that begin at an offset
+ * separate from the first set of RARs.
+ */
+#define E1000_RAR_ENTRIES_82575        16
+#define E1000_RAR_ENTRIES_82576        24
+#define E1000_RAR_ENTRIES_82580        24
+#define E1000_RAR_ENTRIES_I350         32
+#define E1000_SW_SYNCH_MB              0x00000100
+#define E1000_STAT_DEV_RST_SET         0x00100000
+#define E1000_CTRL_DEV_RST             0x20000000
+
+#ifdef E1000_BIT_FIELDS
+/* Advanced transmit data descriptor, decomposed into bit-fields.
+ * Compiled only when E1000_BIT_FIELDS is defined (see #ifdef above).
+ * NOTE(review): bit-field ordering is compiler- and endian-dependent;
+ * assumed to match the little-endian __le32/__le64 layout -- confirm.
+ */
+struct e1000_adv_data_desc {
+       __le64 buffer_addr;    /* Address of the descriptor's data buffer */
+       union {
+               u32 data;
+               struct {
+                       u32 datalen :16; /* Data buffer length */
+                       u32 rsvd    :4;  /* Reserved */
+                       u32 dtyp    :4;  /* Descriptor type */
+                       u32 dcmd    :8;  /* Descriptor command */
+               } config;
+       } lower;
+       union {
+               u32 data;
+               struct {
+                       u32 status  :4;  /* Descriptor status */
+                       u32 idx     :4;  /* Descriptor index */
+                       u32 popts   :6;  /* Packet Options */
+                       u32 paylen  :18; /* Payload length */
+               } options;
+       } upper;
+};
+
+#define E1000_TXD_DTYP_ADV_C    0x2  /* Advanced Context Descriptor */
+#define E1000_TXD_DTYP_ADV_D    0x3  /* Advanced Data Descriptor */
+#define E1000_ADV_TXD_CMD_DEXT  0x20 /* Descriptor extension (0 = legacy) */
+#define E1000_ADV_TUCMD_IPV4    0x2  /* IP Packet Type: 1=IPv4 */
+#define E1000_ADV_TUCMD_IPV6    0x0  /* IP Packet Type: 0=IPv6 */
+#define E1000_ADV_TUCMD_L4T_UDP 0x0  /* L4 Packet TYPE of UDP */
+#define E1000_ADV_TUCMD_L4T_TCP 0x4  /* L4 Packet TYPE of TCP */
+#define E1000_ADV_TUCMD_MKRREQ  0x10 /* Indicates markers are required */
+#define E1000_ADV_DCMD_EOP      0x1  /* End of Packet */
+#define E1000_ADV_DCMD_IFCS     0x2  /* Insert FCS (Ethernet CRC) */
+#define E1000_ADV_DCMD_RS       0x8  /* Report Status */
+#define E1000_ADV_DCMD_VLE      0x40 /* Add VLAN tag */
+#define E1000_ADV_DCMD_TSE      0x80 /* TCP Seg enable */
+/* Extended Device Control */
+#define E1000_CTRL_EXT_NSICR    0x00000001 /* Disable Intr Clear all on read */
+
+/* Advanced transmit context descriptor, decomposed into bit-fields.
+ * Compiled only when E1000_BIT_FIELDS is defined (see #ifdef above).
+ * NOTE(review): bit-field ordering is compiler- and endian-dependent;
+ * assumed to match the hardware's little-endian layout -- confirm.
+ */
+struct e1000_adv_context_desc {
+       union {
+               u32 ip_config;
+               struct {
+                       u32 iplen    :9;  /* IP header length */
+                       u32 maclen   :7;  /* MAC header length */
+                       u32 vlan_tag :16; /* VLAN tag */
+               } fields;
+       } ip_setup;
+       u32 seq_num;
+       union {
+               u64 l4_config;
+               struct {
+                       u32 mkrloc :9;  /* Marker location */
+                       u32 tucmd  :11; /* TX checksum/offload command */
+                       u32 dtyp   :4;  /* Descriptor type */
+                       u32 adv    :8;
+                       u32 rsvd   :4;  /* Reserved */
+                       u32 idx    :4;  /* Descriptor index */
+                       u32 l4len  :8;  /* L4 header length */
+                       u32 mss    :16; /* Maximum segment size */
+               } fields;
+       } l4_setup;
+};
+#endif
+
+/* SRRCTL bit definitions */
+#define E1000_SRRCTL_BSIZEPKT_SHIFT                     10 /* Shift _right_ */
+#define E1000_SRRCTL_BSIZEHDRSIZE_MASK                  0x00000F00
+#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT                 2  /* Shift _left_ */
+#define E1000_SRRCTL_DESCTYPE_LEGACY                    0x00000000
+#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF                0x02000000
+#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT                 0x04000000
+#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS          0x0A000000
+#define E1000_SRRCTL_DESCTYPE_HDR_REPLICATION           0x06000000
+#define E1000_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000
+#define E1000_SRRCTL_DESCTYPE_MASK                      0x0E000000
+#define E1000_SRRCTL_TIMESTAMP                          0x40000000
+#define E1000_SRRCTL_DROP_EN                            0x80000000
+
+#define E1000_SRRCTL_BSIZEPKT_MASK      0x0000007F
+#define E1000_SRRCTL_BSIZEHDR_MASK      0x00003F00
+
+#define E1000_TX_HEAD_WB_ENABLE   0x1
+#define E1000_TX_SEQNUM_WB_ENABLE 0x2
+
+#define E1000_MRQC_ENABLE_RSS_4Q            0x00000002
+#define E1000_MRQC_ENABLE_VMDQ              0x00000003
+#define E1000_MRQC_ENABLE_VMDQ_RSS_2Q       0x00000005
+#define E1000_MRQC_RSS_FIELD_IPV4_UDP       0x00400000
+#define E1000_MRQC_RSS_FIELD_IPV6_UDP       0x00800000
+#define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX    0x01000000
+#define E1000_MRQC_ENABLE_RSS_8Q            0x00000002
+
+#define E1000_VMRCTL_MIRROR_PORT_SHIFT      8
+#define E1000_VMRCTL_MIRROR_DSTPORT_MASK    (7 << E1000_VMRCTL_MIRROR_PORT_SHIFT)
+#define E1000_VMRCTL_POOL_MIRROR_ENABLE     (1 << 0)
+#define E1000_VMRCTL_UPLINK_MIRROR_ENABLE   (1 << 1)
+#define E1000_VMRCTL_DOWNLINK_MIRROR_ENABLE (1 << 2)
+
+#define E1000_EICR_TX_QUEUE ( \
+    E1000_EICR_TX_QUEUE0 |    \
+    E1000_EICR_TX_QUEUE1 |    \
+    E1000_EICR_TX_QUEUE2 |    \
+    E1000_EICR_TX_QUEUE3)
+
+#define E1000_EICR_RX_QUEUE ( \
+    E1000_EICR_RX_QUEUE0 |    \
+    E1000_EICR_RX_QUEUE1 |    \
+    E1000_EICR_RX_QUEUE2 |    \
+    E1000_EICR_RX_QUEUE3)
+
+#define E1000_EIMS_RX_QUEUE E1000_EICR_RX_QUEUE
+#define E1000_EIMS_TX_QUEUE E1000_EICR_TX_QUEUE
+
+#define EIMS_ENABLE_MASK ( \
+    E1000_EIMS_RX_QUEUE  | \
+    E1000_EIMS_TX_QUEUE  | \
+    E1000_EIMS_TCP_TIMER | \
+    E1000_EIMS_OTHER)
+
+/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */
+#define E1000_IMIR_PORT_IM_EN     0x00010000  /* TCP port enable */
+#define E1000_IMIR_PORT_BP        0x00020000  /* TCP port check bypass */
+#define E1000_IMIREXT_SIZE_BP     0x00001000  /* Packet size bypass */
+#define E1000_IMIREXT_CTRL_URG    0x00002000  /* Check URG bit in header */
+#define E1000_IMIREXT_CTRL_ACK    0x00004000  /* Check ACK bit in header */
+#define E1000_IMIREXT_CTRL_PSH    0x00008000  /* Check PSH bit in header */
+#define E1000_IMIREXT_CTRL_RST    0x00010000  /* Check RST bit in header */
+#define E1000_IMIREXT_CTRL_SYN    0x00020000  /* Check SYN bit in header */
+#define E1000_IMIREXT_CTRL_FIN    0x00040000  /* Check FIN bit in header */
+#define E1000_IMIREXT_CTRL_BP     0x00080000  /* Bypass check of ctrl bits */
+
+/* Receive Descriptor - Advanced.
+ * Two overlaid layouts: 'read' supplies the packet/header buffer
+ * addresses to the hardware; 'wb' is the hardware write-back view
+ * (RSS hash or IP-id/checksum, extended status/error, packet length
+ * and VLAN tag).
+ */
+union e1000_adv_rx_desc {
+       struct {
+               __le64 pkt_addr;             /* Packet buffer address */
+               __le64 hdr_addr;             /* Header buffer address */
+       } read;
+       struct {
+               struct {
+                       union {
+                               __le32 data;
+                               struct {
+                                       __le16 pkt_info; /*RSS type, Pkt type*/
+                                       /* Split Header, header buffer len */
+                                       __le16 hdr_info;
+                               } hs_rss;
+                       } lo_dword;
+                       union {
+                               __le32 rss;          /* RSS Hash */
+                               struct {
+                                       __le16 ip_id;    /* IP id */
+                                       __le16 csum;     /* Packet Checksum */
+                               } csum_ip;
+                       } hi_dword;
+               } lower;
+               struct {
+                       __le32 status_error;     /* ext status/error */
+                       __le16 length;           /* Packet length */
+                       __le16 vlan;             /* VLAN tag */
+               } upper;
+       } wb;  /* writeback */
+};
+
+#define E1000_RXDADV_RSSTYPE_MASK        0x0000000F
+#define E1000_RXDADV_RSSTYPE_SHIFT       12
+#define E1000_RXDADV_HDRBUFLEN_MASK      0x7FE0
+#define E1000_RXDADV_HDRBUFLEN_SHIFT     5
+#define E1000_RXDADV_SPLITHEADER_EN      0x00001000
+#define E1000_RXDADV_SPH                 0x8000
+#define E1000_RXDADV_STAT_TS             0x10000 /* Pkt was time stamped */
+#define E1000_RXDADV_STAT_TSIP           0x08000 /* timestamp in packet */
+#define E1000_RXDADV_ERR_HBO             0x00800000
+
+/* RSS Hash results */
+#define E1000_RXDADV_RSSTYPE_NONE        0x00000000
+#define E1000_RXDADV_RSSTYPE_IPV4_TCP    0x00000001
+#define E1000_RXDADV_RSSTYPE_IPV4        0x00000002
+#define E1000_RXDADV_RSSTYPE_IPV6_TCP    0x00000003
+#define E1000_RXDADV_RSSTYPE_IPV6_EX     0x00000004
+#define E1000_RXDADV_RSSTYPE_IPV6        0x00000005
+#define E1000_RXDADV_RSSTYPE_IPV6_TCP_EX 0x00000006
+#define E1000_RXDADV_RSSTYPE_IPV4_UDP    0x00000007
+#define E1000_RXDADV_RSSTYPE_IPV6_UDP    0x00000008
+#define E1000_RXDADV_RSSTYPE_IPV6_UDP_EX 0x00000009
+
+/* RSS Packet Types as indicated in the receive descriptor */
+#define E1000_RXDADV_PKTTYPE_NONE        0x00000000
+#define E1000_RXDADV_PKTTYPE_IPV4        0x00000010 /* IPV4 hdr present */
+#define E1000_RXDADV_PKTTYPE_IPV4_EX     0x00000020 /* IPV4 hdr + extensions */
+#define E1000_RXDADV_PKTTYPE_IPV6        0x00000040 /* IPV6 hdr present */
+#define E1000_RXDADV_PKTTYPE_IPV6_EX     0x00000080 /* IPV6 hdr + extensions */
+#define E1000_RXDADV_PKTTYPE_TCP         0x00000100 /* TCP hdr present */
+#define E1000_RXDADV_PKTTYPE_UDP         0x00000200 /* UDP hdr present */
+#define E1000_RXDADV_PKTTYPE_SCTP        0x00000400 /* SCTP hdr present */
+#define E1000_RXDADV_PKTTYPE_NFS         0x00000800 /* NFS hdr present */
+
+#define E1000_RXDADV_PKTTYPE_IPSEC_ESP   0x00001000 /* IPSec ESP */
+#define E1000_RXDADV_PKTTYPE_IPSEC_AH    0x00002000 /* IPSec AH */
+#define E1000_RXDADV_PKTTYPE_LINKSEC     0x00004000 /* LinkSec Encap */
+#define E1000_RXDADV_PKTTYPE_ETQF        0x00008000 /* PKTTYPE is ETQF index */
+#define E1000_RXDADV_PKTTYPE_ETQF_MASK   0x00000070 /* ETQF has 8 indices */
+#define E1000_RXDADV_PKTTYPE_ETQF_SHIFT  4          /* Right-shift 4 bits */
+
+/* LinkSec results */
+/* Security Processing bit Indication */
+#define E1000_RXDADV_LNKSEC_STATUS_SECP         0x00020000
+#define E1000_RXDADV_LNKSEC_ERROR_BIT_MASK      0x18000000
+#define E1000_RXDADV_LNKSEC_ERROR_NO_SA_MATCH   0x08000000
+#define E1000_RXDADV_LNKSEC_ERROR_REPLAY_ERROR  0x10000000
+#define E1000_RXDADV_LNKSEC_ERROR_BAD_SIG       0x18000000
+
+#define E1000_RXDADV_IPSEC_STATUS_SECP          0x00020000
+#define E1000_RXDADV_IPSEC_ERROR_BIT_MASK       0x18000000
+#define E1000_RXDADV_IPSEC_ERROR_INVALID_PROTOCOL       0x08000000
+#define E1000_RXDADV_IPSEC_ERROR_INVALID_LENGTH         0x10000000
+#define E1000_RXDADV_IPSEC_ERROR_AUTHENTICATION_FAILED  0x18000000
+
+/* Transmit Descriptor - Advanced.
+ * 'read' is the request layout (buffer address plus command/type and
+ * offload/status words); 'wb' is the hardware write-back view carrying
+ * the completion status (and sequence seed when enabled).
+ */
+union e1000_adv_tx_desc {
+       struct {
+               __le64 buffer_addr;    /* Address of descriptor's data buf */
+               __le32 cmd_type_len;   /* Command, descriptor type, length */
+               __le32 olinfo_status;  /* Offload info and status */
+       } read;
+       struct {
+               __le64 rsvd;       /* Reserved */
+               __le32 nxtseq_seed;
+               __le32 status;
+       } wb;
+};
+
+/* Adv Transmit Descriptor Config Masks */
+#define E1000_ADVTXD_DTYP_CTXT    0x00200000 /* Advanced Context Descriptor */
+#define E1000_ADVTXD_DTYP_DATA    0x00300000 /* Advanced Data Descriptor */
+#define E1000_ADVTXD_DCMD_EOP     0x01000000 /* End of Packet */
+#define E1000_ADVTXD_DCMD_IFCS    0x02000000 /* Insert FCS (Ethernet CRC) */
+#define E1000_ADVTXD_DCMD_RS      0x08000000 /* Report Status */
+#define E1000_ADVTXD_DCMD_DDTYP_ISCSI  0x10000000 /* DDP hdr type or iSCSI */
+#define E1000_ADVTXD_DCMD_DEXT    0x20000000 /* Descriptor extension (1=Adv) */
+#define E1000_ADVTXD_DCMD_VLE     0x40000000 /* VLAN pkt enable */
+#define E1000_ADVTXD_DCMD_TSE     0x80000000 /* TCP Seg enable */
+#define E1000_ADVTXD_MAC_LINKSEC  0x00040000 /* Apply LinkSec on packet */
+#define E1000_ADVTXD_MAC_TSTAMP   0x00080000 /* IEEE1588 Timestamp packet */
+#define E1000_ADVTXD_STAT_SN_CRC  0x00000002 /* NXTSEQ/SEED present in WB */
+#define E1000_ADVTXD_IDX_SHIFT    4  /* Adv desc Index shift */
+#define E1000_ADVTXD_POPTS_ISCO_1ST  0x00000000 /* 1st TSO of iSCSI PDU */
+#define E1000_ADVTXD_POPTS_ISCO_MDL  0x00000800 /* Middle TSO of iSCSI PDU */
+#define E1000_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */
+#define E1000_ADVTXD_POPTS_ISCO_FULL 0x00001800 /* 1st&Last TSO-full iSCSI PDU*/
+#define E1000_ADVTXD_POPTS_IPSEC     0x00000400 /* IPSec offload request */
+#define E1000_ADVTXD_PAYLEN_SHIFT    14 /* Adv desc PAYLEN shift */
+
+/* Context descriptors: carry the VLAN/MAC/IP length fields and L4
+ * offload parameters used by subsequent advanced data descriptors
+ * (shift/mask definitions for these words follow below). */
+struct e1000_adv_tx_context_desc {
+       __le32 vlan_macip_lens;  /* VLAN tag plus MAC/IP header lengths */
+       __le32 seqnum_seed;
+       __le32 type_tucmd_mlhl;  /* Descriptor type and TU command bits */
+       __le32 mss_l4len_idx;    /* MSS, L4 header length and index */
+};
+
+#define E1000_ADVTXD_MACLEN_SHIFT    9  /* Adv ctxt desc mac len shift */
+#define E1000_ADVTXD_VLAN_SHIFT     16  /* Adv ctxt vlan tag shift */
+#define E1000_ADVTXD_TUCMD_IPV4    0x00000400  /* IP Packet Type: 1=IPv4 */
+#define E1000_ADVTXD_TUCMD_IPV6    0x00000000  /* IP Packet Type: 0=IPv6 */
+#define E1000_ADVTXD_TUCMD_L4T_UDP 0x00000000  /* L4 Packet TYPE of UDP */
+#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800  /* L4 Packet TYPE of TCP */
+#define E1000_ADVTXD_TUCMD_L4T_SCTP 0x00001000  /* L4 Packet TYPE of SCTP */
+#define E1000_ADVTXD_TUCMD_IPSEC_TYPE_ESP    0x00002000 /* IPSec Type ESP */
+/* IPSec Encrypt Enable for ESP */
+#define E1000_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN  0x00004000
+#define E1000_ADVTXD_TUCMD_MKRREQ  0x00002000 /* Req requires Markers and CRC */
+#define E1000_ADVTXD_L4LEN_SHIFT     8  /* Adv ctxt L4LEN shift */
+#define E1000_ADVTXD_MSS_SHIFT      16  /* Adv ctxt MSS shift */
+/* Adv ctxt IPSec SA IDX mask */
+#define E1000_ADVTXD_IPSEC_SA_INDEX_MASK     0x000000FF
+/* Adv ctxt IPSec ESP len mask */
+#define E1000_ADVTXD_IPSEC_ESP_LEN_MASK      0x000000FF
+
+/* Additional Transmit Descriptor Control definitions */
+#define E1000_TXDCTL_QUEUE_ENABLE  0x02000000 /* Enable specific Tx Queue */
+#define E1000_TXDCTL_SWFLSH        0x04000000 /* Tx Desc. write-back flushing */
+/* Tx Queue Arbitration Priority 0=low, 1=high */
+#define E1000_TXDCTL_PRIORITY      0x08000000
+
+/* Additional Receive Descriptor Control definitions */
+#define E1000_RXDCTL_QUEUE_ENABLE  0x02000000 /* Enable specific Rx Queue */
+#define E1000_RXDCTL_SWFLSH        0x04000000 /* Rx Desc. write-back flushing */
+
+/* Direct Cache Access (DCA) definitions */
+#define E1000_DCA_CTRL_DCA_ENABLE  0x00000000 /* DCA Enable */
+#define E1000_DCA_CTRL_DCA_DISABLE 0x00000001 /* DCA Disable */
+
+#define E1000_DCA_CTRL_DCA_MODE_CB1 0x00 /* DCA Mode CB1 */
+#define E1000_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */
+
+#define E1000_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */
+#define E1000_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */
+#define E1000_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */
+#define E1000_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */
+
+#define E1000_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */
+#define E1000_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
+#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
+
+#define E1000_DCA_TXCTRL_CPUID_MASK_82576 0xFF000000 /* Tx CPUID Mask */
+#define E1000_DCA_RXCTRL_CPUID_MASK_82576 0xFF000000 /* Rx CPUID Mask */
+#define E1000_DCA_TXCTRL_CPUID_SHIFT_82576 24 /* Tx CPUID */
+#define E1000_DCA_RXCTRL_CPUID_SHIFT_82576 24 /* Rx CPUID */
+
+/* Additional interrupt register bit definitions */
+#define E1000_ICR_LSECPNS       0x00000020          /* PN threshold - server */
+#define E1000_IMS_LSECPNS       E1000_ICR_LSECPNS   /* PN threshold - server */
+#define E1000_ICS_LSECPNS       E1000_ICR_LSECPNS   /* PN threshold - server */
+
+/* ETQF register bit definitions */
+#define E1000_ETQF_FILTER_ENABLE   (1 << 26)
+#define E1000_ETQF_IMM_INT         (1 << 29)
+#define E1000_ETQF_1588            (1 << 30)
+#define E1000_ETQF_QUEUE_ENABLE    (1 << 31)
+/*
+ * ETQF filter list: one static filter per filter consumer. This is
+ *                   to avoid filter collisions later. Add new filters
+ *                   here!!
+ *
+ * Current filters:
+ *    EAPOL 802.1x (0x888e): Filter 0
+ */
+#define E1000_ETQF_FILTER_EAPOL          0
+
+#define E1000_FTQF_VF_BP               0x00008000
+#define E1000_FTQF_1588_TIME_STAMP     0x08000000
+#define E1000_FTQF_MASK                0xF0000000
+#define E1000_FTQF_MASK_PROTO_BP       0x10000000
+#define E1000_FTQF_MASK_SOURCE_ADDR_BP 0x20000000
+#define E1000_FTQF_MASK_DEST_ADDR_BP   0x40000000
+#define E1000_FTQF_MASK_SOURCE_PORT_BP 0x80000000
+
+#define E1000_NVM_APME_82575          0x0400
+#define MAX_NUM_VFS                   8
+
+#define E1000_DTXSWC_MAC_SPOOF_MASK   0x000000FF /* Per VF MAC spoof control */
+#define E1000_DTXSWC_VLAN_SPOOF_MASK  0x0000FF00 /* Per VF VLAN spoof control */
+#define E1000_DTXSWC_LLE_MASK         0x00FF0000 /* Per VF Local LB enables */
+#define E1000_DTXSWC_VLAN_SPOOF_SHIFT 8
+#define E1000_DTXSWC_LLE_SHIFT        16
+#define E1000_DTXSWC_VMDQ_LOOPBACK_EN (1 << 31)  /* global VF LB enable */
+
+/* Easy defines for setting default pool, would normally be left a zero */
+#define E1000_VT_CTL_DEFAULT_POOL_SHIFT 7
+#define E1000_VT_CTL_DEFAULT_POOL_MASK  (0x7 << E1000_VT_CTL_DEFAULT_POOL_SHIFT)
+
+/* Other useful VMD_CTL register defines */
+#define E1000_VT_CTL_IGNORE_MAC         (1 << 28)
+#define E1000_VT_CTL_DISABLE_DEF_POOL   (1 << 29)
+#define E1000_VT_CTL_VM_REPL_EN         (1 << 30)
+
+/* Per VM Offload register setup */
+#define E1000_VMOLR_RLPML_MASK 0x00003FFF /* Long Packet Maximum Length mask */
+#define E1000_VMOLR_LPE        0x00010000 /* Accept Long packet */
+#define E1000_VMOLR_RSSE       0x00020000 /* Enable RSS */
+#define E1000_VMOLR_AUPE       0x01000000 /* Accept untagged packets */
+#define E1000_VMOLR_ROMPE      0x02000000 /* Accept overflow multicast */
+#define E1000_VMOLR_ROPE       0x04000000 /* Accept overflow unicast */
+#define E1000_VMOLR_BAM        0x08000000 /* Accept Broadcast packets */
+#define E1000_VMOLR_MPME       0x10000000 /* Multicast promiscuous mode */
+#define E1000_VMOLR_STRVLAN    0x40000000 /* Vlan stripping enable */
+#define E1000_VMOLR_STRCRC     0x80000000 /* CRC stripping enable */
+
+#define E1000_VMOLR_VPE        0x00800000 /* VLAN promiscuous enable */
+#define E1000_VMOLR_UPE        0x20000000 /* Unicast promisuous enable */
+#define E1000_DVMOLR_HIDVLAN   0x20000000 /* Vlan hiding enable */
+#define E1000_DVMOLR_STRVLAN   0x40000000 /* Vlan stripping enable */
+#define E1000_DVMOLR_STRCRC    0x80000000 /* CRC stripping enable */
+
+#define E1000_PBRWAC_WALPB     0x00000007 /* Wrap around event on LAN Rx PB */
+#define E1000_PBRWAC_PBE       0x00000008 /* Rx packet buffer empty */
+
+#define E1000_VLVF_ARRAY_SIZE     32
+#define E1000_VLVF_VLANID_MASK    0x00000FFF
+#define E1000_VLVF_POOLSEL_SHIFT  12
+#define E1000_VLVF_POOLSEL_MASK   (0xFF << E1000_VLVF_POOLSEL_SHIFT)
+#define E1000_VLVF_LVLAN          0x00100000
+#define E1000_VLVF_VLANID_ENABLE  0x80000000
+
+#define E1000_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */
+#define E1000_VMVIR_VLANA_NEVER   0x80000000 /* Never insert VLAN tag */
+
+#define E1000_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */
+
+#define E1000_IOVCTL 0x05BBC
+#define E1000_IOVCTL_REUSE_VFQ 0x00000001
+
+#define E1000_RPLOLR_STRVLAN   0x40000000
+#define E1000_RPLOLR_STRCRC    0x80000000
+
+#define E1000_TCTL_EXT_COLD       0x000FFC00
+#define E1000_TCTL_EXT_COLD_SHIFT 10
+
+#define E1000_DTXCTL_8023LL     0x0004
+#define E1000_DTXCTL_VLAN_ADDED 0x0008
+#define E1000_DTXCTL_OOS_ENABLE 0x0010
+#define E1000_DTXCTL_MDP_EN     0x0020
+#define E1000_DTXCTL_SPOOF_INT  0x0040
+
+#define ALL_QUEUES   0xFFFF
+
+/* Rx packet buffer size defines */
+#define E1000_RXPBS_SIZE_MASK_82576  0x0000007F
+void e1000_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable);
+void e1000_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf);
+void e1000_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable);
+s32 e1000_init_nvm_params_82575(struct e1000_hw *hw);
+
+/* Promiscuous-mode settings; passed to e1000_promisc_set_vf() below */
+enum e1000_promisc_type {
+       e1000_promisc_disabled = 0,   /* all promisc modes disabled */
+       e1000_promisc_unicast = 1,    /* unicast promiscuous enabled */
+       e1000_promisc_multicast = 2,  /* multicast promiscuous enabled */
+       e1000_promisc_enabled = 3,    /* both uni and multicast promisc */
+       e1000_num_promisc_types       /* count of valid promisc settings */
+};
+
+void e1000_vfta_set_vf(struct e1000_hw *, u16, bool);
+void e1000_rlpml_set_vf(struct e1000_hw *, u16);
+s32 e1000_promisc_set_vf(struct e1000_hw *, enum e1000_promisc_type type);
+u16 e1000_rxpbs_adjust_82580(u32 data);
+s32 e1000_set_eee_i350(struct e1000_hw *);
+#endif /* _E1000_82575_H_ */
diff --git a/lib/librte_pmd_igb/igb/e1000_api.c b/lib/librte_pmd_igb/igb/e1000_api.c
new file mode 100644 (file)
index 0000000..fc41f73
--- /dev/null
@@ -0,0 +1,1152 @@
+/******************************************************************************
+
+  Copyright (c) 2001-2011, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#include "e1000_api.h"
+
+/**
+ *  e1000_init_mac_params - Initialize MAC function pointers
+ *  @hw: pointer to the HW structure
+ *
+ *  This function initializes the function pointers for the MAC
+ *  set of functions.  Called by drivers or by e1000_setup_init_funcs.
+ **/
+s32 e1000_init_mac_params(struct e1000_hw *hw)
+{
+       s32 ret_val = E1000_SUCCESS;
+
+       if (hw->mac.ops.init_params) {
+               ret_val = hw->mac.ops.init_params(hw);
+               if (ret_val) {
+                       DEBUGOUT("MAC Initialization Error\n");
+                       goto out;
+               }
+       } else {
+               DEBUGOUT("mac.init_mac_params was NULL\n");
+               ret_val = -E1000_ERR_CONFIG;
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_init_nvm_params - Initialize NVM function pointers
+ *  @hw: pointer to the HW structure
+ *
+ *  This function initializes the function pointers for the NVM
+ *  set of functions.  Called by drivers or by e1000_setup_init_funcs.
+ **/
+s32 e1000_init_nvm_params(struct e1000_hw *hw)
+{
+       s32 ret_val = E1000_SUCCESS;
+
+       if (hw->nvm.ops.init_params) {
+               ret_val = hw->nvm.ops.init_params(hw);
+               if (ret_val) {
+                       DEBUGOUT("NVM Initialization Error\n");
+                       goto out;
+               }
+       } else {
+               DEBUGOUT("nvm.init_nvm_params was NULL\n");
+               ret_val = -E1000_ERR_CONFIG;
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_init_phy_params - Initialize PHY function pointers
+ *  @hw: pointer to the HW structure
+ *
+ *  This function initializes the function pointers for the PHY
+ *  set of functions.  Called by drivers or by e1000_setup_init_funcs.
+ **/
+s32 e1000_init_phy_params(struct e1000_hw *hw)
+{
+       s32 ret_val = E1000_SUCCESS;
+
+       if (hw->phy.ops.init_params) {
+               ret_val = hw->phy.ops.init_params(hw);
+               if (ret_val) {
+                       DEBUGOUT("PHY Initialization Error\n");
+                       goto out;
+               }
+       } else {
+               DEBUGOUT("phy.init_phy_params was NULL\n");
+               ret_val =  -E1000_ERR_CONFIG;
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_init_mbx_params - Initialize mailbox function pointers
+ *  @hw: pointer to the HW structure
+ *
+ *  This function initializes the function pointers for the mailbox
+ *  set of functions.  Called by drivers or by e1000_setup_init_funcs.
+ **/
+s32 e1000_init_mbx_params(struct e1000_hw *hw)
+{
+       s32 ret_val = E1000_SUCCESS;
+
+       if (hw->mbx.ops.init_params) {
+               ret_val = hw->mbx.ops.init_params(hw);
+               if (ret_val) {
+                       DEBUGOUT("Mailbox Initialization Error\n");
+                       goto out;
+               }
+       } else {
+               DEBUGOUT("mbx.init_mbx_params was NULL\n");
+               ret_val =  -E1000_ERR_CONFIG;
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_set_mac_type - Sets MAC type
+ *  @hw: pointer to the HW structure
+ *
+ *  This function sets the mac type of the adapter based on the
+ *  device ID stored in the hw structure.
+ *  MUST BE FIRST FUNCTION CALLED (explicitly or through
+ *  e1000_setup_init_funcs()).  Returns -E1000_ERR_MAC_INIT for unknown IDs.
+ **/
+s32 e1000_set_mac_type(struct e1000_hw *hw)
+{
+       struct e1000_mac_info *mac = &hw->mac;
+       s32 ret_val = E1000_SUCCESS;
+
+       DEBUGFUNC("e1000_set_mac_type");
+
+       switch (hw->device_id) {
+       case E1000_DEV_ID_82575EB_COPPER:
+       case E1000_DEV_ID_82575EB_FIBER_SERDES:
+       case E1000_DEV_ID_82575GB_QUAD_COPPER:
+               mac->type = e1000_82575;
+               break;
+       case E1000_DEV_ID_82576:
+       case E1000_DEV_ID_82576_FIBER:
+       case E1000_DEV_ID_82576_SERDES:
+       case E1000_DEV_ID_82576_QUAD_COPPER:
+       case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
+       case E1000_DEV_ID_82576_NS:
+       case E1000_DEV_ID_82576_NS_SERDES:
+       case E1000_DEV_ID_82576_SERDES_QUAD:
+               mac->type = e1000_82576;
+               break;
+       case E1000_DEV_ID_82580_COPPER:
+       case E1000_DEV_ID_82580_FIBER:
+       case E1000_DEV_ID_82580_SERDES:
+       case E1000_DEV_ID_82580_SGMII:
+       case E1000_DEV_ID_82580_COPPER_DUAL:
+       case E1000_DEV_ID_82580_QUAD_FIBER:
+       case E1000_DEV_ID_DH89XXCC_SGMII:
+       case E1000_DEV_ID_DH89XXCC_SERDES:
+       case E1000_DEV_ID_DH89XXCC_BACKPLANE:
+       case E1000_DEV_ID_DH89XXCC_SFP:
+               mac->type = e1000_82580;
+               break;
+       case E1000_DEV_ID_I350_COPPER:
+       case E1000_DEV_ID_I350_FIBER:
+       case E1000_DEV_ID_I350_SERDES:
+       case E1000_DEV_ID_I350_SGMII:
+       case E1000_DEV_ID_I350_DA4:
+               mac->type = e1000_i350;
+               break;
+       case E1000_DEV_ID_82576_VF:
+               mac->type = e1000_vfadapt;
+               break;
+       case E1000_DEV_ID_I350_VF:
+               mac->type = e1000_vfadapt_i350;
+               break;
+       default:
+               /* Should never have loaded on this device */
+               ret_val = -E1000_ERR_MAC_INIT;
+               break;
+       }
+
+       return ret_val;
+}
+
+/**
+ *  e1000_setup_init_funcs - Initializes function pointers
+ *  @hw: pointer to the HW structure
+ *  @init_device: TRUE will initialize the rest of the function pointers
+ *                 getting the device ready for use.  FALSE will only set
+ *                 MAC type and the function pointers for the other init
+ *                 functions.  Passing FALSE will not generate any hardware
+ *                 reads or writes.
+ *
+ *  This function must be called by a driver in order to use the rest
+ *  of the 'shared' code files. Called by drivers only; fails with
+ *  -E1000_ERR_CONFIG if hw->hw_addr is not mapped.
+ **/
+s32 e1000_setup_init_funcs(struct e1000_hw *hw, bool init_device)
+{
+       s32 ret_val;
+
+       /* Can't do much good without knowing the MAC type. */
+       ret_val = e1000_set_mac_type(hw);
+       if (ret_val) {
+               DEBUGOUT("ERROR: MAC type could not be set properly.\n");
+               goto out;
+       }
+
+       if (!hw->hw_addr) {
+               DEBUGOUT("ERROR: Registers not mapped\n");
+               ret_val = -E1000_ERR_CONFIG;
+               goto out;
+       }
+
+       /*
+        * Init function pointers to generic implementations. We do this first
+        * allowing a driver module to override it afterward.
+        */
+       e1000_init_mac_ops_generic(hw);
+       e1000_init_phy_ops_generic(hw);
+       e1000_init_nvm_ops_generic(hw);
+       e1000_init_mbx_ops_generic(hw);
+
+       /*
+        * Set up the init function pointers. These are functions within the
+        * adapter family file that sets up function pointers for the rest of
+        * the functions in that family.
+        */
+       switch (hw->mac.type) {
+       case e1000_82575:
+       case e1000_82576:
+       case e1000_82580:
+       case e1000_i350:
+               e1000_init_function_pointers_82575(hw);
+               break;
+       case e1000_vfadapt:
+               e1000_init_function_pointers_vf(hw);
+               break;
+       case e1000_vfadapt_i350:
+               e1000_init_function_pointers_vf(hw);
+               break;
+       default:
+               DEBUGOUT("Hardware not supported\n");
+               ret_val = -E1000_ERR_CONFIG;
+               break;
+       }
+
+       /*
+        * Initialize the rest of the function pointers. These require some
+        * register reads/writes in some cases.
+        */
+       if (!(ret_val) && init_device) {
+               ret_val = e1000_init_mac_params(hw);
+               if (ret_val)
+                       goto out;
+
+               ret_val = e1000_init_nvm_params(hw);
+               if (ret_val)
+                       goto out;
+
+               ret_val = e1000_init_phy_params(hw);
+               if (ret_val)
+                       goto out;
+
+               ret_val = e1000_init_mbx_params(hw);
+               if (ret_val)
+                       goto out;
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_get_bus_info - Obtain bus information for adapter
+ *  @hw: pointer to the HW structure
+ *
+ *  This will obtain information about the HW bus for which the
+ *  adapter is attached and stores it in the hw structure. This is a
+ *  function pointer entry point called by drivers.
+ **/
+s32 e1000_get_bus_info(struct e1000_hw *hw)
+{
+       if (hw->mac.ops.get_bus_info)
+               return hw->mac.ops.get_bus_info(hw);
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_clear_vfta - Clear VLAN filter table
+ *  @hw: pointer to the HW structure
+ *
+ *  This clears the VLAN filter table on the adapter. This is a function
+ *  pointer entry point called by drivers.
+ **/
+void e1000_clear_vfta(struct e1000_hw *hw)
+{
+       if (hw->mac.ops.clear_vfta)
+               hw->mac.ops.clear_vfta(hw);
+}
+
+/**
+ *  e1000_write_vfta - Write value to VLAN filter table
+ *  @hw: pointer to the HW structure
+ *  @offset: the 32-bit offset in which to write the value to.
+ *  @value: the 32-bit value to write at location offset.
+ *
+ *  This writes a 32-bit value to a 32-bit offset in the VLAN filter
+ *  table. This is a function pointer entry point called by drivers.
+ **/
+void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
+{
+       if (hw->mac.ops.write_vfta)
+               hw->mac.ops.write_vfta(hw, offset, value);
+}
+
+/**
+ *  e1000_update_mc_addr_list - Update Multicast addresses
+ *  @hw: pointer to the HW structure
+ *  @mc_addr_list: array of multicast addresses to program
+ *  @mc_addr_count: number of multicast addresses to program
+ *
+ *  Updates the Multicast Table Array.
+ *  The caller must have a packed mc_addr_list of multicast addresses.
+ **/
+void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list,
+                               u32 mc_addr_count)
+{
+       if (hw->mac.ops.update_mc_addr_list)
+               hw->mac.ops.update_mc_addr_list(hw, mc_addr_list,
+                                               mc_addr_count);
+}
+
+/**
+ *  e1000_force_mac_fc - Force MAC flow control
+ *  @hw: pointer to the HW structure
+ *
+ *  Force the MAC's flow control settings. Currently no func pointer exists
+ *  and all implementations are handled in the generic version of this
+ *  function.
+ **/
+s32 e1000_force_mac_fc(struct e1000_hw *hw)
+{
+       return e1000_force_mac_fc_generic(hw);
+}
+
+/**
+ *  e1000_check_for_link - Check/Store link connection
+ *  @hw: pointer to the HW structure
+ *
+ *  This checks the link condition of the adapter and stores the
+ *  results in the hw->mac structure. This is a function pointer entry
+ *  point called by drivers; returns -E1000_ERR_CONFIG if no op is set.
+ **/
+s32 e1000_check_for_link(struct e1000_hw *hw)
+{
+       if (hw->mac.ops.check_for_link)
+               return hw->mac.ops.check_for_link(hw);
+
+       return -E1000_ERR_CONFIG;
+}
+
+/**
+ *  e1000_check_mng_mode - Check management mode
+ *  @hw: pointer to the HW structure
+ *
+ *  This checks if the adapter has manageability enabled.
+ *  This is a function pointer entry point called by drivers.
+ **/
+bool e1000_check_mng_mode(struct e1000_hw *hw)
+{
+       if (hw->mac.ops.check_mng_mode)
+               return hw->mac.ops.check_mng_mode(hw);
+
+       return FALSE;
+}
+
+/**
+ *  e1000_mng_write_dhcp_info - Writes DHCP info to host interface
+ *  @hw: pointer to the HW structure
+ *  @buffer: pointer to the host interface
+ *  @length: size of the buffer
+ *
+ *  Writes the DHCP information to the host interface.
+ **/
+s32 e1000_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length)
+{
+       return e1000_mng_write_dhcp_info_generic(hw, buffer, length);
+}
+
+/**
+ *  e1000_reset_hw - Reset hardware
+ *  @hw: pointer to the HW structure
+ *
+ *  This resets the hardware into a known state. This is a function pointer
+ *  entry point called by drivers; returns -E1000_ERR_CONFIG if no op is set.
+ **/
+s32 e1000_reset_hw(struct e1000_hw *hw)
+{
+       if (hw->mac.ops.reset_hw)
+               return hw->mac.ops.reset_hw(hw);
+
+       return -E1000_ERR_CONFIG;
+}
+
+/**
+ *  e1000_init_hw - Initialize hardware
+ *  @hw: pointer to the HW structure
+ *
+ *  This inits the hardware readying it for operation. This is a function
+ *  pointer entry point called by drivers; -E1000_ERR_CONFIG if no op is set.
+ **/
+s32 e1000_init_hw(struct e1000_hw *hw)
+{
+       if (hw->mac.ops.init_hw)
+               return hw->mac.ops.init_hw(hw);
+
+       return -E1000_ERR_CONFIG;
+}
+
+/**
+ *  e1000_setup_link - Configures link and flow control
+ *  @hw: pointer to the HW structure
+ *
+ *  This configures link and flow control settings for the adapter. This
+ *  is a function pointer entry point called by drivers. While modules can
+ *  also call this, they probably call their own version of this function.
+ **/
+s32 e1000_setup_link(struct e1000_hw *hw)
+{
+       if (hw->mac.ops.setup_link)
+               return hw->mac.ops.setup_link(hw);
+
+       return -E1000_ERR_CONFIG;
+}
+
+/**
+ *  e1000_get_speed_and_duplex - Returns current speed and duplex
+ *  @hw: pointer to the HW structure
+ *  @speed: pointer to a 16-bit value to store the speed
+ *  @duplex: pointer to a 16-bit value to store the duplex.
+ *
+ *  This returns the speed and duplex of the adapter in the two 'out'
+ *  variables passed in. This is a function pointer entry point called
+ *  by drivers.
+ **/
+s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex)
+{
+       if (hw->mac.ops.get_link_up_info)
+               return hw->mac.ops.get_link_up_info(hw, speed, duplex);
+
+       return -E1000_ERR_CONFIG;
+}
+
+/**
+ *  e1000_setup_led - Configures SW controllable LED
+ *  @hw: pointer to the HW structure
+ *
+ *  This prepares the SW controllable LED for use and saves the current state
+ *  of the LED so it can be later restored. This is a function pointer entry
+ *  point called by drivers.
+ **/
+s32 e1000_setup_led(struct e1000_hw *hw)
+{
+       if (hw->mac.ops.setup_led)
+               return hw->mac.ops.setup_led(hw);
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_cleanup_led - Restores SW controllable LED
+ *  @hw: pointer to the HW structure
+ *
+ *  This restores the SW controllable LED to the value saved off by
+ *  e1000_setup_led. This is a function pointer entry point called by drivers.
+ **/
+s32 e1000_cleanup_led(struct e1000_hw *hw)
+{
+       if (hw->mac.ops.cleanup_led)
+               return hw->mac.ops.cleanup_led(hw);
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_blink_led - Blink SW controllable LED
+ *  @hw: pointer to the HW structure
+ *
+ *  This starts the adapter LED blinking. Request the LED to be setup first
+ *  and cleaned up after. This is a function pointer entry point called by
+ *  drivers.
+ **/
+s32 e1000_blink_led(struct e1000_hw *hw)
+{
+       if (hw->mac.ops.blink_led)
+               return hw->mac.ops.blink_led(hw);
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_id_led_init - store LED configurations in SW
+ *  @hw: pointer to the HW structure
+ *
+ *  Initializes the LED config in SW. This is a function pointer entry point
+ *  called by drivers.
+ **/
+s32 e1000_id_led_init(struct e1000_hw *hw)
+{
+       if (hw->mac.ops.id_led_init)
+               return hw->mac.ops.id_led_init(hw);
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_led_on - Turn on SW controllable LED
+ *  @hw: pointer to the HW structure
+ *
+ *  Turns the SW defined LED on. This is a function pointer entry point
+ *  called by drivers.
+ **/
+s32 e1000_led_on(struct e1000_hw *hw)
+{
+       if (hw->mac.ops.led_on)
+               return hw->mac.ops.led_on(hw);
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_led_off - Turn off SW controllable LED
+ *  @hw: pointer to the HW structure
+ *
+ *  Turns the SW defined LED off. This is a function pointer entry point
+ *  called by drivers.
+ **/
+s32 e1000_led_off(struct e1000_hw *hw)
+{
+       if (hw->mac.ops.led_off)
+               return hw->mac.ops.led_off(hw);
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_reset_adaptive - Reset adaptive IFS
+ *  @hw: pointer to the HW structure
+ *
+ *  Resets the adaptive IFS. Currently no func pointer exists and all
+ *  implementations are handled in the generic version of this function.
+ **/
+void e1000_reset_adaptive(struct e1000_hw *hw)
+{
+       e1000_reset_adaptive_generic(hw);
+}
+
+/**
+ *  e1000_update_adaptive - Update adaptive IFS
+ *  @hw: pointer to the HW structure
+ *
+ *  Updates adapter IFS. Currently no func pointer exists and all
+ *  implementations are handled in the generic version of this function.
+ **/
+void e1000_update_adaptive(struct e1000_hw *hw)
+{
+       e1000_update_adaptive_generic(hw);
+}
+
+/**
+ *  e1000_disable_pcie_master - Disable PCI-Express master access
+ *  @hw: pointer to the HW structure
+ *
+ *  Disables PCI-Express master access and verifies there are no pending
+ *  requests. Currently no func pointer exists and all implementations are
+ *  handled in the generic version of this function.
+ **/
+s32 e1000_disable_pcie_master(struct e1000_hw *hw)
+{
+       return e1000_disable_pcie_master_generic(hw);
+}
+
+/**
+ *  e1000_config_collision_dist - Configure collision distance
+ *  @hw: pointer to the HW structure
+ *
+ *  Configures the collision distance to the default value and is used
+ *  during link setup.
+ **/
+void e1000_config_collision_dist(struct e1000_hw *hw)
+{
+       if (hw->mac.ops.config_collision_dist)
+               hw->mac.ops.config_collision_dist(hw);
+}
+
+/**
+ *  e1000_rar_set - Sets a receive address register
+ *  @hw: pointer to the HW structure
+ *  @addr: address to set the RAR to
+ *  @index: the RAR to set
+ *
+ *  Sets a Receive Address Register (RAR) to the specified address.
+ **/
+void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
+{
+       if (hw->mac.ops.rar_set)
+               hw->mac.ops.rar_set(hw, addr, index);
+}
+
+/**
+ *  e1000_validate_mdi_setting - Ensures valid MDI/MDIX SW state
+ *  @hw: pointer to the HW structure
+ *
+ *  Ensures that the MDI/MDIX SW state is valid; no-op success if op unset.
+ **/
+s32 e1000_validate_mdi_setting(struct e1000_hw *hw)
+{
+       if (hw->mac.ops.validate_mdi_setting)
+               return hw->mac.ops.validate_mdi_setting(hw);
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_hash_mc_addr - Determines address location in multicast table
+ *  @hw: pointer to the HW structure
+ *  @mc_addr: Multicast address to hash.
+ *
+ *  This hashes an address to determine its location in the multicast
+ *  table. Currently no func pointer exists and all implementations
+ *  are handled in the generic version of this function.
+ **/
+u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
+{
+       return e1000_hash_mc_addr_generic(hw, mc_addr);
+}
+
+/**
+ *  e1000_enable_tx_pkt_filtering - Enable packet filtering on TX
+ *  @hw: pointer to the HW structure
+ *
+ *  Enables packet filtering on transmit packets if manageability is enabled
+ *  and host interface is enabled.
+ *  Currently no func pointer exists and all implementations are handled in the
+ *  generic version of this function.
+ **/
+bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw)
+{
+       return e1000_enable_tx_pkt_filtering_generic(hw);
+}
+
+/**
+ *  e1000_mng_host_if_write - Writes to the manageability host interface
+ *  @hw: pointer to the HW structure
+ *  @buffer: pointer to the host interface buffer
+ *  @length: size of the buffer
+ *  @offset: location in the buffer to write to
+ *  @sum: sum of the data (not checksum)
+ *
+ *  This function writes the buffer content at the offset given on the host if.
+ *  It also does alignment considerations to do the writes in most efficient
+ *  way.  Also fills up the sum of the buffer in *buffer parameter.
+ **/
+s32 e1000_mng_host_if_write(struct e1000_hw * hw, u8 *buffer, u16 length,
+                            u16 offset, u8 *sum)
+{
+       if (hw->mac.ops.mng_host_if_write)
+               return hw->mac.ops.mng_host_if_write(hw, buffer, length,
+                                                    offset, sum);
+
+       return E1000_NOT_IMPLEMENTED;
+}
+
+/**
+ *  e1000_mng_write_cmd_header - Writes manageability command header
+ *  @hw: pointer to the HW structure
+ *  @hdr: pointer to the host interface command header
+ *
+ *  Writes the command header after does the checksum calculation.
+ **/
+s32 e1000_mng_write_cmd_header(struct e1000_hw *hw,
+                               struct e1000_host_mng_command_header *hdr)
+{
+       if (hw->mac.ops.mng_write_cmd_header)
+               return hw->mac.ops.mng_write_cmd_header(hw, hdr);
+
+       return E1000_NOT_IMPLEMENTED;
+}
+
+/**
+ *  e1000_mng_enable_host_if - Checks host interface is enabled
+ *  @hw: pointer to the HW structure
+ *
+ *  Returns E1000_SUCCESS upon success, else E1000_ERR_HOST_INTERFACE_COMMAND
+ *
+ *  This function checks whether the HOST IF is enabled for command operation
+ *  and also checks whether the previous command is completed.  It busy waits
+ *  in case of previous command is not completed.
+ **/
+s32 e1000_mng_enable_host_if(struct e1000_hw * hw)
+{
+       if (hw->mac.ops.mng_enable_host_if)
+               return hw->mac.ops.mng_enable_host_if(hw);
+
+       return E1000_NOT_IMPLEMENTED;
+}
+
+/**
+ *  e1000_wait_autoneg - Waits for autonegotiation completion
+ *  @hw: pointer to the HW structure
+ *
+ *  Waits for autoneg to complete. Currently no func pointer exists and all
+ *  implementations are handled in the generic version of this function.
+ **/
+s32 e1000_wait_autoneg(struct e1000_hw *hw)
+{
+       if (hw->mac.ops.wait_autoneg)
+               return hw->mac.ops.wait_autoneg(hw);
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_check_reset_block - Verifies PHY can be reset
+ *  @hw: pointer to the HW structure
+ *
+ *  Checks if the PHY is in a state that can be reset or if manageability
+ *  has it tied up. This is a function pointer entry point called by drivers.
+ **/
+s32 e1000_check_reset_block(struct e1000_hw *hw)
+{
+       if (hw->phy.ops.check_reset_block)
+               return hw->phy.ops.check_reset_block(hw);
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_read_phy_reg - Reads PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: the register to read
+ *  @data: the buffer to store the 16-bit read.
+ *
+ *  Reads the PHY register and returns the value in data.
+ *  Entry point called by drivers; if no read_reg op is installed, *data is
+ *  left unchanged and E1000_SUCCESS is returned.
+ **/
+s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+       if (hw->phy.ops.read_reg)
+               return hw->phy.ops.read_reg(hw, offset, data);
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_write_phy_reg - Writes PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: the register to write
+ *  @data: the value to write.
+ *
+ *  Writes the PHY register at offset with the value in data.
+ *  This is a function pointer entry point called by drivers.
+ **/
+s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data)
+{
+       if (hw->phy.ops.write_reg)
+               return hw->phy.ops.write_reg(hw, offset, data);
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_release_phy - Generic release PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  Return if silicon family does not require a semaphore when accessing the
+ *  PHY.
+ **/
+void e1000_release_phy(struct e1000_hw *hw)
+{
+       if (hw->phy.ops.release)
+               hw->phy.ops.release(hw);
+}
+
+/**
+ *  e1000_acquire_phy - Generic acquire PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  Return success if silicon family does not require a semaphore when
+ *  accessing the PHY.
+ **/
+s32 e1000_acquire_phy(struct e1000_hw *hw)
+{
+       if (hw->phy.ops.acquire)
+               return hw->phy.ops.acquire(hw);
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_read_kmrn_reg - Reads register using Kumeran interface
+ *  @hw: pointer to the HW structure
+ *  @offset: the register to read
+ *  @data: the location to store the 16-bit value read.
+ *
+ *  Reads a register out of the Kumeran interface. Currently no func pointer
+ *  exists and all implementations are handled in the generic version of
+ *  this function.
+ **/
+s32 e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+       return e1000_read_kmrn_reg_generic(hw, offset, data);
+}
+
+/**
+ *  e1000_write_kmrn_reg - Writes register using Kumeran interface
+ *  @hw: pointer to the HW structure
+ *  @offset: the register to write
+ *  @data: the value to write.
+ *
+ *  Writes a register to the Kumeran interface. Currently no func pointer
+ *  exists and all implementations are handled in the generic version of
+ *  this function.
+ **/
+s32 e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data)
+{
+       return e1000_write_kmrn_reg_generic(hw, offset, data);
+}
+
+/**
+ *  e1000_get_cable_length - Retrieves cable length estimation
+ *  @hw: pointer to the HW structure
+ *
+ *  This function estimates the cable length and stores them in
+ *  hw->phy.min_length and hw->phy.max_length. This is a function pointer
+ *  entry point called by drivers.
+ **/
+s32 e1000_get_cable_length(struct e1000_hw *hw)
+{
+       if (hw->phy.ops.get_cable_length)
+               return hw->phy.ops.get_cable_length(hw);
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_get_phy_info - Retrieves PHY information from registers
+ *  @hw: pointer to the HW structure
+ *
+ *  This function gets some information from various PHY registers and
+ *  populates hw->phy values with it. This is a function pointer entry
+ *  point called by drivers.
+ **/
+s32 e1000_get_phy_info(struct e1000_hw *hw)
+{
+       if (hw->phy.ops.get_info)
+               return hw->phy.ops.get_info(hw);
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_phy_hw_reset - Hard PHY reset
+ *  @hw: pointer to the HW structure
+ *
+ *  Performs a hard PHY reset. This is a function pointer entry point called
+ *  by drivers.
+ **/
+s32 e1000_phy_hw_reset(struct e1000_hw *hw)
+{
+       if (hw->phy.ops.reset)
+               return hw->phy.ops.reset(hw);
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_phy_commit - Soft PHY reset
+ *  @hw: pointer to the HW structure
+ *
+ *  Performs a soft PHY reset on those that apply. This is a function pointer
+ *  entry point called by drivers.
+ **/
+s32 e1000_phy_commit(struct e1000_hw *hw)
+{
+       if (hw->phy.ops.commit)
+               return hw->phy.ops.commit(hw);
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_set_d0_lplu_state - Sets low power link up state for D0
+ *  @hw: pointer to the HW structure
+ *  @active: boolean used to enable/disable lplu
+ *
+ *  Success returns 0, Failure returns 1
+ *
+ *  The low power link up (lplu) state is set to the power management level D0
+ *  and SmartSpeed is disabled when active is TRUE, else clear lplu for D0
+ *  and enable Smartspeed.  LPLU and Smartspeed are mutually exclusive.  LPLU
+ *  is used during Dx states where the power conservation is most important.
+ *  During driver activity, SmartSpeed should be enabled so performance is
+ *  maintained.  This is a function pointer entry point called by drivers.
+ **/
+s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active)
+{
+       if (hw->phy.ops.set_d0_lplu_state)
+               return hw->phy.ops.set_d0_lplu_state(hw, active);
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_set_d3_lplu_state - Sets low power link up state for D3
+ *  @hw: pointer to the HW structure
+ *  @active: boolean used to enable/disable lplu
+ *
+ *  Success returns 0, Failure returns 1
+ *
+ *  The low power link up (lplu) state is set to the power management level D3
+ *  and SmartSpeed is disabled when active is TRUE, else clear lplu for D3
+ *  and enable Smartspeed.  LPLU and Smartspeed are mutually exclusive.  LPLU
+ *  is used during Dx states where the power conservation is most important.
+ *  During driver activity, SmartSpeed should be enabled so performance is
+ *  maintained.  This is a function pointer entry point called by drivers.
+ **/
+s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active)
+{
+       if (hw->phy.ops.set_d3_lplu_state)
+               return hw->phy.ops.set_d3_lplu_state(hw, active);
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_read_mac_addr - Reads MAC address
+ *  @hw: pointer to the HW structure
+ *
+ *  Reads the MAC address out of the adapter and stores it in the HW structure.
+ *  Currently no func pointer exists and all implementations are handled in the
+ *  generic version of this function.
+ **/
+s32 e1000_read_mac_addr(struct e1000_hw *hw)
+{
+       if (hw->mac.ops.read_mac_addr)
+               return hw->mac.ops.read_mac_addr(hw);
+
+       return e1000_read_mac_addr_generic(hw);
+}
+
+/**
+ *  e1000_read_pba_string - Read device part number string
+ *  @hw: pointer to the HW structure
+ *  @pba_num: pointer to device part number
+ *  @pba_num_size: size of part number buffer
+ *
+ *  Reads the product board assembly (PBA) number from the EEPROM and stores
+ *  the value in pba_num.
+ *  Currently no func pointer exists and all implementations are handled in the
+ *  generic version of this function.
+ **/
+s32 e1000_read_pba_string(struct e1000_hw *hw, u8 *pba_num, u32 pba_num_size)
+{
+       return e1000_read_pba_string_generic(hw, pba_num, pba_num_size);
+}
+
+/**
+ *  e1000_read_pba_length - Read device part number string length
+ *  @hw: pointer to the HW structure
+ *  @pba_num_size: size of part number buffer
+ *
+ *  Reads the product board assembly (PBA) number length from the EEPROM and
+ *  stores the value in pba_num.
+ *  Currently no func pointer exists and all implementations are handled in the
+ *  generic version of this function.
+ **/
+s32 e1000_read_pba_length(struct e1000_hw *hw, u32 *pba_num_size)
+{
+       return e1000_read_pba_length_generic(hw, pba_num_size);
+}
+
+/**
+ *  e1000_validate_nvm_checksum - Verifies NVM (EEPROM) checksum
+ *  @hw: pointer to the HW structure
+ *
+ *  Validates the NVM checksum is correct. This is a function pointer entry
+ *  point called by drivers.
+ **/
+s32 e1000_validate_nvm_checksum(struct e1000_hw *hw)
+{
+       if (hw->nvm.ops.validate)
+               return hw->nvm.ops.validate(hw);
+
+       return -E1000_ERR_CONFIG; /* no validate op bound: configuration error */
+}
+
+/**
+ *  e1000_update_nvm_checksum - Updates NVM (EEPROM) checksum
+ *  @hw: pointer to the HW structure
+ *
+ *  Updates the NVM checksum. This is a function pointer entry point;
+ *  returns -E1000_ERR_CONFIG when no update op is bound.
+ **/
+s32 e1000_update_nvm_checksum(struct e1000_hw *hw)
+{
+       if (hw->nvm.ops.update)
+               return hw->nvm.ops.update(hw);
+
+       return -E1000_ERR_CONFIG; /* no update op bound: configuration error */
+}
+
+/**
+ *  e1000_reload_nvm - Reloads EEPROM
+ *  @hw: pointer to the HW structure
+ *
+ *  Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the
+ *  extended control register.
+ **/
+void e1000_reload_nvm(struct e1000_hw *hw)
+{
+       if (hw->nvm.ops.reload) /* silently a no-op when unsupported */
+               hw->nvm.ops.reload(hw);
+}
+
+/**
+ *  e1000_read_nvm - Reads NVM (EEPROM)
+ *  @hw: pointer to the HW structure
+ *  @offset: the word offset to read
+ *  @words: number of 16-bit words to read
+ *  @data: pointer to the properly sized buffer for the data.
+ *
+ *  Reads 16-bit chunks of data from the NVM (EEPROM). This is a function
+ *  pointer entry point called by drivers.
+ **/
+s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+       if (hw->nvm.ops.read)
+               return hw->nvm.ops.read(hw, offset, words, data);
+
+       return -E1000_ERR_CONFIG; /* no read op bound: configuration error */
+}
+
+/**
+ *  e1000_write_nvm - Writes to NVM (EEPROM)
+ *  @hw: pointer to the HW structure
+ *  @offset: the word offset to write
+ *  @words: number of 16-bit words to write
+ *  @data: pointer to the properly sized buffer for the data.
+ *
+ *  Writes 16-bit chunks of data to the NVM (EEPROM). This is a function
+ *  pointer entry point called by drivers.
+ **/
+s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+       if (hw->nvm.ops.write)
+               return hw->nvm.ops.write(hw, offset, words, data);
+
+       return E1000_SUCCESS; /* NOTE: unlike e1000_read_nvm(), a missing write op is not an error */
+}
+
+/**
+ *  e1000_write_8bit_ctrl_reg - Writes 8bit Control register
+ *  @hw: pointer to the HW structure
+ *  @reg: 32bit register offset
+ *  @offset: the register to write
+ *  @data: the value to write.
+ *
+ *  Writes the PHY register at offset with the value in data.
+ *  This is a function pointer entry point called by drivers.
+ **/
+s32 e1000_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, u32 offset,
+                              u8 data)
+{
+       return e1000_write_8bit_ctrl_reg_generic(hw, reg, offset, data); /* no per-MAC variants */
+}
+
+/**
+ * e1000_power_up_phy - Restores link in case of PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * The phy may be powered down to save power, to turn off link when the
+ * driver is unloaded, or wake on lan is not enabled (among others).
+ **/
+void e1000_power_up_phy(struct e1000_hw *hw)
+{
+       if (hw->phy.ops.power_up)
+               hw->phy.ops.power_up(hw);
+
+       e1000_setup_link(hw); /* always re-run link setup after power-up, even without a power_up op */
+}
+
+/**
+ * e1000_power_down_phy - Power down PHY
+ * @hw: pointer to the HW structure
+ *
+ * The phy may be powered down to save power, to turn off link when the
+ * driver is unloaded, or wake on lan is not enabled (among others).
+ **/
+void e1000_power_down_phy(struct e1000_hw *hw)
+{
+       if (hw->phy.ops.power_down) /* silently a no-op when unsupported */
+               hw->phy.ops.power_down(hw);
+}
+
+/**
+ *  e1000_power_up_fiber_serdes_link - Power up serdes link
+ *  @hw: pointer to the HW structure
+ *
+ *  Power on the optics and PCS.
+ **/
+void e1000_power_up_fiber_serdes_link(struct e1000_hw *hw)
+{
+       if (hw->mac.ops.power_up_serdes) /* silently a no-op for non-serdes MACs */
+               hw->mac.ops.power_up_serdes(hw);
+}
+
+/**
+ *  e1000_shutdown_fiber_serdes_link - Remove link during power down
+ *  @hw: pointer to the HW structure
+ *
+ *  Shutdown the optics and PCS on driver unload.
+ **/
+void e1000_shutdown_fiber_serdes_link(struct e1000_hw *hw)
+{
+       if (hw->mac.ops.shutdown_serdes) /* silently a no-op for non-serdes MACs */
+               hw->mac.ops.shutdown_serdes(hw);
+}
+
diff --git a/lib/librte_pmd_igb/igb/e1000_api.h b/lib/librte_pmd_igb/igb/e1000_api.h
new file mode 100644 (file)
index 0000000..daf8642
--- /dev/null
@@ -0,0 +1,156 @@
+/******************************************************************************
+
+  Copyright (c) 2001-2011, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#ifndef _E1000_API_H_
+#define _E1000_API_H_
+
+#include "e1000_hw.h"
+
+extern void    e1000_init_function_pointers_82575(struct e1000_hw *hw);
+extern void    e1000_rx_fifo_flush_82575(struct e1000_hw *hw);
+extern void    e1000_init_function_pointers_vf(struct e1000_hw *hw);
+extern void    e1000_power_up_fiber_serdes_link(struct e1000_hw *hw);
+extern void    e1000_shutdown_fiber_serdes_link(struct e1000_hw *hw);
+
+s32  e1000_set_mac_type(struct e1000_hw *hw);
+s32  e1000_setup_init_funcs(struct e1000_hw *hw, bool init_device);
+s32  e1000_init_mac_params(struct e1000_hw *hw);
+s32  e1000_init_nvm_params(struct e1000_hw *hw);
+s32  e1000_init_phy_params(struct e1000_hw *hw);
+s32  e1000_init_mbx_params(struct e1000_hw *hw);
+s32  e1000_get_bus_info(struct e1000_hw *hw);
+void e1000_clear_vfta(struct e1000_hw *hw);
+void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value);
+s32  e1000_force_mac_fc(struct e1000_hw *hw);
+s32  e1000_check_for_link(struct e1000_hw *hw);
+s32  e1000_reset_hw(struct e1000_hw *hw);
+s32  e1000_init_hw(struct e1000_hw *hw);
+s32  e1000_setup_link(struct e1000_hw *hw);
+s32  e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed,
+                                u16 *duplex);
+s32  e1000_disable_pcie_master(struct e1000_hw *hw);
+void e1000_config_collision_dist(struct e1000_hw *hw);
+void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index);
+u32  e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr);
+void e1000_update_mc_addr_list(struct e1000_hw *hw,
+                               u8 *mc_addr_list, u32 mc_addr_count);
+s32  e1000_setup_led(struct e1000_hw *hw);
+s32  e1000_cleanup_led(struct e1000_hw *hw);
+s32  e1000_check_reset_block(struct e1000_hw *hw);
+s32  e1000_blink_led(struct e1000_hw *hw);
+s32  e1000_led_on(struct e1000_hw *hw);
+s32  e1000_led_off(struct e1000_hw *hw);
+s32 e1000_id_led_init(struct e1000_hw *hw);
+void e1000_reset_adaptive(struct e1000_hw *hw);
+void e1000_update_adaptive(struct e1000_hw *hw);
+s32  e1000_get_cable_length(struct e1000_hw *hw);
+s32  e1000_validate_mdi_setting(struct e1000_hw *hw);
+s32  e1000_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data);
+s32  e1000_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data);
+s32  e1000_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg,
+                               u32 offset, u8 data);
+s32  e1000_get_phy_info(struct e1000_hw *hw);
+void e1000_release_phy(struct e1000_hw *hw);
+s32  e1000_acquire_phy(struct e1000_hw *hw);
+s32  e1000_phy_hw_reset(struct e1000_hw *hw);
+s32  e1000_phy_commit(struct e1000_hw *hw);
+void e1000_power_up_phy(struct e1000_hw *hw);
+void e1000_power_down_phy(struct e1000_hw *hw);
+s32  e1000_read_mac_addr(struct e1000_hw *hw);
+s32  e1000_read_pba_string(struct e1000_hw *hw, u8 *pba_num, 
+                           u32 pba_num_size);
+s32  e1000_read_pba_length(struct e1000_hw *hw, u32 *pba_num_size);
+void e1000_reload_nvm(struct e1000_hw *hw);
+s32  e1000_update_nvm_checksum(struct e1000_hw *hw);
+s32  e1000_validate_nvm_checksum(struct e1000_hw *hw);
+s32  e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+s32  e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data);
+s32  e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data);
+s32  e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words,
+                     u16 *data);
+s32  e1000_wait_autoneg(struct e1000_hw *hw);
+s32  e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active);
+s32  e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active);
+bool e1000_check_mng_mode(struct e1000_hw *hw);
+bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw);
+s32  e1000_mng_enable_host_if(struct e1000_hw *hw);
+s32  e1000_mng_host_if_write(struct e1000_hw *hw,
+                             u8 *buffer, u16 length, u16 offset, u8 *sum);
+s32  e1000_mng_write_cmd_header(struct e1000_hw *hw,
+                                struct e1000_host_mng_command_header *hdr);
+s32  e1000_mng_write_dhcp_info(struct e1000_hw * hw,
+                                    u8 *buffer, u16 length);
+
+/*
+ * TBI_ACCEPT macro definition:
+ *
+ * This macro requires:
+ *      adapter = a pointer to struct e1000_hw
+ *      status = the 8 bit status field of the Rx descriptor with EOP set
+ *      errors = the 8 bit errors field of the Rx descriptor with EOP set
+ *      length = the sum of all the length fields of the Rx descriptors that
+ *               make up the current frame
+ *      last_byte = the last byte of the frame DMAed by the hardware
+ *      max_frame_length = the maximum frame length we want to accept.
+ *      min_frame_length = the minimum frame length we want to accept.
+ *
+ * This macro is a conditional that should be used in the interrupt
+ * handler's Rx processing routine when RxErrors have been detected.
+ *
+ * Typical use:
+ *  ...
+ *  if (TBI_ACCEPT) {
+ *      accept_frame = TRUE;
+ *      e1000_tbi_adjust_stats(adapter, MacAddress);
+ *      frame_length--;
+ *  } else {
+ *      accept_frame = FALSE;
+ *  }
+ *  ...
+ */
+
+/* The carrier extension symbol, as received by the NIC. */
+#define CARRIER_EXTENSION   0x0F
+
+#define TBI_ACCEPT(a, status, errors, length, last_byte, min_frame_size, max_frame_size) \
+    (e1000_tbi_sbp_enabled_82543(a) && \
+     (((errors) & E1000_RXD_ERR_FRAME_ERR_MASK) == E1000_RXD_ERR_CE) && \
+     ((last_byte) == CARRIER_EXTENSION) && \
+     (((status) & E1000_RXD_STAT_VP) ? \
+          (((length) > (min_frame_size - VLAN_TAG_SIZE)) && \
+           ((length) <= (max_frame_size + 1))) : \
+          (((length) > min_frame_size) && \
+           ((length) <= (max_frame_size + VLAN_TAG_SIZE + 1)))))
+
+#endif
diff --git a/lib/librte_pmd_igb/igb/e1000_defines.h b/lib/librte_pmd_igb/igb/e1000_defines.h
new file mode 100644 (file)
index 0000000..a7be67c
--- /dev/null
@@ -0,0 +1,1733 @@
+/******************************************************************************
+
+  Copyright (c) 2001-2011, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#ifndef _E1000_DEFINES_H_
+#define _E1000_DEFINES_H_
+
+/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
+#define REQ_TX_DESCRIPTOR_MULTIPLE  8
+#define REQ_RX_DESCRIPTOR_MULTIPLE  8
+
+/* Definitions for power management and wakeup registers */
+/* Wake Up Control */
+#define E1000_WUC_APME       0x00000001 /* APM Enable */
+#define E1000_WUC_PME_EN     0x00000002 /* PME Enable */
+#define E1000_WUC_PME_STATUS 0x00000004 /* PME Status */
+#define E1000_WUC_APMPME     0x00000008 /* Assert PME on APM Wakeup */
+#define E1000_WUC_LSCWE      0x00000010 /* Link Status wake up enable */
+#define E1000_WUC_PPROXYE    0x00000010 /* Protocol Proxy Enable */
+#define E1000_WUC_LSCWO      0x00000020 /* Link Status wake up override */
+#define E1000_WUC_SPM        0x80000000 /* Enable SPM */
+#define E1000_WUC_PHY_WAKE   0x00000100 /* if PHY supports wakeup */
+
+/* Wake Up Filter Control */
+#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
+#define E1000_WUFC_MAG  0x00000002 /* Magic Packet Wakeup Enable */
+#define E1000_WUFC_EX   0x00000004 /* Directed Exact Wakeup Enable */
+#define E1000_WUFC_MC   0x00000008 /* Directed Multicast Wakeup Enable */
+#define E1000_WUFC_BC   0x00000010 /* Broadcast Wakeup Enable */
+#define E1000_WUFC_ARP  0x00000020 /* ARP Request Packet Wakeup Enable */
+#define E1000_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */
+#define E1000_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */
+#define E1000_WUFC_IGNORE_TCO   0x00008000 /* Ignore WakeOn TCO packets */
+#define E1000_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */
+#define E1000_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */
+#define E1000_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */
+#define E1000_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */
+#define E1000_WUFC_FW_RST 0x80000000 /* Wake on FW Reset Enable */
+#define E1000_WUFC_ALL_FILTERS  0x000F00FF /* Mask for all wakeup filters */
+#define E1000_WUFC_FLX_OFFSET   16 /* Offset to the Flexible Filters bits */
+#define E1000_WUFC_FLX_FILTERS  0x000F0000 /* Mask for the 4 flexible filters */
+/*
+ * For 82576 to utilize Extended filter masks in addition to
+ * existing (filter) masks
+ */
+#define E1000_WUFC_EXT_FLX_FILTERS      0x00300000 /* Ext. FLX filter mask */
+
+/* Wake Up Status */
+#define E1000_WUS_LNKC         E1000_WUFC_LNKC
+#define E1000_WUS_MAG          E1000_WUFC_MAG
+#define E1000_WUS_EX           E1000_WUFC_EX
+#define E1000_WUS_MC           E1000_WUFC_MC
+#define E1000_WUS_BC           E1000_WUFC_BC
+#define E1000_WUS_ARP          E1000_WUFC_ARP
+#define E1000_WUS_IPV4         E1000_WUFC_IPV4
+#define E1000_WUS_IPV6         E1000_WUFC_IPV6
+#define E1000_WUS_FLX0         E1000_WUFC_FLX0
+#define E1000_WUS_FLX1         E1000_WUFC_FLX1
+#define E1000_WUS_FLX2         E1000_WUFC_FLX2
+#define E1000_WUS_FLX3         E1000_WUFC_FLX3
+#define E1000_WUS_FLX_FILTERS  E1000_WUFC_FLX_FILTERS
+
+/* Wake Up Packet Length */
+#define E1000_WUPL_LENGTH_MASK 0x0FFF   /* Only the lower 12 bits are valid */
+
+/* Four Flexible Filters are supported */
+#define E1000_FLEXIBLE_FILTER_COUNT_MAX 4
+/* Two Extended Flexible Filters are supported (82576) */
+#define E1000_EXT_FLEXIBLE_FILTER_COUNT_MAX     2
+#define E1000_FHFT_LENGTH_OFFSET        0xFC /* Length byte in FHFT */
+#define E1000_FHFT_LENGTH_MASK          0x0FF /* Length in lower byte */
+
+/* Each Flexible Filter is at most 128 (0x80) bytes in length */
+#define E1000_FLEXIBLE_FILTER_SIZE_MAX  128
+
+#define E1000_FFLT_SIZE E1000_FLEXIBLE_FILTER_COUNT_MAX
+#define E1000_FFMT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX
+#define E1000_FFVT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX
+
+/* Extended Device Control */
+#define E1000_CTRL_EXT_GPI0_EN   0x00000001 /* Maps SDP4 to GPI0 */
+#define E1000_CTRL_EXT_GPI1_EN   0x00000002 /* Maps SDP5 to GPI1 */
+#define E1000_CTRL_EXT_PHYINT_EN E1000_CTRL_EXT_GPI1_EN
+#define E1000_CTRL_EXT_GPI2_EN   0x00000004 /* Maps SDP6 to GPI2 */
+#define E1000_CTRL_EXT_GPI3_EN   0x00000008 /* Maps SDP7 to GPI3 */
+/* Reserved (bits 4,5) in >= 82575 */
+#define E1000_CTRL_EXT_SDP4_DATA 0x00000010 /* Value of SW Definable Pin 4 */
+#define E1000_CTRL_EXT_SDP5_DATA 0x00000020 /* Value of SW Definable Pin 5 */
+#define E1000_CTRL_EXT_PHY_INT   E1000_CTRL_EXT_SDP5_DATA
+#define E1000_CTRL_EXT_SDP6_DATA 0x00000040 /* Value of SW Definable Pin 6 */
+#define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* Value of SW Definable Pin 3 */
+/* SDP 4/5 (bits 8,9) are reserved in >= 82575 */
+#define E1000_CTRL_EXT_SDP4_DIR  0x00000100 /* Direction of SDP4 0=in 1=out */
+#define E1000_CTRL_EXT_SDP5_DIR  0x00000200 /* Direction of SDP5 0=in 1=out */
+#define E1000_CTRL_EXT_SDP6_DIR  0x00000400 /* Direction of SDP6 0=in 1=out */
+#define E1000_CTRL_EXT_SDP3_DIR  0x00000800 /* Direction of SDP3 0=in 1=out */
+#define E1000_CTRL_EXT_ASDCHK    0x00001000 /* Initiate an ASD sequence */
+#define E1000_CTRL_EXT_EE_RST    0x00002000 /* Reinitialize from EEPROM */
+#define E1000_CTRL_EXT_IPS       0x00004000 /* Invert Power State */
+/* Physical Func Reset Done Indication */
+#define E1000_CTRL_EXT_PFRSTD    0x00004000
+#define E1000_CTRL_EXT_SPD_BYPS  0x00008000 /* Speed Select Bypass */
+#define E1000_CTRL_EXT_RO_DIS    0x00020000 /* Relaxed Ordering disable */
+#define E1000_CTRL_EXT_DMA_DYN_CLK_EN 0x00080000 /* DMA Dynamic Clock Gating */
+#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
+#define E1000_CTRL_EXT_LINK_MODE_82580_MASK 0x01C00000 /*82580 bit 24:22*/
+#define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX  0x00400000
+#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000
+#define E1000_CTRL_EXT_LINK_MODE_TBI  0x00C00000
+#define E1000_CTRL_EXT_LINK_MODE_KMRN    0x00000000
+#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES  0x00C00000
+#define E1000_CTRL_EXT_LINK_MODE_PCIX_SERDES  0x00800000
+#define E1000_CTRL_EXT_LINK_MODE_SGMII   0x00800000
+#define E1000_CTRL_EXT_EIAME          0x01000000
+#define E1000_CTRL_EXT_IRCA           0x00000001
+#define E1000_CTRL_EXT_WR_WMARK_MASK  0x03000000
+#define E1000_CTRL_EXT_WR_WMARK_256   0x00000000
+#define E1000_CTRL_EXT_WR_WMARK_320   0x01000000
+#define E1000_CTRL_EXT_WR_WMARK_384   0x02000000
+#define E1000_CTRL_EXT_WR_WMARK_448   0x03000000
+#define E1000_CTRL_EXT_CANC           0x04000000 /* Int delay cancellation */
+#define E1000_CTRL_EXT_DRV_LOAD       0x10000000 /* Driver loaded bit for FW */
+/* IAME enable bit (27) was removed in >= 82575 */
+#define E1000_CTRL_EXT_IAME          0x08000000 /* Int acknowledge Auto-mask */
+#define E1000_CRTL_EXT_PB_PAREN       0x01000000 /* packet buffer parity error
+                                                  * detection enabled */
+#define E1000_CTRL_EXT_DF_PAREN       0x02000000 /* descriptor FIFO parity
+                                                  * error detection enable */
+#define E1000_CTRL_EXT_GHOST_PAREN    0x40000000
+#define E1000_CTRL_EXT_PBA_CLR        0x80000000 /* PBA Clear */
+#define E1000_I2CCMD_REG_ADDR_SHIFT   16
+#define E1000_I2CCMD_REG_ADDR         0x00FF0000
+#define E1000_I2CCMD_PHY_ADDR_SHIFT   24
+#define E1000_I2CCMD_PHY_ADDR         0x07000000
+#define E1000_I2CCMD_OPCODE_READ      0x08000000
+#define E1000_I2CCMD_OPCODE_WRITE     0x00000000
+#define E1000_I2CCMD_RESET            0x10000000
+#define E1000_I2CCMD_READY            0x20000000
+#define E1000_I2CCMD_INTERRUPT_ENA    0x40000000
+#define E1000_I2CCMD_ERROR            0x80000000
+#define E1000_MAX_SGMII_PHY_REG_ADDR  255
+#define E1000_I2CCMD_PHY_TIMEOUT      200
+#define E1000_IVAR_VALID        0x80
+#define E1000_GPIE_NSICR        0x00000001
+#define E1000_GPIE_MSIX_MODE    0x00000010
+#define E1000_GPIE_EIAME        0x40000000
+#define E1000_GPIE_PBA          0x80000000
+
+/* Receive Descriptor bit definitions */
+#define E1000_RXD_STAT_DD       0x01    /* Descriptor Done */
+#define E1000_RXD_STAT_EOP      0x02    /* End of Packet */
+#define E1000_RXD_STAT_IXSM     0x04    /* Ignore checksum */
+#define E1000_RXD_STAT_VP       0x08    /* IEEE VLAN Packet */
+#define E1000_RXD_STAT_UDPCS    0x10    /* UDP xsum calculated */
+#define E1000_RXD_STAT_TCPCS    0x20    /* TCP xsum calculated */
+#define E1000_RXD_STAT_IPCS     0x40    /* IP xsum calculated */
+#define E1000_RXD_STAT_PIF      0x80    /* passed in-exact filter */
+#define E1000_RXD_STAT_CRCV     0x100   /* Speculative CRC Valid */
+#define E1000_RXD_STAT_IPIDV    0x200   /* IP identification valid */
+#define E1000_RXD_STAT_UDPV     0x400   /* Valid UDP checksum */
+#define E1000_RXD_STAT_DYNINT   0x800   /* Pkt caused INT via DYNINT */
+#define E1000_RXD_STAT_ACK      0x8000  /* ACK Packet indication */
+#define E1000_RXD_ERR_CE        0x01    /* CRC Error */
+#define E1000_RXD_ERR_SE        0x02    /* Symbol Error */
+#define E1000_RXD_ERR_SEQ       0x04    /* Sequence Error */
+#define E1000_RXD_ERR_CXE       0x10    /* Carrier Extension Error */
+#define E1000_RXD_ERR_TCPE      0x20    /* TCP/UDP Checksum Error */
+#define E1000_RXD_ERR_IPE       0x40    /* IP Checksum Error */
+#define E1000_RXD_ERR_RXE       0x80    /* Rx Data Error */
+#define E1000_RXD_SPC_VLAN_MASK 0x0FFF  /* VLAN ID is in lower 12 bits */
+#define E1000_RXD_SPC_PRI_MASK  0xE000  /* Priority is in upper 3 bits */
+#define E1000_RXD_SPC_PRI_SHIFT 13
+#define E1000_RXD_SPC_CFI_MASK  0x1000  /* CFI is bit 12 */
+#define E1000_RXD_SPC_CFI_SHIFT 12
+
+#define E1000_RXDEXT_STATERR_LB    0x00040000
+#define E1000_RXDEXT_STATERR_CE    0x01000000
+#define E1000_RXDEXT_STATERR_SE    0x02000000
+#define E1000_RXDEXT_STATERR_SEQ   0x04000000
+#define E1000_RXDEXT_STATERR_CXE   0x10000000
+#define E1000_RXDEXT_STATERR_TCPE  0x20000000
+#define E1000_RXDEXT_STATERR_IPE   0x40000000
+#define E1000_RXDEXT_STATERR_RXE   0x80000000
+
+/* mask to determine if packets should be dropped due to frame errors */
+#define E1000_RXD_ERR_FRAME_ERR_MASK ( \
+    E1000_RXD_ERR_CE  |                \
+    E1000_RXD_ERR_SE  |                \
+    E1000_RXD_ERR_SEQ |                \
+    E1000_RXD_ERR_CXE |                \
+    E1000_RXD_ERR_RXE)
+
+/* Same mask, but for extended and packet split descriptors */
+#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \
+    E1000_RXDEXT_STATERR_CE  |            \
+    E1000_RXDEXT_STATERR_SE  |            \
+    E1000_RXDEXT_STATERR_SEQ |            \
+    E1000_RXDEXT_STATERR_CXE |            \
+    E1000_RXDEXT_STATERR_RXE)
+
+#define E1000_MRQC_ENABLE_MASK                 0x00000007
+#define E1000_MRQC_ENABLE_RSS_2Q               0x00000001
+#define E1000_MRQC_ENABLE_RSS_INT              0x00000004
+#define E1000_MRQC_RSS_FIELD_MASK              0xFFFF0000
+#define E1000_MRQC_RSS_FIELD_IPV4_TCP          0x00010000
+#define E1000_MRQC_RSS_FIELD_IPV4              0x00020000
+#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX       0x00040000
+#define E1000_MRQC_RSS_FIELD_IPV6_EX           0x00080000
+#define E1000_MRQC_RSS_FIELD_IPV6              0x00100000
+#define E1000_MRQC_RSS_FIELD_IPV6_TCP          0x00200000
+
+#define E1000_RXDPS_HDRSTAT_HDRSP              0x00008000
+#define E1000_RXDPS_HDRSTAT_HDRLEN_MASK        0x000003FF
+
+/* Management Control */
+#define E1000_MANC_SMBUS_EN      0x00000001 /* SMBus Enabled - RO */
+#define E1000_MANC_ASF_EN        0x00000002 /* ASF Enabled - RO */
+#define E1000_MANC_R_ON_FORCE    0x00000004 /* Reset on Force TCO - RO */
+#define E1000_MANC_RMCP_EN       0x00000100 /* Enable RCMP 026Fh Filtering */
+#define E1000_MANC_0298_EN       0x00000200 /* Enable RCMP 0298h Filtering */
+#define E1000_MANC_IPV4_EN       0x00000400 /* Enable IPv4 */
+#define E1000_MANC_IPV6_EN       0x00000800 /* Enable IPv6 */
+#define E1000_MANC_SNAP_EN       0x00001000 /* Accept LLC/SNAP */
+#define E1000_MANC_ARP_EN        0x00002000 /* Enable ARP Request Filtering */
+/* Enable Neighbor Discovery Filtering */
+#define E1000_MANC_NEIGHBOR_EN   0x00004000
+#define E1000_MANC_ARP_RES_EN    0x00008000 /* Enable ARP response Filtering */
+#define E1000_MANC_TCO_RESET     0x00010000 /* TCO Reset Occurred */
+#define E1000_MANC_RCV_TCO_EN    0x00020000 /* Receive TCO Packets Enabled */
+#define E1000_MANC_REPORT_STATUS 0x00040000 /* Status Reporting Enabled */
+#define E1000_MANC_RCV_ALL       0x00080000 /* Receive All Enabled */
+#define E1000_MANC_BLK_PHY_RST_ON_IDE   0x00040000 /* Block phy resets */
+/* Enable MAC address filtering */
+#define E1000_MANC_EN_MAC_ADDR_FILTER   0x00100000
+/* Enable MNG packets to host memory */
+#define E1000_MANC_EN_MNG2HOST   0x00200000
+/* Enable IP address filtering */
+#define E1000_MANC_EN_IP_ADDR_FILTER    0x00400000
+#define E1000_MANC_EN_XSUM_FILTER   0x00800000 /* Enable checksum filtering */
+#define E1000_MANC_BR_EN            0x01000000 /* Enable broadcast filtering */
+#define E1000_MANC_SMB_REQ       0x01000000 /* SMBus Request */
+#define E1000_MANC_SMB_GNT       0x02000000 /* SMBus Grant */
+#define E1000_MANC_SMB_CLK_IN    0x04000000 /* SMBus Clock In */
+#define E1000_MANC_SMB_DATA_IN   0x08000000 /* SMBus Data In */
+#define E1000_MANC_SMB_DATA_OUT  0x10000000 /* SMBus Data Out */
+#define E1000_MANC_SMB_CLK_OUT   0x20000000 /* SMBus Clock Out */
+#define E1000_MANC_MPROXYE       0x40000000 /* Mngment Proxy Enable */
+#define E1000_MANC_EN_BMC2OS     0x10000000 /* OS2BMC is enabled or not */
+
+#define E1000_MANC_SMB_DATA_OUT_SHIFT  28 /* SMBus Data Out Shift */
+#define E1000_MANC_SMB_CLK_OUT_SHIFT   29 /* SMBus Clock Out Shift */
+
+#define E1000_MANC2H_PORT_623    0x00000020 /* Port 0x26f */
+#define E1000_MANC2H_PORT_664    0x00000040 /* Port 0x298 */
+#define E1000_MDEF_PORT_623      0x00000800 /* Port 0x26f */
+#define E1000_MDEF_PORT_664      0x00000400 /* Port 0x298 */
+
+/* Receive Control */
+#define E1000_RCTL_RST            0x00000001    /* Software reset */
+#define E1000_RCTL_EN             0x00000002    /* enable */
+#define E1000_RCTL_SBP            0x00000004    /* store bad packet */
+#define E1000_RCTL_UPE            0x00000008    /* unicast promisc enable */
+#define E1000_RCTL_MPE            0x00000010    /* multicast promisc enable */
+#define E1000_RCTL_LPE            0x00000020    /* long packet enable */
+#define E1000_RCTL_LBM_NO         0x00000000    /* no loopback mode */
+#define E1000_RCTL_LBM_MAC        0x00000040    /* MAC loopback mode */
+#define E1000_RCTL_LBM_SLP        0x00000080    /* serial link loopback mode */
+#define E1000_RCTL_LBM_TCVR       0x000000C0    /* tcvr loopback mode */
+#define E1000_RCTL_DTYP_MASK      0x00000C00    /* Descriptor type mask */
+#define E1000_RCTL_DTYP_PS        0x00000400    /* Packet Split descriptor */
+#define E1000_RCTL_RDMTS_HALF     0x00000000    /* Rx desc min thresh size */
+#define E1000_RCTL_RDMTS_QUAT     0x00000100    /* Rx desc min thresh size */
+#define E1000_RCTL_RDMTS_EIGTH    0x00000200    /* Rx desc min thresh size */
+#define E1000_RCTL_MO_SHIFT       12            /* multicast offset shift */
+#define E1000_RCTL_MO_0           0x00000000    /* multicast offset 11:0 */
+#define E1000_RCTL_MO_1           0x00001000    /* multicast offset 12:1 */
+#define E1000_RCTL_MO_2           0x00002000    /* multicast offset 13:2 */
+#define E1000_RCTL_MO_3           0x00003000    /* multicast offset 15:4 */
+#define E1000_RCTL_MDR            0x00004000    /* multicast desc ring 0 */
+#define E1000_RCTL_BAM            0x00008000    /* broadcast enable */
+/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */
+#define E1000_RCTL_SZ_2048        0x00000000    /* Rx buffer size 2048 */
+#define E1000_RCTL_SZ_1024        0x00010000    /* Rx buffer size 1024 */
+#define E1000_RCTL_SZ_512         0x00020000    /* Rx buffer size 512 */
+#define E1000_RCTL_SZ_256         0x00030000    /* Rx buffer size 256 */
+/* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */
+#define E1000_RCTL_SZ_16384       0x00010000    /* Rx buffer size 16384 */
+#define E1000_RCTL_SZ_8192        0x00020000    /* Rx buffer size 8192 */
+#define E1000_RCTL_SZ_4096        0x00030000    /* Rx buffer size 4096 */
+#define E1000_RCTL_VFE            0x00040000    /* vlan filter enable */
+#define E1000_RCTL_CFIEN          0x00080000    /* canonical form enable */
+#define E1000_RCTL_CFI            0x00100000    /* canonical form indicator */
+#define E1000_RCTL_DPF            0x00400000    /* discard pause frames */
+#define E1000_RCTL_PMCF           0x00800000    /* pass MAC control frames */
+#define E1000_RCTL_BSEX           0x02000000    /* Buffer size extension */
+#define E1000_RCTL_SECRC          0x04000000    /* Strip Ethernet CRC */
+#define E1000_RCTL_FLXBUF_MASK    0x78000000    /* Flexible buffer size */
+#define E1000_RCTL_FLXBUF_SHIFT   27            /* Flexible buffer shift */
+
+/*
+ * Use byte values for the following shift parameters
+ * Usage:
+ *     psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) &
+ *                  E1000_PSRCTL_BSIZE0_MASK) |
+ *                ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) &
+ *                  E1000_PSRCTL_BSIZE1_MASK) |
+ *                ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) &
+ *                  E1000_PSRCTL_BSIZE2_MASK) |
+ *                ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) &
+ *                  E1000_PSRCTL_BSIZE3_MASK))
+ * where value0 = [128..16256],  default=256
+ *       value1 = [1024..64512], default=4096
+ *       value2 = [0..64512],    default=4096
+ *       value3 = [0..64512],    default=0
+ */
+
+#define E1000_PSRCTL_BSIZE0_MASK   0x0000007F
+#define E1000_PSRCTL_BSIZE1_MASK   0x00003F00
+#define E1000_PSRCTL_BSIZE2_MASK   0x003F0000
+#define E1000_PSRCTL_BSIZE3_MASK   0x3F000000
+
+#define E1000_PSRCTL_BSIZE0_SHIFT  7            /* Shift _right_ 7 */
+#define E1000_PSRCTL_BSIZE1_SHIFT  2            /* Shift _right_ 2 */
+#define E1000_PSRCTL_BSIZE2_SHIFT  6            /* Shift _left_ 6 */
+#define E1000_PSRCTL_BSIZE3_SHIFT 14            /* Shift _left_ 14 */
+
+/* SWFW_SYNC Definitions */
+#define E1000_SWFW_EEP_SM   0x01
+#define E1000_SWFW_PHY0_SM  0x02
+#define E1000_SWFW_PHY1_SM  0x04
+#define E1000_SWFW_CSR_SM   0x08
+#define E1000_SWFW_PHY2_SM  0x20
+#define E1000_SWFW_PHY3_SM  0x40
+#define E1000_SWFW_SW_MNG_SM 0x400
+
+/* FACTPS Definitions */
+#define E1000_FACTPS_LFS    0x40000000  /* LAN Function Select */
+/* Device Control */
+#define E1000_CTRL_FD       0x00000001  /* Full duplex.0=half; 1=full */
+#define E1000_CTRL_BEM      0x00000002  /* Endian Mode.0=little,1=big */
+#define E1000_CTRL_PRIOR    0x00000004  /* Priority on PCI. 0=rx,1=fair */
+#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master reqs */
+#define E1000_CTRL_LRST     0x00000008  /* Link reset. 0=normal,1=reset */
+#define E1000_CTRL_TME      0x00000010  /* Test mode. 0=normal,1=test */
+#define E1000_CTRL_SLE      0x00000020  /* Serial Link on 0=dis,1=en */
+#define E1000_CTRL_ASDE     0x00000020  /* Auto-speed detect enable */
+#define E1000_CTRL_SLU      0x00000040  /* Set link up (Force Link) */
+#define E1000_CTRL_ILOS     0x00000080  /* Invert Loss-Of Signal */
+#define E1000_CTRL_SPD_SEL  0x00000300  /* Speed Select Mask */
+#define E1000_CTRL_SPD_10   0x00000000  /* Force 10Mb */
+#define E1000_CTRL_SPD_100  0x00000100  /* Force 100Mb */
+#define E1000_CTRL_SPD_1000 0x00000200  /* Force 1Gb */
+#define E1000_CTRL_BEM32    0x00000400  /* Big Endian 32 mode */
+#define E1000_CTRL_FRCSPD   0x00000800  /* Force Speed */
+#define E1000_CTRL_FRCDPX   0x00001000  /* Force Duplex */
+#define E1000_CTRL_D_UD_EN  0x00002000  /* Dock/Undock enable */
+#define E1000_CTRL_D_UD_POLARITY 0x00004000 /* Defined polarity of Dock/Undock
+                                             * indication in SDP[0] */
+#define E1000_CTRL_FORCE_PHY_RESET 0x00008000 /* Reset both PHY ports, through
+                                               * PHYRST_N pin */
+#define E1000_CTRL_EXT_LINK_EN 0x00010000 /* enable link status from external
+                                           * LINK_0 and LINK_1 pins */
+#define E1000_CTRL_SWDPIN0  0x00040000  /* SWDPIN 0 value */
+#define E1000_CTRL_SWDPIN1  0x00080000  /* SWDPIN 1 value */
+#define E1000_CTRL_SWDPIN2  0x00100000  /* SWDPIN 2 value */
+#define E1000_CTRL_ADVD3WUC 0x00100000  /* D3 WUC */
+#define E1000_CTRL_SWDPIN3  0x00200000  /* SWDPIN 3 value */
+#define E1000_CTRL_SWDPIO0  0x00400000  /* SWDPIN 0 Input or output */
+#define E1000_CTRL_SWDPIO1  0x00800000  /* SWDPIN 1 input or output */
+#define E1000_CTRL_SWDPIO2  0x01000000  /* SWDPIN 2 input or output */
+#define E1000_CTRL_SWDPIO3  0x02000000  /* SWDPIN 3 input or output */
+#define E1000_CTRL_RST      0x04000000  /* Global reset */
+#define E1000_CTRL_RFCE     0x08000000  /* Receive Flow Control enable */
+#define E1000_CTRL_TFCE     0x10000000  /* Transmit flow control enable */
+#define E1000_CTRL_RTE      0x20000000  /* Routing tag enable */
+#define E1000_CTRL_VME      0x40000000  /* IEEE VLAN mode enable */
+#define E1000_CTRL_PHY_RST  0x80000000  /* PHY Reset */
+#define E1000_CTRL_SW2FW_INT 0x02000000 /* Initiate an interrupt to ME */
+#define E1000_CTRL_I2C_ENA  0x02000000  /* I2C enable */
+
+/*
+ * Bit definitions for the Management Data IO (MDIO) and Management Data
+ * Clock (MDC) pins in the Device Control Register.
+ */
+#define E1000_CTRL_PHY_RESET_DIR  E1000_CTRL_SWDPIO0
+#define E1000_CTRL_PHY_RESET      E1000_CTRL_SWDPIN0
+#define E1000_CTRL_MDIO_DIR       E1000_CTRL_SWDPIO2
+#define E1000_CTRL_MDIO           E1000_CTRL_SWDPIN2
+#define E1000_CTRL_MDC_DIR        E1000_CTRL_SWDPIO3
+#define E1000_CTRL_MDC            E1000_CTRL_SWDPIN3
+#define E1000_CTRL_PHY_RESET_DIR4 E1000_CTRL_EXT_SDP4_DIR
+#define E1000_CTRL_PHY_RESET4     E1000_CTRL_EXT_SDP4_DATA
+
+#define E1000_CONNSW_ENRGSRC             0x4
+#define E1000_PCS_CFG_PCS_EN             8
+#define E1000_PCS_LCTL_FLV_LINK_UP       1
+#define E1000_PCS_LCTL_FSV_10            0
+#define E1000_PCS_LCTL_FSV_100           2
+#define E1000_PCS_LCTL_FSV_1000          4
+#define E1000_PCS_LCTL_FDV_FULL          8
+#define E1000_PCS_LCTL_FSD               0x10
+#define E1000_PCS_LCTL_FORCE_LINK        0x20
+#define E1000_PCS_LCTL_LOW_LINK_LATCH    0x40
+#define E1000_PCS_LCTL_FORCE_FCTRL       0x80
+#define E1000_PCS_LCTL_AN_ENABLE         0x10000
+#define E1000_PCS_LCTL_AN_RESTART        0x20000
+#define E1000_PCS_LCTL_AN_TIMEOUT        0x40000
+#define E1000_PCS_LCTL_AN_SGMII_BYPASS   0x80000
+#define E1000_PCS_LCTL_AN_SGMII_TRIGGER  0x100000
+#define E1000_PCS_LCTL_FAST_LINK_TIMER   0x1000000
+#define E1000_PCS_LCTL_LINK_OK_FIX       0x2000000
+#define E1000_PCS_LCTL_CRS_ON_NI         0x4000000
+#define E1000_ENABLE_SERDES_LOOPBACK     0x0410
+
+#define E1000_PCS_LSTS_LINK_OK           1
+#define E1000_PCS_LSTS_SPEED_10          0
+#define E1000_PCS_LSTS_SPEED_100         2
+#define E1000_PCS_LSTS_SPEED_1000        4
+#define E1000_PCS_LSTS_DUPLEX_FULL       8
+#define E1000_PCS_LSTS_SYNK_OK           0x10
+#define E1000_PCS_LSTS_AN_COMPLETE       0x10000
+#define E1000_PCS_LSTS_AN_PAGE_RX        0x20000
+#define E1000_PCS_LSTS_AN_TIMED_OUT      0x40000
+#define E1000_PCS_LSTS_AN_REMOTE_FAULT   0x80000
+#define E1000_PCS_LSTS_AN_ERROR_RWS      0x100000
+
+/* Device Status */
+#define E1000_STATUS_FD         0x00000001      /* Full duplex.0=half,1=full */
+#define E1000_STATUS_LU         0x00000002      /* Link up.0=no,1=link */
+#define E1000_STATUS_FUNC_MASK  0x0000000C      /* PCI Function Mask */
+#define E1000_STATUS_FUNC_SHIFT 2
+#define E1000_STATUS_FUNC_0     0x00000000      /* Function 0 */
+#define E1000_STATUS_FUNC_1     0x00000004      /* Function 1 */
+#define E1000_STATUS_TXOFF      0x00000010      /* transmission paused */
+#define E1000_STATUS_TBIMODE    0x00000020      /* TBI mode */
+#define E1000_STATUS_SPEED_MASK 0x000000C0
+#define E1000_STATUS_SPEED_10   0x00000000      /* Speed 10Mb/s */
+#define E1000_STATUS_SPEED_100  0x00000040      /* Speed 100Mb/s */
+#define E1000_STATUS_SPEED_1000 0x00000080      /* Speed 1000Mb/s */
+#define E1000_STATUS_LAN_INIT_DONE 0x00000200  /* Lan Init Completion by NVM */
+#define E1000_STATUS_ASDV       0x00000300      /* Auto speed detect value */
+#define E1000_STATUS_PHYRA      0x00000400      /* PHY Reset Asserted */
+#define E1000_STATUS_DOCK_CI    0x00000800      /* Change in Dock/Undock state.
+                                                 * Clear on write '0'. */
+#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Master request status */
+#define E1000_STATUS_MTXCKOK    0x00000400      /* MTX clock running OK */
+#define E1000_STATUS_PCI66      0x00000800      /* In 66Mhz slot */
+#define E1000_STATUS_BUS64      0x00001000      /* In 64 bit slot */
+#define E1000_STATUS_PCIX_MODE  0x00002000      /* PCI-X mode */
+#define E1000_STATUS_PCIX_SPEED 0x0000C000      /* PCI-X bus speed */
+#define E1000_STATUS_BMC_SKU_0  0x00100000 /* BMC USB redirect disabled */
+#define E1000_STATUS_BMC_SKU_1  0x00200000 /* BMC SRAM disabled */
+#define E1000_STATUS_BMC_SKU_2  0x00400000 /* BMC SDRAM disabled */
+#define E1000_STATUS_BMC_CRYPTO 0x00800000 /* BMC crypto disabled */
+#define E1000_STATUS_BMC_LITE   0x01000000 /* BMC external code execution
+                                            * disabled */
+#define E1000_STATUS_RGMII_ENABLE 0x02000000 /* RGMII disabled */
+#define E1000_STATUS_FUSE_8       0x04000000
+#define E1000_STATUS_FUSE_9       0x08000000
+#define E1000_STATUS_SERDES0_DIS  0x10000000 /* SERDES disabled on port 0 */
+#define E1000_STATUS_SERDES1_DIS  0x20000000 /* SERDES disabled on port 1 */
+
+/* Constants used to interpret the masked PCI-X bus speed. */
+#define E1000_STATUS_PCIX_SPEED_66  0x00000000 /* PCI-X bus speed 50-66 MHz */
+#define E1000_STATUS_PCIX_SPEED_100 0x00004000 /* PCI-X bus speed 66-100 MHz */
+#define E1000_STATUS_PCIX_SPEED_133 0x00008000 /*PCI-X bus speed 100-133 MHz*/
+
+#define SPEED_10    10
+#define SPEED_100   100
+#define SPEED_1000  1000
+#define HALF_DUPLEX 1
+#define FULL_DUPLEX 2
+
+#define PHY_FORCE_TIME   20
+
+#define ADVERTISE_10_HALF                 0x0001
+#define ADVERTISE_10_FULL                 0x0002
+#define ADVERTISE_100_HALF                0x0004
+#define ADVERTISE_100_FULL                0x0008
+#define ADVERTISE_1000_HALF               0x0010 /* Not used, just FYI */
+#define ADVERTISE_1000_FULL               0x0020
+
+/* 1000/H is not supported, nor spec-compliant. */
+#define E1000_ALL_SPEED_DUPLEX  (ADVERTISE_10_HALF |   ADVERTISE_10_FULL | \
+                                ADVERTISE_100_HALF |  ADVERTISE_100_FULL | \
+                                                     ADVERTISE_1000_FULL)
+#define E1000_ALL_NOT_GIG       (ADVERTISE_10_HALF |   ADVERTISE_10_FULL | \
+                                ADVERTISE_100_HALF |  ADVERTISE_100_FULL)
+#define E1000_ALL_100_SPEED    (ADVERTISE_100_HALF |  ADVERTISE_100_FULL)
+#define E1000_ALL_10_SPEED      (ADVERTISE_10_HALF |   ADVERTISE_10_FULL)
+#define E1000_ALL_FULL_DUPLEX   (ADVERTISE_10_FULL |  ADVERTISE_100_FULL | \
+                                                     ADVERTISE_1000_FULL)
+#define E1000_ALL_HALF_DUPLEX   (ADVERTISE_10_HALF |  ADVERTISE_100_HALF)
+
+#define AUTONEG_ADVERTISE_SPEED_DEFAULT   E1000_ALL_SPEED_DUPLEX
+
+/* LED Control */
+#define E1000_LEDCTL_LED0_MODE_MASK       0x0000000F
+#define E1000_LEDCTL_LED0_MODE_SHIFT      0
+#define E1000_LEDCTL_LED0_BLINK_RATE      0x00000020
+#define E1000_LEDCTL_LED0_IVRT            0x00000040
+#define E1000_LEDCTL_LED0_BLINK           0x00000080
+#define E1000_LEDCTL_LED1_MODE_MASK       0x00000F00
+#define E1000_LEDCTL_LED1_MODE_SHIFT      8
+#define E1000_LEDCTL_LED1_BLINK_RATE      0x00002000
+#define E1000_LEDCTL_LED1_IVRT            0x00004000
+#define E1000_LEDCTL_LED1_BLINK           0x00008000
+#define E1000_LEDCTL_LED2_MODE_MASK       0x000F0000
+#define E1000_LEDCTL_LED2_MODE_SHIFT      16
+#define E1000_LEDCTL_LED2_BLINK_RATE      0x00200000
+#define E1000_LEDCTL_LED2_IVRT            0x00400000
+#define E1000_LEDCTL_LED2_BLINK           0x00800000
+#define E1000_LEDCTL_LED3_MODE_MASK       0x0F000000
+#define E1000_LEDCTL_LED3_MODE_SHIFT      24
+#define E1000_LEDCTL_LED3_BLINK_RATE      0x20000000
+#define E1000_LEDCTL_LED3_IVRT            0x40000000
+#define E1000_LEDCTL_LED3_BLINK           0x80000000
+
+#define E1000_LEDCTL_MODE_LINK_10_1000  0x0
+#define E1000_LEDCTL_MODE_LINK_100_1000 0x1
+#define E1000_LEDCTL_MODE_LINK_UP       0x2
+#define E1000_LEDCTL_MODE_ACTIVITY      0x3
+#define E1000_LEDCTL_MODE_LINK_ACTIVITY 0x4
+#define E1000_LEDCTL_MODE_LINK_10       0x5
+#define E1000_LEDCTL_MODE_LINK_100      0x6
+#define E1000_LEDCTL_MODE_LINK_1000     0x7
+#define E1000_LEDCTL_MODE_PCIX_MODE     0x8
+#define E1000_LEDCTL_MODE_FULL_DUPLEX   0x9
+#define E1000_LEDCTL_MODE_COLLISION     0xA
+#define E1000_LEDCTL_MODE_BUS_SPEED     0xB
+#define E1000_LEDCTL_MODE_BUS_SIZE      0xC
+#define E1000_LEDCTL_MODE_PAUSED        0xD
+#define E1000_LEDCTL_MODE_LED_ON        0xE
+#define E1000_LEDCTL_MODE_LED_OFF       0xF
+
+/* Transmit Descriptor bit definitions */
+#define E1000_TXD_DTYP_D     0x00100000 /* Data Descriptor */
+#define E1000_TXD_DTYP_C     0x00000000 /* Context Descriptor */
+#define E1000_TXD_POPTS_SHIFT 8         /* POPTS shift */
+#define E1000_TXD_POPTS_IXSM 0x01       /* Insert IP checksum */
+#define E1000_TXD_POPTS_TXSM 0x02       /* Insert TCP/UDP checksum */
+#define E1000_TXD_CMD_EOP    0x01000000 /* End of Packet */
+#define E1000_TXD_CMD_IFCS   0x02000000 /* Insert FCS (Ethernet CRC) */
+#define E1000_TXD_CMD_IC     0x04000000 /* Insert Checksum */
+#define E1000_TXD_CMD_RS     0x08000000 /* Report Status */
+#define E1000_TXD_CMD_RPS    0x10000000 /* Report Packet Sent */
+#define E1000_TXD_CMD_DEXT   0x20000000 /* Descriptor extension (0 = legacy) */
+#define E1000_TXD_CMD_VLE    0x40000000 /* Add VLAN tag */
+#define E1000_TXD_CMD_IDE    0x80000000 /* Enable Tidv register */
+#define E1000_TXD_STAT_DD    0x00000001 /* Descriptor Done */
+#define E1000_TXD_STAT_EC    0x00000002 /* Excess Collisions */
+#define E1000_TXD_STAT_LC    0x00000004 /* Late Collisions */
+#define E1000_TXD_STAT_TU    0x00000008 /* Transmit underrun */
+#define E1000_TXD_CMD_TCP    0x01000000 /* TCP packet */
+#define E1000_TXD_CMD_IP     0x02000000 /* IP packet */
+#define E1000_TXD_CMD_TSE    0x04000000 /* TCP Seg enable */
+#define E1000_TXD_STAT_TC    0x00000004 /* Tx Underrun */
+/* Extended desc bits for Linksec and timesync */
+
+/* Transmit Control */
+#define E1000_TCTL_RST    0x00000001    /* software reset */
+#define E1000_TCTL_EN     0x00000002    /* enable Tx */
+#define E1000_TCTL_BCE    0x00000004    /* busy check enable */
+#define E1000_TCTL_PSP    0x00000008    /* pad short packets */
+#define E1000_TCTL_CT     0x00000ff0    /* collision threshold */
+#define E1000_TCTL_COLD   0x003ff000    /* collision distance */
+#define E1000_TCTL_SWXOFF 0x00400000    /* SW Xoff transmission */
+#define E1000_TCTL_PBE    0x00800000    /* Packet Burst Enable */
+#define E1000_TCTL_RTLC   0x01000000    /* Re-transmit on late collision */
+#define E1000_TCTL_NRTU   0x02000000    /* No Re-transmit on underrun */
+#define E1000_TCTL_MULR   0x10000000    /* Multiple request support */
+
+/* Transmit Arbitration Count */
+#define E1000_TARC0_ENABLE     0x00000400   /* Enable Tx Queue 0 */
+
+/* SerDes Control */
+#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400
+
+/* Receive Checksum Control */
+#define E1000_RXCSUM_PCSS_MASK 0x000000FF   /* Packet Checksum Start */
+#define E1000_RXCSUM_IPOFL     0x00000100   /* IPv4 checksum offload */
+#define E1000_RXCSUM_TUOFL     0x00000200   /* TCP / UDP checksum offload */
+#define E1000_RXCSUM_IPV6OFL   0x00000400   /* IPv6 checksum offload */
+#define E1000_RXCSUM_CRCOFL    0x00000800   /* CRC32 offload enable */
+#define E1000_RXCSUM_IPPCSE    0x00001000   /* IP payload checksum enable */
+#define E1000_RXCSUM_PCSD      0x00002000   /* packet checksum disabled */
+
+/* Header split receive */
+#define E1000_RFCTL_ISCSI_DIS           0x00000001
+#define E1000_RFCTL_ISCSI_DWC_MASK      0x0000003E
+#define E1000_RFCTL_ISCSI_DWC_SHIFT     1
+#define E1000_RFCTL_NFSW_DIS            0x00000040
+#define E1000_RFCTL_NFSR_DIS            0x00000080
+#define E1000_RFCTL_NFS_VER_MASK        0x00000300
+#define E1000_RFCTL_NFS_VER_SHIFT       8
+#define E1000_RFCTL_IPV6_DIS            0x00000400
+#define E1000_RFCTL_IPV6_XSUM_DIS       0x00000800
+#define E1000_RFCTL_ACK_DIS             0x00001000
+#define E1000_RFCTL_ACKD_DIS            0x00002000
+#define E1000_RFCTL_IPFRSP_DIS          0x00004000
+#define E1000_RFCTL_EXTEN               0x00008000
+#define E1000_RFCTL_IPV6_EX_DIS         0x00010000
+#define E1000_RFCTL_NEW_IPV6_EXT_DIS    0x00020000
+#define E1000_RFCTL_LEF                 0x00040000
+
+/* Collision related configuration parameters */
+#define E1000_COLLISION_THRESHOLD       15
+#define E1000_CT_SHIFT                  4
+#define E1000_COLLISION_DISTANCE        63
+#define E1000_COLD_SHIFT                12
+
+/* Default values for the transmit IPG register */
+#define DEFAULT_82543_TIPG_IPGT_FIBER  9
+#define DEFAULT_82543_TIPG_IPGT_COPPER 8
+
+#define E1000_TIPG_IPGT_MASK  0x000003FF
+#define E1000_TIPG_IPGR1_MASK 0x000FFC00
+#define E1000_TIPG_IPGR2_MASK 0x3FF00000
+
+#define DEFAULT_82543_TIPG_IPGR1 8
+#define E1000_TIPG_IPGR1_SHIFT  10
+
+#define DEFAULT_82543_TIPG_IPGR2 6
+#define DEFAULT_80003ES2LAN_TIPG_IPGR2 7
+#define E1000_TIPG_IPGR2_SHIFT  20
+
+/* Ethertype field values */
+#define ETHERNET_IEEE_VLAN_TYPE 0x8100  /* 802.3ac packet */
+
+#define ETHERNET_FCS_SIZE       4
+#define MAX_JUMBO_FRAME_SIZE    0x3F00
+
+/* Extended Configuration Control and Size */
+#define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP      0x00000020
+#define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE       0x00000001
+#define E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE       0x00000008
+#define E1000_EXTCNF_CTRL_SWFLAG                 0x00000020
+#define E1000_EXTCNF_CTRL_GATE_PHY_CFG           0x00000080
+#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK   0x00FF0000
+#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT          16
+#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK   0x0FFF0000
+#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT          16
+
+#define E1000_PHY_CTRL_SPD_EN             0x00000001
+#define E1000_PHY_CTRL_D0A_LPLU           0x00000002
+#define E1000_PHY_CTRL_NOND0A_LPLU        0x00000004
+#define E1000_PHY_CTRL_NOND0A_GBE_DISABLE 0x00000008
+#define E1000_PHY_CTRL_GBE_DISABLE        0x00000040
+
+#define E1000_KABGTXD_BGSQLBIAS           0x00050000
+
+/* PBA constants */
+#define E1000_PBA_6K  0x0006    /* 6KB */
+#define E1000_PBA_8K  0x0008    /* 8KB */
+#define E1000_PBA_10K 0x000A    /* 10KB */
+#define E1000_PBA_12K 0x000C    /* 12KB */
+#define E1000_PBA_14K 0x000E    /* 14KB */
+#define E1000_PBA_16K 0x0010    /* 16KB */
+#define E1000_PBA_18K 0x0012
+#define E1000_PBA_20K 0x0014
+#define E1000_PBA_22K 0x0016
+#define E1000_PBA_24K 0x0018
+#define E1000_PBA_26K 0x001A
+#define E1000_PBA_30K 0x001E
+#define E1000_PBA_32K 0x0020
+#define E1000_PBA_34K 0x0022
+#define E1000_PBA_35K 0x0023
+#define E1000_PBA_38K 0x0026
+#define E1000_PBA_40K 0x0028
+#define E1000_PBA_48K 0x0030    /* 48KB */
+#define E1000_PBA_64K 0x0040    /* 64KB */
+
+#define E1000_PBS_16K E1000_PBA_16K
+#define E1000_PBS_24K E1000_PBA_24K
+
+#define IFS_MAX       80
+#define IFS_MIN       40
+#define IFS_RATIO     4
+#define IFS_STEP      10
+#define MIN_NUM_XMITS 1000
+
+/* SW Semaphore Register */
+#define E1000_SWSM_SMBI         0x00000001 /* Driver Semaphore bit */
+#define E1000_SWSM_SWESMBI      0x00000002 /* FW Semaphore bit */
+#define E1000_SWSM_WMNG         0x00000004 /* Wake MNG Clock */
+#define E1000_SWSM_DRV_LOAD     0x00000008 /* Driver Loaded Bit */
+
+#define E1000_SWSM2_LOCK        0x00000002 /* Secondary driver semaphore bit */
+
+/* Interrupt Cause Read */
+#define E1000_ICR_TXDW          0x00000001 /* Transmit desc written back */
+#define E1000_ICR_TXQE          0x00000002 /* Transmit Queue empty */
+#define E1000_ICR_LSC           0x00000004 /* Link Status Change */
+#define E1000_ICR_RXSEQ         0x00000008 /* Rx sequence error */
+#define E1000_ICR_RXDMT0        0x00000010 /* Rx desc min. threshold (0) */
+#define E1000_ICR_RXO           0x00000040 /* Rx overrun */
+#define E1000_ICR_RXT0          0x00000080 /* Rx timer intr (ring 0) */
+#define E1000_ICR_VMMB          0x00000100 /* VM MB event */
+#define E1000_ICR_MDAC          0x00000200 /* MDIO access complete */
+#define E1000_ICR_RXCFG         0x00000400 /* Rx /c/ ordered set */
+#define E1000_ICR_GPI_EN0       0x00000800 /* GP Int 0 */
+#define E1000_ICR_GPI_EN1       0x00001000 /* GP Int 1 */
+#define E1000_ICR_GPI_EN2       0x00002000 /* GP Int 2 */
+#define E1000_ICR_GPI_EN3       0x00004000 /* GP Int 3 */
+#define E1000_ICR_TXD_LOW       0x00008000
+#define E1000_ICR_SRPD          0x00010000
+#define E1000_ICR_ACK           0x00020000 /* Receive Ack frame */
+#define E1000_ICR_MNG           0x00040000 /* Manageability event */
+#define E1000_ICR_DOCK          0x00080000 /* Dock/Undock */
+#define E1000_ICR_DRSTA         0x40000000 /* Device Reset Asserted */
+#define E1000_ICR_INT_ASSERTED  0x80000000 /* If this bit asserted, the driver
+                                            * should claim the interrupt */
+#define E1000_ICR_RXD_FIFO_PAR0 0x00100000 /* Q0 Rx desc FIFO parity error */
+#define E1000_ICR_TXD_FIFO_PAR0 0x00200000 /* Q0 Tx desc FIFO parity error */
+#define E1000_ICR_HOST_ARB_PAR 0x00400000 /* host arb read buffer parity err */
+#define E1000_ICR_PB_PAR        0x00800000 /* packet buffer parity error */
+#define E1000_ICR_RXD_FIFO_PAR1 0x01000000 /* Q1 Rx desc FIFO parity error */
+#define E1000_ICR_TXD_FIFO_PAR1 0x02000000 /* Q1 Tx desc FIFO parity error */
+#define E1000_ICR_ALL_PARITY    0x03F00000 /* all parity error bits */
+#define E1000_ICR_DSW           0x00000020 /* FW changed the status of DISSW
+                                            * bit in the FWSM */
+#define E1000_ICR_PHYINT        0x00001000 /* LAN connected device generates
+                                            * an interrupt */
+#define E1000_ICR_DOUTSYNC      0x10000000 /* NIC DMA out of sync */
+#define E1000_ICR_EPRST         0x00100000 /* ME hardware reset occurs */
+#define E1000_ICR_FER           0x00400000 /* Fatal Error */
+
+#define E1000_ICR_THS           0x00800000 /* ICR.THS: Thermal Sensor Event*/
+#define E1000_ICR_MDDET         0x10000000 /* Malicious Driver Detect */
+
+/* Extended Interrupt Cause Read */
+#define E1000_EICR_RX_QUEUE0    0x00000001 /* Rx Queue 0 Interrupt */
+#define E1000_EICR_RX_QUEUE1    0x00000002 /* Rx Queue 1 Interrupt */
+#define E1000_EICR_RX_QUEUE2    0x00000004 /* Rx Queue 2 Interrupt */
+#define E1000_EICR_RX_QUEUE3    0x00000008 /* Rx Queue 3 Interrupt */
+#define E1000_EICR_TX_QUEUE0    0x00000100 /* Tx Queue 0 Interrupt */
+#define E1000_EICR_TX_QUEUE1    0x00000200 /* Tx Queue 1 Interrupt */
+#define E1000_EICR_TX_QUEUE2    0x00000400 /* Tx Queue 2 Interrupt */
+#define E1000_EICR_TX_QUEUE3    0x00000800 /* Tx Queue 3 Interrupt */
+#define E1000_EICR_TCP_TIMER    0x40000000 /* TCP Timer */
+#define E1000_EICR_OTHER        0x80000000 /* Interrupt Cause Active */
+/* TCP Timer */
+#define E1000_TCPTIMER_KS       0x00000100 /* KickStart */
+#define E1000_TCPTIMER_COUNT_ENABLE       0x00000200 /* Count Enable */
+#define E1000_TCPTIMER_COUNT_FINISH       0x00000400 /* Count finish */
+#define E1000_TCPTIMER_LOOP     0x00000800 /* Loop */
+
+/*
+ * This defines the bits that are set in the Interrupt Mask
+ * Set/Read Register.  Each bit is documented below:
+ *   o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0)
+ *   o RXSEQ  = Receive Sequence Error
+ */
+#define POLL_IMS_ENABLE_MASK ( \
+    E1000_IMS_RXDMT0 |    \
+    E1000_IMS_RXSEQ)
+
+/*
+ * This defines the bits that are set in the Interrupt Mask
+ * Set/Read Register.  Each bit is documented below:
+ *   o RXT0   = Receiver Timer Interrupt (ring 0)
+ *   o TXDW   = Transmit Descriptor Written Back
+ *   o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0)
+ *   o RXSEQ  = Receive Sequence Error
+ *   o LSC    = Link Status Change
+ */
+#define IMS_ENABLE_MASK ( \
+    E1000_IMS_RXT0   |    \
+    E1000_IMS_TXDW   |    \
+    E1000_IMS_RXDMT0 |    \
+    E1000_IMS_RXSEQ  |    \
+    E1000_IMS_LSC)
+
+/* Interrupt Mask Set */
+#define E1000_IMS_TXDW      E1000_ICR_TXDW      /* Tx desc written back */
+#define E1000_IMS_TXQE      E1000_ICR_TXQE      /* Transmit Queue empty */
+#define E1000_IMS_LSC       E1000_ICR_LSC       /* Link Status Change */
+#define E1000_IMS_VMMB      E1000_ICR_VMMB      /* Mail box activity */
+#define E1000_IMS_RXSEQ     E1000_ICR_RXSEQ     /* Rx sequence error */
+#define E1000_IMS_RXDMT0    E1000_ICR_RXDMT0    /* Rx desc min. threshold */
+#define E1000_IMS_RXO       E1000_ICR_RXO       /* Rx overrun */
+#define E1000_IMS_RXT0      E1000_ICR_RXT0      /* Rx timer intr */
+#define E1000_IMS_MDAC      E1000_ICR_MDAC      /* MDIO access complete */
+#define E1000_IMS_RXCFG     E1000_ICR_RXCFG     /* Rx /c/ ordered set */
+#define E1000_IMS_GPI_EN0   E1000_ICR_GPI_EN0   /* GP Int 0 */
+#define E1000_IMS_GPI_EN1   E1000_ICR_GPI_EN1   /* GP Int 1 */
+#define E1000_IMS_GPI_EN2   E1000_ICR_GPI_EN2   /* GP Int 2 */
+#define E1000_IMS_GPI_EN3   E1000_ICR_GPI_EN3   /* GP Int 3 */
+#define E1000_IMS_TXD_LOW   E1000_ICR_TXD_LOW
+#define E1000_IMS_SRPD      E1000_ICR_SRPD
+#define E1000_IMS_ACK       E1000_ICR_ACK       /* Receive Ack frame */
+#define E1000_IMS_MNG       E1000_ICR_MNG       /* Manageability event */
+#define E1000_IMS_DOCK      E1000_ICR_DOCK      /* Dock/Undock */
+#define E1000_IMS_DRSTA     E1000_ICR_DRSTA     /* Device Reset Asserted */
+#define E1000_IMS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* Q0 Rx desc FIFO
+                                                         * parity error */
+#define E1000_IMS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* Q0 Tx desc FIFO
+                                                         * parity error */
+#define E1000_IMS_HOST_ARB_PAR  E1000_ICR_HOST_ARB_PAR  /* host arb read buffer
+                                                         * parity error */
+#define E1000_IMS_PB_PAR        E1000_ICR_PB_PAR        /* packet buffer parity
+                                                         * error */
+#define E1000_IMS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* Q1 Rx desc FIFO
+                                                         * parity error */
+#define E1000_IMS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* Q1 Tx desc FIFO
+                                                         * parity error */
+#define E1000_IMS_DSW       E1000_ICR_DSW
+#define E1000_IMS_PHYINT    E1000_ICR_PHYINT
+#define E1000_IMS_DOUTSYNC  E1000_ICR_DOUTSYNC /* NIC DMA out of sync */
+#define E1000_IMS_EPRST     E1000_ICR_EPRST
+#define E1000_IMS_FER           E1000_ICR_FER /* Fatal Error */
+
+#define E1000_IMS_THS           E1000_ICR_THS /* ICR.THS: Thermal Sensor Event */
+#define E1000_IMS_MDDET         E1000_ICR_MDDET /* Malicious Driver Detect */
+/* Extended Interrupt Mask Set */
+#define E1000_EIMS_RX_QUEUE0    E1000_EICR_RX_QUEUE0 /* Rx Queue 0 Interrupt */
+#define E1000_EIMS_RX_QUEUE1    E1000_EICR_RX_QUEUE1 /* Rx Queue 1 Interrupt */
+#define E1000_EIMS_RX_QUEUE2    E1000_EICR_RX_QUEUE2 /* Rx Queue 2 Interrupt */
+#define E1000_EIMS_RX_QUEUE3    E1000_EICR_RX_QUEUE3 /* Rx Queue 3 Interrupt */
+#define E1000_EIMS_TX_QUEUE0    E1000_EICR_TX_QUEUE0 /* Tx Queue 0 Interrupt */
+#define E1000_EIMS_TX_QUEUE1    E1000_EICR_TX_QUEUE1 /* Tx Queue 1 Interrupt */
+#define E1000_EIMS_TX_QUEUE2    E1000_EICR_TX_QUEUE2 /* Tx Queue 2 Interrupt */
+#define E1000_EIMS_TX_QUEUE3    E1000_EICR_TX_QUEUE3 /* Tx Queue 3 Interrupt */
+#define E1000_EIMS_TCP_TIMER    E1000_EICR_TCP_TIMER /* TCP Timer */
+#define E1000_EIMS_OTHER        E1000_EICR_OTHER   /* Interrupt Cause Active */
+
+/* Interrupt Cause Set */
+#define E1000_ICS_TXDW      E1000_ICR_TXDW      /* Tx desc written back */
+#define E1000_ICS_TXQE      E1000_ICR_TXQE      /* Transmit Queue empty */
+#define E1000_ICS_LSC       E1000_ICR_LSC       /* Link Status Change */
+#define E1000_ICS_RXSEQ     E1000_ICR_RXSEQ     /* Rx sequence error */
+#define E1000_ICS_RXDMT0    E1000_ICR_RXDMT0    /* Rx desc min. threshold */
+#define E1000_ICS_RXO       E1000_ICR_RXO       /* Rx overrun */
+#define E1000_ICS_RXT0      E1000_ICR_RXT0      /* Rx timer intr */
+#define E1000_ICS_MDAC      E1000_ICR_MDAC      /* MDIO access complete */
+#define E1000_ICS_RXCFG     E1000_ICR_RXCFG     /* Rx /c/ ordered set */
+#define E1000_ICS_GPI_EN0   E1000_ICR_GPI_EN0   /* GP Int 0 */
+#define E1000_ICS_GPI_EN1   E1000_ICR_GPI_EN1   /* GP Int 1 */
+#define E1000_ICS_GPI_EN2   E1000_ICR_GPI_EN2   /* GP Int 2 */
+#define E1000_ICS_GPI_EN3   E1000_ICR_GPI_EN3   /* GP Int 3 */
+#define E1000_ICS_TXD_LOW   E1000_ICR_TXD_LOW
+#define E1000_ICS_SRPD      E1000_ICR_SRPD
+#define E1000_ICS_ACK       E1000_ICR_ACK       /* Receive Ack frame */
+#define E1000_ICS_MNG       E1000_ICR_MNG       /* Manageability event */
+#define E1000_ICS_DOCK      E1000_ICR_DOCK      /* Dock/Undock */
+#define E1000_ICS_DRSTA     E1000_ICR_DRSTA     /* Device Reset Asserted */
+#define E1000_ICS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* Q0 Rx desc FIFO
+                                                         * parity error */
+#define E1000_ICS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* Q0 Tx desc FIFO
+                                                         * parity error */
+#define E1000_ICS_HOST_ARB_PAR  E1000_ICR_HOST_ARB_PAR  /* host arb read buffer
+                                                         * parity error */
+#define E1000_ICS_PB_PAR        E1000_ICR_PB_PAR        /* packet buffer parity
+                                                         * error */
+#define E1000_ICS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* Q1 Rx desc FIFO
+                                                         * parity error */
+#define E1000_ICS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* Q1 Tx desc FIFO
+                                                         * parity error */
+#define E1000_ICS_DSW       E1000_ICR_DSW
+#define E1000_ICS_DOUTSYNC  E1000_ICR_DOUTSYNC /* NIC DMA out of sync */
+#define E1000_ICS_PHYINT    E1000_ICR_PHYINT
+#define E1000_ICS_EPRST     E1000_ICR_EPRST
+
+/* Extended Interrupt Cause Set */
+#define E1000_EICS_RX_QUEUE0    E1000_EICR_RX_QUEUE0 /* Rx Queue 0 Interrupt */
+#define E1000_EICS_RX_QUEUE1    E1000_EICR_RX_QUEUE1 /* Rx Queue 1 Interrupt */
+#define E1000_EICS_RX_QUEUE2    E1000_EICR_RX_QUEUE2 /* Rx Queue 2 Interrupt */
+#define E1000_EICS_RX_QUEUE3    E1000_EICR_RX_QUEUE3 /* Rx Queue 3 Interrupt */
+#define E1000_EICS_TX_QUEUE0    E1000_EICR_TX_QUEUE0 /* Tx Queue 0 Interrupt */
+#define E1000_EICS_TX_QUEUE1    E1000_EICR_TX_QUEUE1 /* Tx Queue 1 Interrupt */
+#define E1000_EICS_TX_QUEUE2    E1000_EICR_TX_QUEUE2 /* Tx Queue 2 Interrupt */
+#define E1000_EICS_TX_QUEUE3    E1000_EICR_TX_QUEUE3 /* Tx Queue 3 Interrupt */
+#define E1000_EICS_TCP_TIMER    E1000_EICR_TCP_TIMER /* TCP Timer */
+#define E1000_EICS_OTHER        E1000_EICR_OTHER   /* Interrupt Cause Active */
+
+#define E1000_EITR_ITR_INT_MASK 0x0000FFFF
+/* E1000_EITR_CNT_IGNR is only for 82576 and newer */
+#define E1000_EITR_CNT_IGNR     0x80000000 /* Don't reset counters on write */
+
+/* Transmit Descriptor Control */
+#define E1000_TXDCTL_PTHRESH    0x0000003F /* TXDCTL Prefetch Threshold */
+#define E1000_TXDCTL_HTHRESH    0x00003F00 /* TXDCTL Host Threshold */
+#define E1000_TXDCTL_WTHRESH    0x003F0000 /* TXDCTL Writeback Threshold */
+#define E1000_TXDCTL_GRAN       0x01000000 /* TXDCTL Granularity */
+#define E1000_TXDCTL_LWTHRESH   0xFE000000 /* TXDCTL Low Threshold */
+#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */
+#define E1000_TXDCTL_MAX_TX_DESC_PREFETCH 0x0100001F /* GRAN=1, PTHRESH=31 */
+/* Enable the counting of descriptors still to be processed. */
+#define E1000_TXDCTL_COUNT_DESC 0x00400000
+
+/* Flow Control Constants */
+#define FLOW_CONTROL_ADDRESS_LOW  0x00C28001
+#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100
+#define FLOW_CONTROL_TYPE         0x8808
+
+/* 802.1q VLAN Packet Size */
+#define VLAN_TAG_SIZE              4    /* 802.3ac tag (not DMA'd) */
+#define E1000_VLAN_FILTER_TBL_SIZE 128  /* VLAN Filter Table (4096 bits) */
+
+/* Receive Address */
+/*
+ * Number of high/low register pairs in the RAR. The RAR (Receive Address
+ * Registers) holds the directed and multicast addresses that we monitor.
+ * Technically, we have 16 spots.  However, we reserve one of these spots
+ * (RAR[15]) for our directed address used by controllers with
+ * manageability enabled, allowing us room for 15 multicast addresses.
+ */
+#define E1000_RAR_ENTRIES     15
+#define E1000_RAH_AV  0x80000000        /* Receive descriptor valid */
+#define E1000_RAL_MAC_ADDR_LEN 4
+#define E1000_RAH_MAC_ADDR_LEN 2
+#define E1000_RAH_QUEUE_MASK_82575 0x000C0000
+#define E1000_RAH_POOL_MASK     0x03FC0000
+#define E1000_RAH_POOL_SHIFT    18
+#define E1000_RAH_POOL_1        0x00040000
+
+/* Error Codes */
+#define E1000_SUCCESS      0
+#define E1000_ERR_NVM      1
+#define E1000_ERR_PHY      2
+#define E1000_ERR_CONFIG   3
+#define E1000_ERR_PARAM    4
+#define E1000_ERR_MAC_INIT 5
+#define E1000_ERR_PHY_TYPE 6
+#define E1000_ERR_RESET   9
+#define E1000_ERR_MASTER_REQUESTS_PENDING 10
+#define E1000_ERR_HOST_INTERFACE_COMMAND 11
+#define E1000_BLK_PHY_RESET   12
+#define E1000_ERR_SWFW_SYNC 13
+#define E1000_NOT_IMPLEMENTED 14
+#define E1000_ERR_MBX      15
+#define E1000_ERR_INVALID_ARGUMENT  16
+#define E1000_ERR_NO_SPACE          17
+#define E1000_ERR_NVM_PBA_SECTION   18
+
+/* Loop limit on how long we wait for auto-negotiation to complete */
+#define FIBER_LINK_UP_LIMIT               50
+#define COPPER_LINK_UP_LIMIT              10
+#define PHY_AUTO_NEG_LIMIT                45
+#define PHY_FORCE_LIMIT                   20
+/* Number of 100 microseconds we wait for PCI Express master disable */
+#define MASTER_DISABLE_TIMEOUT      800
+/* Number of milliseconds we wait for PHY configuration done after MAC reset */
+#define PHY_CFG_TIMEOUT             100
+/* Number of 2 milliseconds we wait for acquiring MDIO ownership. */
+#define MDIO_OWNERSHIP_TIMEOUT      10
+/* Number of milliseconds for NVM auto read done after MAC reset. */
+#define AUTO_READ_DONE_TIMEOUT      10
+
+/* Flow Control */
+#define E1000_FCRTH_RTH  0x0000FFF8     /* Mask Bits[15:3] for RTH */
+#define E1000_FCRTH_XFCE 0x80000000     /* External Flow Control Enable */
+#define E1000_FCRTL_RTL  0x0000FFF8     /* Mask Bits[15:3] for RTL */
+#define E1000_FCRTL_XONE 0x80000000     /* Enable XON frame transmission */
+
+/* Transmit Configuration Word */
+#define E1000_TXCW_FD         0x00000020        /* TXCW full duplex */
+#define E1000_TXCW_HD         0x00000040        /* TXCW half duplex */
+#define E1000_TXCW_PAUSE      0x00000080        /* TXCW sym pause request */
+#define E1000_TXCW_ASM_DIR    0x00000100        /* TXCW astm pause direction */
+#define E1000_TXCW_PAUSE_MASK 0x00000180        /* TXCW pause request mask */
+#define E1000_TXCW_RF         0x00003000        /* TXCW remote fault */
+#define E1000_TXCW_NP         0x00008000        /* TXCW next page */
+#define E1000_TXCW_CW         0x0000ffff        /* TxConfigWord mask */
+#define E1000_TXCW_TXC        0x40000000        /* Transmit Config control */
+#define E1000_TXCW_ANE        0x80000000        /* Auto-neg enable */
+
+/* Receive Configuration Word */
+#define E1000_RXCW_CW         0x0000ffff        /* RxConfigWord mask */
+#define E1000_RXCW_NC         0x04000000        /* Receive config no carrier */
+#define E1000_RXCW_IV         0x08000000        /* Receive config invalid */
+#define E1000_RXCW_CC         0x10000000        /* Receive config change */
+#define E1000_RXCW_C          0x20000000        /* Receive config */
+#define E1000_RXCW_SYNCH      0x40000000        /* Receive config synch */
+#define E1000_RXCW_ANC        0x80000000        /* Auto-neg complete */
+
+#define E1000_TSYNCTXCTL_VALID    0x00000001 /* Tx timestamp valid */
+#define E1000_TSYNCTXCTL_ENABLED  0x00000010 /* enable Tx timestamping */
+
+#define E1000_TSYNCRXCTL_VALID      0x00000001 /* Rx timestamp valid */
+#define E1000_TSYNCRXCTL_TYPE_MASK  0x0000000E /* Rx type mask */
+#define E1000_TSYNCRXCTL_TYPE_L2_V2       0x00
+#define E1000_TSYNCRXCTL_TYPE_L4_V1       0x02
+#define E1000_TSYNCRXCTL_TYPE_L2_L4_V2    0x04
+#define E1000_TSYNCRXCTL_TYPE_ALL         0x08
+#define E1000_TSYNCRXCTL_TYPE_EVENT_V2    0x0A
+#define E1000_TSYNCRXCTL_ENABLED    0x00000010 /* enable Rx timestamping */
+
+#define E1000_TSYNCRXCFG_PTP_V1_CTRLT_MASK   0x000000FF
+#define E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE       0x00
+#define E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE  0x01
+#define E1000_TSYNCRXCFG_PTP_V1_FOLLOWUP_MESSAGE   0x02
+#define E1000_TSYNCRXCFG_PTP_V1_DELAY_RESP_MESSAGE 0x03
+#define E1000_TSYNCRXCFG_PTP_V1_MANAGEMENT_MESSAGE 0x04
+
+#define E1000_TSYNCRXCFG_PTP_V2_MSGID_MASK               0x00000F00
+#define E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE                 0x0000
+#define E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE            0x0100
+#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_REQ_MESSAGE       0x0200
+#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_RESP_MESSAGE      0x0300
+#define E1000_TSYNCRXCFG_PTP_V2_FOLLOWUP_MESSAGE             0x0800
+#define E1000_TSYNCRXCFG_PTP_V2_DELAY_RESP_MESSAGE           0x0900
+#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_FOLLOWUP_MESSAGE  0x0A00
+#define E1000_TSYNCRXCFG_PTP_V2_ANNOUNCE_MESSAGE             0x0B00
+#define E1000_TSYNCRXCFG_PTP_V2_SIGNALLING_MESSAGE           0x0C00
+#define E1000_TSYNCRXCFG_PTP_V2_MANAGEMENT_MESSAGE           0x0D00
+
+#define E1000_TIMINCA_16NS_SHIFT 24
+/* TUPLE Filtering Configuration */
+#define E1000_TTQF_DISABLE_MASK   0xF0008000     /* TTQF Disable Mask */
+#define E1000_TTQF_QUEUE_ENABLE   0x100          /* TTQF Queue Enable Bit */
+#define E1000_TTQF_PROTOCOL_MASK  0xFF           /* TTQF Protocol Mask */
+/* TTQF TCP Bit, shift with E1000_TTQF_PROTOCOL_SHIFT */
+#define E1000_TTQF_PROTOCOL_TCP   0x0
+/* TTQF UDP Bit, shift with E1000_TTQF_PROTOCOL_SHIFT */
+#define E1000_TTQF_PROTOCOL_UDP   0x1
+/* TTQF SCTP Bit, shift with E1000_TTQF_PROTOCOL_SHIFT */
+#define E1000_TTQF_PROTOCOL_SCTP  0x2
+#define E1000_TTQF_PROTOCOL_SHIFT 5              /* TTQF Protocol Shift */
+#define E1000_TTQF_QUEUE_SHIFT    16             /* TTQF Queue Shift */
+#define E1000_TTQF_RX_QUEUE_MASK  0x70000        /* TTQF Queue Mask */
+#define E1000_TTQF_MASK_ENABLE    0x10000000     /* TTQF Mask Enable Bit */
+#define E1000_IMIR_CLEAR_MASK     0xF001FFFF     /* IMIR Reg Clear Mask */
+#define E1000_IMIR_PORT_BYPASS    0x20000        /* IMIR Port Bypass Bit */
+#define E1000_IMIR_PRIORITY_SHIFT 29             /* IMIR Priority Shift */
+#define E1000_IMIREXT_CLEAR_MASK  0x7FFFF        /* IMIREXT Reg Clear Mask */
+
+#define E1000_MDICNFG_EXT_MDIO    0x80000000      /* MDI ext/int destination */
+#define E1000_MDICNFG_COM_MDIO    0x40000000      /* MDI shared w/ lan 0 */
+#define E1000_MDICNFG_PHY_MASK    0x03E00000
+#define E1000_MDICNFG_PHY_SHIFT   21
+
+#define E1000_THSTAT_LOW_EVENT      0x20000000  /* Low thermal threshold */
+#define E1000_THSTAT_MID_EVENT      0x00200000  /* Mid thermal threshold */
+#define E1000_THSTAT_HIGH_EVENT     0x00002000  /* High thermal threshold */
+#define E1000_THSTAT_PWR_DOWN       0x00000001  /* Power Down Event */
+#define E1000_THSTAT_LINK_THROTTLE  0x00000002  /* Link Speed Throttle Event */
+
+/* Powerville EEE defines */
+#define E1000_IPCNFG_EEE_1G_AN      0x00000008  /* IPCNFG EEE Enable 1G AN */
+#define E1000_IPCNFG_EEE_100M_AN    0x00000004  /* IPCNFG EEE Enable 100M AN */
+#define E1000_EEER_TX_LPI_EN        0x00010000  /* EEER Tx LPI Enable */
+#define E1000_EEER_RX_LPI_EN        0x00020000  /* EEER Rx LPI Enable */
+#define E1000_EEER_LPI_FC           0x00040000  /* EEER Enable on Flow Control*/
+/* EEE status */
+#define E1000_EEER_EEE_NEG          0x20000000  /* EEE capability negotiated */
+#define E1000_EEER_RX_LPI_STATUS    0x40000000  /* Rx in LPI state */
+#define E1000_EEER_TX_LPI_STATUS    0x80000000  /* Tx in LPI state */
+
+/* PCI Express Control */
+#define E1000_GCR_RXD_NO_SNOOP          0x00000001
+#define E1000_GCR_RXDSCW_NO_SNOOP       0x00000002
+#define E1000_GCR_RXDSCR_NO_SNOOP       0x00000004
+#define E1000_GCR_TXD_NO_SNOOP          0x00000008
+#define E1000_GCR_TXDSCW_NO_SNOOP       0x00000010
+#define E1000_GCR_TXDSCR_NO_SNOOP       0x00000020
+#define E1000_GCR_CMPL_TMOUT_MASK       0x0000F000
+#define E1000_GCR_CMPL_TMOUT_10ms       0x00001000
+#define E1000_GCR_CMPL_TMOUT_RESEND     0x00010000
+#define E1000_GCR_CAP_VER2              0x00040000
+
+#define PCIE_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP         | \
+                           E1000_GCR_RXDSCW_NO_SNOOP      | \
+                           E1000_GCR_RXDSCR_NO_SNOOP      | \
+                           E1000_GCR_TXD_NO_SNOOP         | \
+                           E1000_GCR_TXDSCW_NO_SNOOP      | \
+                           E1000_GCR_TXDSCR_NO_SNOOP)
+
+/* PHY Control Register */
+#define MII_CR_SPEED_SELECT_MSB 0x0040  /* bits 6,13: 10=1000, 01=100, 00=10 */
+#define MII_CR_COLL_TEST_ENABLE 0x0080  /* Collision test enable */
+#define MII_CR_FULL_DUPLEX      0x0100  /* FDX =1, half duplex =0 */
+#define MII_CR_RESTART_AUTO_NEG 0x0200  /* Restart auto negotiation */
+#define MII_CR_ISOLATE          0x0400  /* Isolate PHY from MII */
+#define MII_CR_POWER_DOWN       0x0800  /* Power down */
+#define MII_CR_AUTO_NEG_EN      0x1000  /* Auto Neg Enable */
+#define MII_CR_SPEED_SELECT_LSB 0x2000  /* bits 6,13: 10=1000, 01=100, 00=10 */
+#define MII_CR_LOOPBACK         0x4000  /* 0 = normal, 1 = loopback */
+#define MII_CR_RESET            0x8000  /* 0 = normal, 1 = PHY reset */
+#define MII_CR_SPEED_1000       0x0040
+#define MII_CR_SPEED_100        0x2000
+#define MII_CR_SPEED_10         0x0000
+
+/* PHY Status Register */
+#define MII_SR_EXTENDED_CAPS     0x0001 /* Extended register capabilities */
+#define MII_SR_JABBER_DETECT     0x0002 /* Jabber Detected */
+#define MII_SR_LINK_STATUS       0x0004 /* Link Status 1 = link */
+#define MII_SR_AUTONEG_CAPS      0x0008 /* Auto Neg Capable */
+#define MII_SR_REMOTE_FAULT      0x0010 /* Remote Fault Detect */
+#define MII_SR_AUTONEG_COMPLETE  0x0020 /* Auto Neg Complete */
+#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */
+#define MII_SR_EXTENDED_STATUS   0x0100 /* Ext. status info in Reg 0x0F */
+#define MII_SR_100T2_HD_CAPS     0x0200 /* 100T2 Half Duplex Capable */
+#define MII_SR_100T2_FD_CAPS     0x0400 /* 100T2 Full Duplex Capable */
+#define MII_SR_10T_HD_CAPS       0x0800 /* 10T   Half Duplex Capable */
+#define MII_SR_10T_FD_CAPS       0x1000 /* 10T   Full Duplex Capable */
+#define MII_SR_100X_HD_CAPS      0x2000 /* 100X  Half Duplex Capable */
+#define MII_SR_100X_FD_CAPS      0x4000 /* 100X  Full Duplex Capable */
+#define MII_SR_100T4_CAPS        0x8000 /* 100T4 Capable */
+
+/* Autoneg Advertisement Register */
+#define NWAY_AR_SELECTOR_FIELD   0x0001   /* indicates IEEE 802.3 CSMA/CD */
+#define NWAY_AR_10T_HD_CAPS      0x0020   /* 10T   Half Duplex Capable */
+#define NWAY_AR_10T_FD_CAPS      0x0040   /* 10T   Full Duplex Capable */
+#define NWAY_AR_100TX_HD_CAPS    0x0080   /* 100TX Half Duplex Capable */
+#define NWAY_AR_100TX_FD_CAPS    0x0100   /* 100TX Full Duplex Capable */
+#define NWAY_AR_100T4_CAPS       0x0200   /* 100T4 Capable */
+#define NWAY_AR_PAUSE            0x0400   /* Pause operation desired */
+#define NWAY_AR_ASM_DIR          0x0800   /* Asymmetric Pause Direction bit */
+#define NWAY_AR_REMOTE_FAULT     0x2000   /* Remote Fault detected */
+#define NWAY_AR_NEXT_PAGE        0x8000   /* Next Page ability supported */
+
+/* Link Partner Ability Register (Base Page) */
+#define NWAY_LPAR_SELECTOR_FIELD 0x0000 /* LP protocol selector field */
+#define NWAY_LPAR_10T_HD_CAPS    0x0020 /* LP is 10T   Half Duplex Capable */
+#define NWAY_LPAR_10T_FD_CAPS    0x0040 /* LP is 10T   Full Duplex Capable */
+#define NWAY_LPAR_100TX_HD_CAPS  0x0080 /* LP is 100TX Half Duplex Capable */
+#define NWAY_LPAR_100TX_FD_CAPS  0x0100 /* LP is 100TX Full Duplex Capable */
+#define NWAY_LPAR_100T4_CAPS     0x0200 /* LP is 100T4 Capable */
+#define NWAY_LPAR_PAUSE          0x0400 /* LP Pause operation desired */
+#define NWAY_LPAR_ASM_DIR        0x0800 /* LP Asymmetric Pause Direction bit */
+#define NWAY_LPAR_REMOTE_FAULT   0x2000 /* LP has detected Remote Fault */
+#define NWAY_LPAR_ACKNOWLEDGE    0x4000 /* LP has rx'd link code word */
+#define NWAY_LPAR_NEXT_PAGE      0x8000 /* Next Page ability supported */
+
+/* Autoneg Expansion Register (IEEE MII register 6) */
+#define NWAY_ER_LP_NWAY_CAPS      0x0001 /* LP has Auto Neg Capability */
+#define NWAY_ER_PAGE_RXD          0x0002 /* Link code word page received */
+#define NWAY_ER_NEXT_PAGE_CAPS    0x0004 /* Local device is Next Page able */
+#define NWAY_ER_LP_NEXT_PAGE_CAPS 0x0008 /* LP is Next Page able */
+#define NWAY_ER_PAR_DETECT_FAULT  0x0010 /* Parallel detection fault */
+
+/* 1000BASE-T Control Register */
+#define CR_1000T_ASYM_PAUSE      0x0080 /* Advertise asymmetric pause bit */
+#define CR_1000T_HD_CAPS         0x0100 /* Advertise 1000T HD capability */
+#define CR_1000T_FD_CAPS         0x0200 /* Advertise 1000T FD capability  */
+#define CR_1000T_REPEATER_DTE    0x0400 /* 1=Repeater/switch device port */
+                                        /* 0=DTE device */
+#define CR_1000T_MS_VALUE        0x0800 /* 1=Configure PHY as Master */
+                                        /* 0=Configure PHY as Slave */
+#define CR_1000T_MS_ENABLE      0x1000 /* 1=Master/Slave manual config value */
+                                        /* 0=Automatic Master/Slave config */
+#define CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */
+#define CR_1000T_TEST_MODE_1     0x2000 /* Transmit Waveform test */
+#define CR_1000T_TEST_MODE_2     0x4000 /* Master Transmit Jitter test */
+#define CR_1000T_TEST_MODE_3     0x6000 /* Slave Transmit Jitter test */
+#define CR_1000T_TEST_MODE_4     0x8000 /* Transmitter Distortion test */
+
+/* 1000BASE-T Status Register */
+#define SR_1000T_IDLE_ERROR_CNT   0x00FF /* Num idle errors since last read */
+#define SR_1000T_ASYM_PAUSE_DIR  0x0100 /* LP asymmetric pause direction bit */
+#define SR_1000T_LP_HD_CAPS       0x0400 /* LP is 1000T HD capable */
+#define SR_1000T_LP_FD_CAPS       0x0800 /* LP is 1000T FD capable */
+#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */
+#define SR_1000T_LOCAL_RX_STATUS  0x2000 /* Local receiver OK */
+#define SR_1000T_MS_CONFIG_RES    0x4000 /* 1=Local Tx is Master, 0=Slave */
+#define SR_1000T_MS_CONFIG_FAULT  0x8000 /* Master/Slave config fault */
+
+#define SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT 5
+
+/* PHY 1000 MII Register/Bit Definitions */
+/* PHY Registers defined by IEEE */
+#define PHY_CONTROL      0x00 /* Control Register */
+#define PHY_STATUS       0x01 /* Status Register */
+#define PHY_ID1          0x02 /* Phy Id Reg (word 1) */
+#define PHY_ID2          0x03 /* Phy Id Reg (word 2) */
+#define PHY_AUTONEG_ADV  0x04 /* Autoneg Advertisement */
+#define PHY_LP_ABILITY   0x05 /* Link Partner Ability (Base Page) */
+#define PHY_AUTONEG_EXP  0x06 /* Autoneg Expansion Reg */
+#define PHY_NEXT_PAGE_TX 0x07 /* Next Page Tx */
+#define PHY_LP_NEXT_PAGE 0x08 /* Link Partner Next Page */
+#define PHY_1000T_CTRL   0x09 /* 1000Base-T Control Reg */
+#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */
+#define PHY_EXT_STATUS   0x0F /* Extended Status Reg */
+
+#define PHY_CONTROL_LB   0x4000 /* PHY Loopback bit */
+
+/* NVM Control */
+#define E1000_EECD_SK        0x00000001 /* NVM Clock */
+#define E1000_EECD_CS        0x00000002 /* NVM Chip Select */
+#define E1000_EECD_DI        0x00000004 /* NVM Data In */
+#define E1000_EECD_DO        0x00000008 /* NVM Data Out */
+#define E1000_EECD_FWE_MASK  0x00000030
+#define E1000_EECD_FWE_DIS   0x00000010 /* Disable FLASH writes */
+#define E1000_EECD_FWE_EN    0x00000020 /* Enable FLASH writes */
+#define E1000_EECD_FWE_SHIFT 4
+#define E1000_EECD_REQ       0x00000040 /* NVM Access Request */
+#define E1000_EECD_GNT       0x00000080 /* NVM Access Grant */
+#define E1000_EECD_PRES      0x00000100 /* NVM Present */
+#define E1000_EECD_SIZE      0x00000200 /* NVM Size (0=64 word 1=256 word) */
+#define E1000_EECD_BLOCKED   0x00008000 /* Bit banging access blocked flag */
+#define E1000_EECD_ABORT     0x00010000 /* NVM operation aborted flag */
+#define E1000_EECD_TIMEOUT   0x00020000 /* NVM read operation timeout flag */
+#define E1000_EECD_ERROR_CLR 0x00040000 /* NVM error status clear bit */
+/* NVM Addressing bits based on type 0=small, 1=large */
+#define E1000_EECD_ADDR_BITS 0x00000400
+#define E1000_EECD_TYPE      0x00002000 /* NVM Type (1-SPI, 0-Microwire) */
+#ifndef E1000_NVM_GRANT_ATTEMPTS
+#define E1000_NVM_GRANT_ATTEMPTS   1000 /* NVM # attempts to gain grant */
+#endif
+#define E1000_EECD_AUTO_RD          0x00000200  /* NVM Auto Read done */
+#define E1000_EECD_SIZE_EX_MASK     0x00007800  /* NVM Size */
+#define E1000_EECD_SIZE_EX_SHIFT     11
+#define E1000_EECD_NVADDS    0x00018000 /* NVM Address Size */
+#define E1000_EECD_SELSHAD   0x00020000 /* Select Shadow RAM */
+#define E1000_EECD_INITSRAM  0x00040000 /* Initialize Shadow RAM */
+#define E1000_EECD_FLUPD     0x00080000 /* Update FLASH */
+#define E1000_EECD_AUPDEN    0x00100000 /* Enable Autonomous FLASH update */
+#define E1000_EECD_SHADV     0x00200000 /* Shadow RAM Data Valid */
+#define E1000_EECD_SEC1VAL   0x00400000 /* Sector One Valid */
+#define E1000_EECD_SECVAL_SHIFT      22
+#define E1000_EECD_SEC1VAL_VALID_MASK (E1000_EECD_AUTO_RD | E1000_EECD_PRES)
+
+#define E1000_NVM_SWDPIN0   0x0001   /* SWDPIN 0 NVM Value */
+#define E1000_NVM_LED_LOGIC 0x0020   /* Led Logic Word */
+#define E1000_NVM_RW_REG_DATA   16  /* Offset to data in NVM read/write regs */
+#define E1000_NVM_RW_REG_DONE   2    /* Offset to READ/WRITE done bit */
+#define E1000_NVM_RW_REG_START  1    /* Start operation */
+#define E1000_NVM_RW_ADDR_SHIFT 2    /* Shift to the address bits */
+#define E1000_NVM_POLL_WRITE    1    /* Flag for polling for write complete */
+#define E1000_NVM_POLL_READ     0    /* Flag for polling for read complete */
+#define E1000_FLASH_UPDATES  2000
+
+/* NVM Word Offsets */
+#define NVM_COMPAT                 0x0003
+#define NVM_ID_LED_SETTINGS        0x0004
+#define NVM_VERSION                0x0005
+#define NVM_SERDES_AMPLITUDE       0x0006 /* SERDES output amplitude */
+#define NVM_PHY_CLASS_WORD         0x0007
+#define NVM_INIT_CONTROL1_REG      0x000A
+#define NVM_INIT_CONTROL2_REG      0x000F
+#define NVM_SWDEF_PINS_CTRL_PORT_1 0x0010
+#define NVM_INIT_CONTROL3_PORT_B   0x0014
+#define NVM_INIT_3GIO_3            0x001A
+#define NVM_SWDEF_PINS_CTRL_PORT_0 0x0020
+#define NVM_INIT_CONTROL3_PORT_A   0x0024
+#define NVM_CFG                    0x0012
+#define NVM_FLASH_VERSION          0x0032
+#define NVM_ALT_MAC_ADDR_PTR       0x0037
+#define NVM_CHECKSUM_REG           0x003F
+#define NVM_COMPATIBILITY_REG_3    0x0003
+#define NVM_COMPATIBILITY_BIT_MASK 0x8000
+
+#define E1000_NVM_CFG_DONE_PORT_0  0x040000 /* MNG config cycle done */
+#define E1000_NVM_CFG_DONE_PORT_1  0x080000 /* ...for second port */
+#define E1000_NVM_CFG_DONE_PORT_2  0x100000 /* ...for third port */
+#define E1000_NVM_CFG_DONE_PORT_3  0x200000 /* ...for fourth port */
+
+/*
+ * Per-LAN-function NVM word offset on 82580: function 0 starts at word 0,
+ * function n (n > 0) at word 0x40 * (n + 1).  The argument is fully
+ * parenthesized so expression arguments (e.g. "i + 1") expand correctly
+ * under the ?: and * operator precedence.
+ */
+#define NVM_82580_LAN_FUNC_OFFSET(a) ((a) ? (0x40 + (0x40 * (a))) : 0)
+
+/* Mask bits for fields in Word 0x24 of the NVM */
+#define NVM_WORD24_COM_MDIO         0x0008 /* MDIO interface shared */
+#define NVM_WORD24_EXT_MDIO         0x0004 /* MDIO accesses routed external */
+
+/* Mask bits for fields in Word 0x0f of the NVM */
+#define NVM_WORD0F_PAUSE_MASK       0x3000
+#define NVM_WORD0F_PAUSE            0x1000
+#define NVM_WORD0F_ASM_DIR          0x2000
+#define NVM_WORD0F_ANE              0x0800
+#define NVM_WORD0F_SWPDIO_EXT_MASK  0x00F0
+#define NVM_WORD0F_LPLU             0x0001
+
+/* Mask bits for fields in Word 0x1a of the NVM */
+#define NVM_WORD1A_ASPM_MASK  0x000C
+
+/* Mask bits for fields in Word 0x03 of the EEPROM */
+#define NVM_COMPAT_LOM    0x0800
+
+/* length of string needed to store PBA number */
+#define E1000_PBANUM_LENGTH             11
+
+/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */
+#define NVM_SUM                    0xBABA
+
+#define NVM_MAC_ADDR_OFFSET        0
+#define NVM_PBA_OFFSET_0           8
+#define NVM_PBA_OFFSET_1           9
+#define NVM_PBA_PTR_GUARD          0xFAFA
+#define NVM_RESERVED_WORD          0xFFFF
+#define NVM_PHY_CLASS_A            0x8000
+#define NVM_SERDES_AMPLITUDE_MASK  0x000F
+#define NVM_SIZE_MASK              0x1C00
+#define NVM_SIZE_SHIFT             10
+#define NVM_WORD_SIZE_BASE_SHIFT   6
+#define NVM_SWDPIO_EXT_SHIFT       4
+
+/* NVM Commands - Microwire */
+#define NVM_READ_OPCODE_MICROWIRE  0x6  /* NVM read opcode */
+#define NVM_WRITE_OPCODE_MICROWIRE 0x5  /* NVM write opcode */
+#define NVM_ERASE_OPCODE_MICROWIRE 0x7  /* NVM erase opcode */
+#define NVM_EWEN_OPCODE_MICROWIRE  0x13 /* NVM erase/write enable */
+#define NVM_EWDS_OPCODE_MICROWIRE  0x10 /* NVM erase/write disable */
+
+/* NVM Commands - SPI */
+#define NVM_MAX_RETRY_SPI          5000 /* Max wait of 5ms, for RDY signal */
+#define NVM_READ_OPCODE_SPI        0x03 /* NVM read opcode */
+#define NVM_WRITE_OPCODE_SPI       0x02 /* NVM write opcode */
+#define NVM_A8_OPCODE_SPI          0x08 /* opcode bit-3 = address bit-8 */
+#define NVM_WREN_OPCODE_SPI        0x06 /* NVM set Write Enable latch */
+#define NVM_WRDI_OPCODE_SPI        0x04 /* NVM reset Write Enable latch */
+#define NVM_RDSR_OPCODE_SPI        0x05 /* NVM read Status register */
+#define NVM_WRSR_OPCODE_SPI        0x01 /* NVM write Status register */
+
+/* SPI NVM Status Register */
+#define NVM_STATUS_RDY_SPI         0x01
+#define NVM_STATUS_WEN_SPI         0x02
+#define NVM_STATUS_BP0_SPI         0x04
+#define NVM_STATUS_BP1_SPI         0x08
+#define NVM_STATUS_WPEN_SPI        0x80
+
+/* Word definitions for ID LED Settings */
+#define ID_LED_RESERVED_0000 0x0000
+#define ID_LED_RESERVED_FFFF 0xFFFF
+#define ID_LED_DEFAULT       ((ID_LED_OFF1_ON2  << 12) | \
+                              (ID_LED_OFF1_OFF2 <<  8) | \
+                              (ID_LED_DEF1_DEF2 <<  4) | \
+                              (ID_LED_DEF1_DEF2))
+#define ID_LED_DEF1_DEF2     0x1
+#define ID_LED_DEF1_ON2      0x2
+#define ID_LED_DEF1_OFF2     0x3
+#define ID_LED_ON1_DEF2      0x4
+#define ID_LED_ON1_ON2       0x5
+#define ID_LED_ON1_OFF2      0x6
+#define ID_LED_OFF1_DEF2     0x7
+#define ID_LED_OFF1_ON2      0x8
+#define ID_LED_OFF1_OFF2     0x9
+
+#define IGP_ACTIVITY_LED_MASK   0xFFFFF0FF
+#define IGP_ACTIVITY_LED_ENABLE 0x0300
+#define IGP_LED3_MODE           0x07000000
+
+/* PCI/PCI-X/PCI-EX Config space */
+#define PCIX_COMMAND_REGISTER        0xE6
+#define PCIX_STATUS_REGISTER_LO      0xE8
+#define PCIX_STATUS_REGISTER_HI      0xEA
+#define PCI_HEADER_TYPE_REGISTER     0x0E
+#define PCIE_LINK_STATUS             0x12
+#define PCIE_DEVICE_CONTROL2         0x28
+
+#define PCIX_COMMAND_MMRBC_MASK      0x000C
+#define PCIX_COMMAND_MMRBC_SHIFT     0x2
+#define PCIX_STATUS_HI_MMRBC_MASK    0x0060
+#define PCIX_STATUS_HI_MMRBC_SHIFT   0x5
+#define PCIX_STATUS_HI_MMRBC_4K      0x3
+#define PCIX_STATUS_HI_MMRBC_2K      0x2
+#define PCIX_STATUS_LO_FUNC_MASK     0x7
+#define PCI_HEADER_TYPE_MULTIFUNC    0x80
+#define PCIE_LINK_WIDTH_MASK         0x3F0
+#define PCIE_LINK_WIDTH_SHIFT        4
+#define PCIE_LINK_SPEED_MASK         0x0F
+#define PCIE_LINK_SPEED_2500         0x01
+#define PCIE_LINK_SPEED_5000         0x02
+#define PCIE_DEVICE_CONTROL2_16ms    0x0005
+
+#ifndef ETH_ADDR_LEN
+#define ETH_ADDR_LEN                 6
+#endif
+
+#define PHY_REVISION_MASK      0xFFFFFFF0
+#define MAX_PHY_REG_ADDRESS    0x1F  /* 5 bit address bus (0-0x1F) */
+#define MAX_PHY_MULTI_PAGE_REG 0xF
+
+/* Bit definitions for valid PHY IDs. */
+/*
+ * I = Integrated
+ * E = External
+ */
+#define M88E1000_E_PHY_ID    0x01410C50
+#define M88E1000_I_PHY_ID    0x01410C30
+#define M88E1011_I_PHY_ID    0x01410C20
+#define IGP01E1000_I_PHY_ID  0x02A80380
+#define M88E1011_I_REV_4     0x04
+#define M88E1111_I_PHY_ID    0x01410CC0
+#define M88E1112_E_PHY_ID    0x01410C90
+#define I347AT4_E_PHY_ID     0x01410DC0
+#define M88E1340M_E_PHY_ID   0x01410DF0
+#define GG82563_E_PHY_ID     0x01410CA0
+#define IGP03E1000_E_PHY_ID  0x02A80390
+#define IFE_E_PHY_ID         0x02A80330
+#define IFE_PLUS_E_PHY_ID    0x02A80320
+#define IFE_C_E_PHY_ID       0x02A80310
+#define I82580_I_PHY_ID      0x015403A0
+#define I350_I_PHY_ID        0x015403B0
+#define IGP04E1000_E_PHY_ID  0x02A80391
+#define M88_VENDOR           0x0141
+
+/* M88E1000 Specific Registers */
+#define M88E1000_PHY_SPEC_CTRL     0x10  /* PHY Specific Control Register */
+#define M88E1000_PHY_SPEC_STATUS   0x11  /* PHY Specific Status Register */
+#define M88E1000_INT_ENABLE        0x12  /* Interrupt Enable Register */
+#define M88E1000_INT_STATUS        0x13  /* Interrupt Status Register */
+#define M88E1000_EXT_PHY_SPEC_CTRL 0x14  /* Extended PHY Specific Control */
+#define M88E1000_RX_ERR_CNTR       0x15  /* Receive Error Counter */
+
+#define M88E1000_PHY_EXT_CTRL      0x1A  /* PHY extend control register */
+#define M88E1000_PHY_PAGE_SELECT   0x1D  /* Reg 29 for page number setting */
+#define M88E1000_PHY_GEN_CONTROL   0x1E  /* Its meaning depends on reg 29 */
+#define M88E1000_PHY_VCO_REG_BIT8  0x100 /* Bits 8 & 11 are adjusted for */
+#define M88E1000_PHY_VCO_REG_BIT11 0x800    /* improved BER performance */
+
+/* M88E1000 PHY Specific Control Register */
+#define M88E1000_PSCR_JABBER_DISABLE    0x0001 /* 1=Jabber Function disabled */
+#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reverse enabled */
+#define M88E1000_PSCR_SQE_TEST          0x0004 /* 1=SQE Test enabled */
+/* 1=CLK125 low, 0=CLK125 toggling */
+#define M88E1000_PSCR_CLK125_DISABLE    0x0010
+#define M88E1000_PSCR_MDI_MANUAL_MODE  0x0000 /* MDI Crossover Mode bits 6:5 */
+                                               /* Manual MDI configuration */
+#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020  /* Manual MDIX configuration */
+/* 1000BASE-T: Auto crossover, 100BASE-TX/10BASE-T: MDI Mode */
+#define M88E1000_PSCR_AUTO_X_1000T     0x0040
+/* Auto crossover enabled all speeds */
+#define M88E1000_PSCR_AUTO_X_MODE      0x0060
+/*
+ * 1=Enable Extended 10BASE-T distance (Lower 10BASE-T Rx Threshold),
+ * 0=Normal 10BASE-T Rx Threshold
+ */
+#define M88E1000_PSCR_EN_10BT_EXT_DIST 0x0080
+/* 1=5-bit interface in 100BASE-TX, 0=MII interface in 100BASE-TX */
+#define M88E1000_PSCR_MII_5BIT_ENABLE      0x0100
+#define M88E1000_PSCR_SCRAMBLER_DISABLE    0x0200 /* 1=Scrambler disable */
+#define M88E1000_PSCR_FORCE_LINK_GOOD      0x0400 /* 1=Force link good */
+#define M88E1000_PSCR_ASSERT_CRS_ON_TX     0x0800 /* 1=Assert CRS on Tx */
+
+/* M88E1000 PHY Specific Status Register */
+#define M88E1000_PSSR_JABBER             0x0001 /* 1=Jabber */
+#define M88E1000_PSSR_REV_POLARITY       0x0002 /* 1=Polarity reversed */
+#define M88E1000_PSSR_DOWNSHIFT          0x0020 /* 1=Downshifted */
+#define M88E1000_PSSR_MDIX               0x0040 /* 1=MDIX; 0=MDI */
+/*
+ * 0 = <50M
+ * 1 = 50-80M
+ * 2 = 80-110M
+ * 3 = 110-140M
+ * 4 = >140M
+ */
+#define M88E1000_PSSR_CABLE_LENGTH       0x0380
+#define M88E1000_PSSR_LINK               0x0400 /* 1=Link up, 0=Link down */
+#define M88E1000_PSSR_SPD_DPLX_RESOLVED  0x0800 /* 1=Speed & Duplex resolved */
+#define M88E1000_PSSR_PAGE_RCVD          0x1000 /* 1=Page received */
+#define M88E1000_PSSR_DPLX               0x2000 /* 1=Duplex 0=Half Duplex */
+#define M88E1000_PSSR_SPEED              0xC000 /* Speed, bits 14:15 */
+#define M88E1000_PSSR_10MBS              0x0000 /* 00=10Mbs */
+#define M88E1000_PSSR_100MBS             0x4000 /* 01=100Mbs */
+#define M88E1000_PSSR_1000MBS            0x8000 /* 10=1000Mbs */
+
+#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7
+
+/* M88E1000 Extended PHY Specific Control Register */
+#define M88E1000_EPSCR_FIBER_LOOPBACK 0x4000 /* 1=Fiber loopback */
+/*
+ * 1 = Lost lock detect enabled.
+ * Will assert lost lock and bring
+ * link down if idle not seen
+ * within 1ms in 1000BASE-T
+ */
+#define M88E1000_EPSCR_DOWN_NO_IDLE   0x8000
+/*
+ * Number of times we will attempt to autonegotiate before downshifting if we
+ * are the master
+ */
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X   0x0000
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_2X   0x0400
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_3X   0x0800
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_4X   0x0C00
+/*
+ * Number of times we will attempt to autonegotiate before downshifting if we
+ * are the slave
+ */
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK  0x0300
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_DIS   0x0000
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X    0x0100
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_2X    0x0200
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_3X    0x0300
+#define M88E1000_EPSCR_TX_CLK_2_5       0x0060 /* 2.5 MHz TX_CLK */
+#define M88E1000_EPSCR_TX_CLK_25        0x0070 /* 25  MHz TX_CLK */
+#define M88E1000_EPSCR_TX_CLK_0         0x0000 /* NO  TX_CLK */
+
+/* M88E1111 Specific Registers */
+#define M88E1111_PHY_PAGE_SELECT1       0x16  /* for registers 0-28 */
+#define M88E1111_PHY_PAGE_SELECT2       0x1D  /* for registers 30-31 */
+
+/* M88E1111 page select register mask */
+#define M88E1111_PHY_PAGE_SELECT_MASK1  0xFF
+#define M88E1111_PHY_PAGE_SELECT_MASK2  0x3F
+
+/* Intel I347AT4 Registers */
+
+#define I347AT4_PCDL            0x10 /* PHY Cable Diagnostics Length */
+#define I347AT4_PCDC            0x15 /* PHY Cable Diagnostics Control */
+#define I347AT4_PAGE_SELECT     0x16
+
+/* I347AT4 Extended PHY Specific Control Register */
+
+/*
+ * Number of times we will attempt to autonegotiate before downshifting if we
+ * are the master
+ */
+#define I347AT4_PSCR_DOWNSHIFT_ENABLE 0x0800
+#define I347AT4_PSCR_DOWNSHIFT_MASK   0x7000
+#define I347AT4_PSCR_DOWNSHIFT_1X     0x0000
+#define I347AT4_PSCR_DOWNSHIFT_2X     0x1000
+#define I347AT4_PSCR_DOWNSHIFT_3X     0x2000
+#define I347AT4_PSCR_DOWNSHIFT_4X     0x3000
+#define I347AT4_PSCR_DOWNSHIFT_5X     0x4000
+#define I347AT4_PSCR_DOWNSHIFT_6X     0x5000
+#define I347AT4_PSCR_DOWNSHIFT_7X     0x6000
+#define I347AT4_PSCR_DOWNSHIFT_8X     0x7000
+
+/* I347AT4 PHY Cable Diagnostics Control */
+#define I347AT4_PCDC_CABLE_LENGTH_UNIT  0x0400 /* 0=cm 1=meters */
+
+/* M88E1112 only registers */
+#define M88E1112_VCT_DSP_DISTANCE       0x001A
+
+/* M88EC018 Rev 2 specific DownShift settings */
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK  0x0E00
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_1X    0x0000
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_2X    0x0200
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_3X    0x0400
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_4X    0x0600
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X    0x0800
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_6X    0x0A00
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_7X    0x0C00
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_8X    0x0E00
+
+/*
+ * Bits...
+ * 15-5: page
+ * 4-0: register offset
+ */
+#define GG82563_PAGE_SHIFT        5
+#define GG82563_REG(page, reg)    \
+        (((page) << GG82563_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS))
+#define GG82563_MIN_ALT_REG       30
+
+/* GG82563 Specific Registers */
+#define GG82563_PHY_SPEC_CTRL           \
+        GG82563_REG(0, 16) /* PHY Specific Control */
+#define GG82563_PHY_SPEC_STATUS         \
+        GG82563_REG(0, 17) /* PHY Specific Status */
+#define GG82563_PHY_INT_ENABLE          \
+        GG82563_REG(0, 18) /* Interrupt Enable */
+#define GG82563_PHY_SPEC_STATUS_2       \
+        GG82563_REG(0, 19) /* PHY Specific Status 2 */
+#define GG82563_PHY_RX_ERR_CNTR         \
+        GG82563_REG(0, 21) /* Receive Error Counter */
+#define GG82563_PHY_PAGE_SELECT         \
+        GG82563_REG(0, 22) /* Page Select */
+#define GG82563_PHY_SPEC_CTRL_2         \
+        GG82563_REG(0, 26) /* PHY Specific Control 2 */
+#define GG82563_PHY_PAGE_SELECT_ALT     \
+        GG82563_REG(0, 29) /* Alternate Page Select */
+#define GG82563_PHY_TEST_CLK_CTRL       \
+        GG82563_REG(0, 30) /* Test Clock Control (use reg. 29 to select) */
+
+#define GG82563_PHY_MAC_SPEC_CTRL       \
+        GG82563_REG(2, 21) /* MAC Specific Control Register */
+#define GG82563_PHY_MAC_SPEC_CTRL_2     \
+        GG82563_REG(2, 26) /* MAC Specific Control 2 */
+
+#define GG82563_PHY_DSP_DISTANCE    \
+        GG82563_REG(5, 26) /* DSP Distance */
+
+/* Page 193 - Port Control Registers */
+#define GG82563_PHY_KMRN_MODE_CTRL   \
+        GG82563_REG(193, 16) /* Kumeran Mode Control */
+#define GG82563_PHY_PORT_RESET          \
+        GG82563_REG(193, 17) /* Port Reset */
+#define GG82563_PHY_REVISION_ID         \
+        GG82563_REG(193, 18) /* Revision ID */
+#define GG82563_PHY_DEVICE_ID           \
+        GG82563_REG(193, 19) /* Device ID */
+#define GG82563_PHY_PWR_MGMT_CTRL       \
+        GG82563_REG(193, 20) /* Power Management Control */
+#define GG82563_PHY_RATE_ADAPT_CTRL     \
+        GG82563_REG(193, 25) /* Rate Adaptation Control */
+
+/* Page 194 - KMRN Registers */
+#define GG82563_PHY_KMRN_FIFO_CTRL_STAT \
+        GG82563_REG(194, 16) /* FIFO's Control/Status */
+#define GG82563_PHY_KMRN_CTRL           \
+        GG82563_REG(194, 17) /* Control */
+#define GG82563_PHY_INBAND_CTRL         \
+        GG82563_REG(194, 18) /* Inband Control */
+#define GG82563_PHY_KMRN_DIAGNOSTIC     \
+        GG82563_REG(194, 19) /* Diagnostic */
+#define GG82563_PHY_ACK_TIMEOUTS        \
+        GG82563_REG(194, 20) /* Acknowledge Timeouts */
+#define GG82563_PHY_ADV_ABILITY         \
+        GG82563_REG(194, 21) /* Advertised Ability */
+#define GG82563_PHY_LINK_PARTNER_ADV_ABILITY \
+        GG82563_REG(194, 23) /* Link Partner Advertised Ability */
+#define GG82563_PHY_ADV_NEXT_PAGE       \
+        GG82563_REG(194, 24) /* Advertised Next Page */
+#define GG82563_PHY_LINK_PARTNER_ADV_NEXT_PAGE \
+        GG82563_REG(194, 25) /* Link Partner Advertised Next Page */
+#define GG82563_PHY_KMRN_MISC           \
+        GG82563_REG(194, 26) /* Misc. */
+
+/* MDI Control */
+#define E1000_MDIC_DATA_MASK 0x0000FFFF
+#define E1000_MDIC_REG_MASK  0x001F0000
+#define E1000_MDIC_REG_SHIFT 16
+#define E1000_MDIC_PHY_MASK  0x03E00000
+#define E1000_MDIC_PHY_SHIFT 21
+#define E1000_MDIC_OP_WRITE  0x04000000
+#define E1000_MDIC_OP_READ   0x08000000
+#define E1000_MDIC_READY     0x10000000
+#define E1000_MDIC_INT_EN    0x20000000
+#define E1000_MDIC_ERROR     0x40000000
+#define E1000_MDIC_DEST      0x80000000
+
+/* SerDes Control */
+#define E1000_GEN_CTL_READY             0x80000000
+#define E1000_GEN_CTL_ADDRESS_SHIFT     8
+#define E1000_GEN_POLL_TIMEOUT          640
+
+/* LinkSec register fields */
+#define E1000_LSECTXCAP_SUM_MASK        0x00FF0000
+#define E1000_LSECTXCAP_SUM_SHIFT       16
+#define E1000_LSECRXCAP_SUM_MASK        0x00FF0000
+#define E1000_LSECRXCAP_SUM_SHIFT       16
+
+#define E1000_LSECTXCTRL_EN_MASK        0x00000003
+#define E1000_LSECTXCTRL_DISABLE        0x0
+#define E1000_LSECTXCTRL_AUTH           0x1
+#define E1000_LSECTXCTRL_AUTH_ENCRYPT   0x2
+#define E1000_LSECTXCTRL_AISCI          0x00000020
+#define E1000_LSECTXCTRL_PNTHRSH_MASK   0xFFFFFF00
+#define E1000_LSECTXCTRL_RSV_MASK       0x000000D8
+
+#define E1000_LSECRXCTRL_EN_MASK        0x0000000C
+#define E1000_LSECRXCTRL_EN_SHIFT       2
+#define E1000_LSECRXCTRL_DISABLE        0x0
+#define E1000_LSECRXCTRL_CHECK          0x1
+#define E1000_LSECRXCTRL_STRICT         0x2
+#define E1000_LSECRXCTRL_DROP           0x3
+#define E1000_LSECRXCTRL_PLSH           0x00000040
+#define E1000_LSECRXCTRL_RP             0x00000080
+#define E1000_LSECRXCTRL_RSV_MASK       0xFFFFFF33
+
+/* Tx Rate-Scheduler Config fields */
+#define E1000_RTTBCNRC_RS_ENA          0x80000000
+#define E1000_RTTBCNRC_RF_DEC_MASK     0x00003FFF
+#define E1000_RTTBCNRC_RF_INT_SHIFT     14
+#define E1000_RTTBCNRC_RF_INT_MASK     \
+       (E1000_RTTBCNRC_RF_DEC_MASK << E1000_RTTBCNRC_RF_INT_SHIFT)
+
+/* DMA Coalescing register fields */
+#define E1000_DMACR_DMACWT_MASK         0x00003FFF /* DMA Coalescing
+                                                    * Watchdog Timer */
+#define E1000_DMACR_DMACTHR_MASK        0x00FF0000 /* DMA Coalescing Rx
+                                                    * Threshold */
+#define E1000_DMACR_DMACTHR_SHIFT       16
+#define E1000_DMACR_DMAC_LX_MASK        0x30000000 /* Lx when no PCIe
+                                                    * transactions */
+#define E1000_DMACR_DMAC_LX_SHIFT       28
+#define E1000_DMACR_DMAC_EN             0x80000000 /* Enable DMA Coalescing */
+
+#define E1000_DMCTXTH_DMCTTHR_MASK      0x00000FFF /* DMA Coalescing Transmit
+                                                    * Threshold */
+
+#define E1000_DMCTLX_TTLX_MASK          0x00000FFF /* Time to LX request */
+
+#define E1000_DMCRTRH_UTRESH_MASK       0x0007FFFF /* Rx Traffic Rate
+                                                    * Threshold */
+#define E1000_DMCRTRH_LRPRCW            0x80000000 /* Rx packet rate in
+                                                    * current window */
+
+#define E1000_DMCCNT_CCOUNT_MASK        0x01FFFFFF /* DMA Coal Rx Traffic
+                                                    * Current Cnt */
+
+#define E1000_FCRTC_RTH_COAL_MASK       0x0003FFF0 /* Flow ctrl Rx Threshold
+                                                    * High val */
+#define E1000_FCRTC_RTH_COAL_SHIFT      4
+#define E1000_PCIEMISC_LX_DECISION      0x00000080 /* Lx power decision based
+                                                      on DMA coal */
+
+/* Proxy Filter Control */
+#define E1000_PROXYFC_D0               0x00000001  /* Enable offload in D0 */
+#define E1000_PROXYFC_EX               0x00000004  /* Directed exact proxy */
+#define E1000_PROXYFC_MC               0x00000008  /* Directed Multicast
+                                                    * Proxy */
+#define E1000_PROXYFC_BC               0x00000010  /* Broadcast Proxy Enable */
+#define E1000_PROXYFC_ARP_DIRECTED     0x00000020  /* Directed ARP Proxy
+                                                    * Enable */
+#define E1000_PROXYFC_IPV4             0x00000040  /* Directed IPv4 Enable */
+#define E1000_PROXYFC_IPV6             0x00000080  /* Directed IPv6 Enable */
+#define E1000_PROXYFC_NS               0x00000200  /* IPv6 Neighbor
+                                                    * Solicitation */
+#define E1000_PROXYFC_ARP              0x00000800  /* ARP Request Proxy
+                                                    * Enable */
+/* Proxy Status */
+#define E1000_PROXYS_CLEAR             0xFFFFFFFF  /* Clear */
+
+/* Firmware Status */
+#define E1000_FWSTS_FWRI               0x80000000 /* Firmware Reset
+                                                   * Indication */
+
+
+#endif /* _E1000_DEFINES_H_ */
diff --git a/lib/librte_pmd_igb/igb/e1000_hw.h b/lib/librte_pmd_igb/igb/e1000_hw.h
new file mode 100644 (file)
index 0000000..bed673b
--- /dev/null
@@ -0,0 +1,767 @@
+/******************************************************************************
+
+  Copyright (c) 2001-2011, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#ifndef _E1000_HW_H_
+#define _E1000_HW_H_
+
+#include "e1000_osdep.h"
+#include "e1000_regs.h"
+#include "e1000_defines.h"
+
+struct e1000_hw;
+
+#define E1000_DEV_ID_82576                    0x10C9
+#define E1000_DEV_ID_82576_FIBER              0x10E6
+#define E1000_DEV_ID_82576_SERDES             0x10E7
+#define E1000_DEV_ID_82576_QUAD_COPPER        0x10E8
+#define E1000_DEV_ID_82576_QUAD_COPPER_ET2    0x1526
+#define E1000_DEV_ID_82576_NS                 0x150A
+#define E1000_DEV_ID_82576_NS_SERDES          0x1518
+#define E1000_DEV_ID_82576_SERDES_QUAD        0x150D
+#define E1000_DEV_ID_82576_VF                 0x10CA
+#define E1000_DEV_ID_I350_VF                  0x1520
+#define E1000_DEV_ID_82575EB_COPPER           0x10A7
+#define E1000_DEV_ID_82575EB_FIBER_SERDES     0x10A9
+#define E1000_DEV_ID_82575GB_QUAD_COPPER      0x10D6
+#define E1000_DEV_ID_82580_COPPER             0x150E
+#define E1000_DEV_ID_82580_FIBER              0x150F
+#define E1000_DEV_ID_82580_SERDES             0x1510
+#define E1000_DEV_ID_82580_SGMII              0x1511
+#define E1000_DEV_ID_82580_COPPER_DUAL        0x1516
+#define E1000_DEV_ID_82580_QUAD_FIBER         0x1527
+#define E1000_DEV_ID_I350_COPPER              0x1521
+#define E1000_DEV_ID_I350_FIBER               0x1522
+#define E1000_DEV_ID_I350_SERDES              0x1523
+#define E1000_DEV_ID_I350_SGMII               0x1524
+#define E1000_DEV_ID_I350_DA4                 0x1546
+#define E1000_DEV_ID_DH89XXCC_SGMII           0x0438
+#define E1000_DEV_ID_DH89XXCC_SERDES          0x043A
+#define E1000_DEV_ID_DH89XXCC_BACKPLANE       0x043C
+#define E1000_DEV_ID_DH89XXCC_SFP             0x0440
+#define E1000_REVISION_0 0
+#define E1000_REVISION_1 1
+#define E1000_REVISION_2 2
+#define E1000_REVISION_3 3
+#define E1000_REVISION_4 4
+
+#define E1000_FUNC_0     0
+#define E1000_FUNC_1     1
+#define E1000_FUNC_2     2
+#define E1000_FUNC_3     3
+
+#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0   0
+#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1   3
+#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN2   6
+#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN3   9
+
+enum e1000_mac_type {
+       e1000_undefined = 0,
+       e1000_82575,
+       e1000_82576,
+       e1000_82580,
+       e1000_i350,
+       e1000_vfadapt,
+       e1000_vfadapt_i350,
+       e1000_num_macs  /* List is 1-based, so subtract 1 for TRUE count. */
+};
+
+enum e1000_media_type {
+       e1000_media_type_unknown = 0,
+       e1000_media_type_copper = 1,
+       e1000_media_type_fiber = 2,
+       e1000_media_type_internal_serdes = 3,
+       e1000_num_media_types
+};
+
+enum e1000_nvm_type {
+       e1000_nvm_unknown = 0,
+       e1000_nvm_none,
+       e1000_nvm_eeprom_spi,
+       e1000_nvm_eeprom_microwire,
+       e1000_nvm_flash_hw,
+       e1000_nvm_flash_sw
+};
+
+enum e1000_nvm_override {
+       e1000_nvm_override_none = 0,
+       e1000_nvm_override_spi_small,
+       e1000_nvm_override_spi_large,
+       e1000_nvm_override_microwire_small,
+       e1000_nvm_override_microwire_large
+};
+
+enum e1000_phy_type {
+       e1000_phy_unknown = 0,
+       e1000_phy_none,
+       e1000_phy_m88,
+       e1000_phy_igp,
+       e1000_phy_igp_2,
+       e1000_phy_gg82563,
+       e1000_phy_igp_3,
+       e1000_phy_ife,
+       e1000_phy_82580,
+       e1000_phy_vf,
+};
+
+enum e1000_bus_type {
+       e1000_bus_type_unknown = 0,
+       e1000_bus_type_pci,
+       e1000_bus_type_pcix,
+       e1000_bus_type_pci_express,
+       e1000_bus_type_reserved
+};
+
+enum e1000_bus_speed {
+       e1000_bus_speed_unknown = 0,
+       e1000_bus_speed_33,
+       e1000_bus_speed_66,
+       e1000_bus_speed_100,
+       e1000_bus_speed_120,
+       e1000_bus_speed_133,
+       e1000_bus_speed_2500,
+       e1000_bus_speed_5000,
+       e1000_bus_speed_reserved
+};
+
+enum e1000_bus_width {
+       e1000_bus_width_unknown = 0,
+       e1000_bus_width_pcie_x1,
+       e1000_bus_width_pcie_x2,
+       e1000_bus_width_pcie_x4 = 4,
+       e1000_bus_width_pcie_x8 = 8,
+       e1000_bus_width_32,
+       e1000_bus_width_64,
+       e1000_bus_width_reserved
+};
+
+enum e1000_1000t_rx_status {
+       e1000_1000t_rx_status_not_ok = 0,
+       e1000_1000t_rx_status_ok,
+       e1000_1000t_rx_status_undefined = 0xFF
+};
+
+enum e1000_rev_polarity {
+       e1000_rev_polarity_normal = 0,
+       e1000_rev_polarity_reversed,
+       e1000_rev_polarity_undefined = 0xFF
+};
+
+enum e1000_fc_mode {
+       e1000_fc_none = 0,
+       e1000_fc_rx_pause,
+       e1000_fc_tx_pause,
+       e1000_fc_full,
+       e1000_fc_default = 0xFF
+};
+
+enum e1000_ms_type {
+       e1000_ms_hw_default = 0,
+       e1000_ms_force_master,
+       e1000_ms_force_slave,
+       e1000_ms_auto
+};
+
+enum e1000_smart_speed {
+       e1000_smart_speed_default = 0,
+       e1000_smart_speed_on,
+       e1000_smart_speed_off
+};
+
+enum e1000_serdes_link_state {
+       e1000_serdes_link_down = 0,
+       e1000_serdes_link_autoneg_progress,
+       e1000_serdes_link_autoneg_complete,
+       e1000_serdes_link_forced_up
+};
+
+#define __le16 u16
+#define __le32 u32
+#define __le64 u64
+/* Receive Descriptor */
+struct e1000_rx_desc {
+       __le64 buffer_addr; /* Address of the descriptor's data buffer */
+       __le16 length;      /* Length of data DMAed into data buffer */
+       __le16 csum;        /* Packet checksum */
+       u8  status;         /* Descriptor status */
+       u8  errors;         /* Descriptor Errors */
+       __le16 special;
+};
+
+/* Receive Descriptor - Extended */
+union e1000_rx_desc_extended {
+       struct {
+               __le64 buffer_addr;
+               __le64 reserved;
+       } read;
+       struct {
+               struct {
+                       __le32 mrq;           /* Multiple Rx Queues */
+                       union {
+                               __le32 rss;         /* RSS Hash */
+                               struct {
+                                       __le16 ip_id;  /* IP id */
+                                       __le16 csum;   /* Packet Checksum */
+                               } csum_ip;
+                       } hi_dword;
+               } lower;
+               struct {
+                       __le32 status_error;  /* ext status/error */
+                       __le16 length;
+                       __le16 vlan;          /* VLAN tag */
+               } upper;
+       } wb;  /* writeback */
+};
+
+#define MAX_PS_BUFFERS 4
+/* Receive Descriptor - Packet Split */
+union e1000_rx_desc_packet_split {
+       struct {
+               /* one buffer for protocol header(s), three data buffers */
+               __le64 buffer_addr[MAX_PS_BUFFERS];
+       } read;
+       struct {
+               struct {
+                       __le32 mrq;           /* Multiple Rx Queues */
+                       union {
+                               __le32 rss;           /* RSS Hash */
+                               struct {
+                                       __le16 ip_id;    /* IP id */
+                                       __le16 csum;     /* Packet Checksum */
+                               } csum_ip;
+                       } hi_dword;
+               } lower;
+               struct {
+                       __le32 status_error;  /* ext status/error */
+                       __le16 length0;       /* length of buffer 0 */
+                       __le16 vlan;          /* VLAN tag */
+               } middle;
+               struct {
+                       __le16 header_status;
+                       __le16 length[3];     /* length of buffers 1-3 */
+               } upper;
+               __le64 reserved;
+       } wb; /* writeback */
+};
+
+/* Transmit Descriptor */
+struct e1000_tx_desc {
+       __le64 buffer_addr;   /* Address of the descriptor's data buffer */
+       union {
+               __le32 data;
+               struct {
+                       __le16 length;    /* Data buffer length */
+                       u8 cso;           /* Checksum offset */
+                       u8 cmd;           /* Descriptor control */
+               } flags;
+       } lower;
+       union {
+               __le32 data;
+               struct {
+                       u8 status;        /* Descriptor status */
+                       u8 css;           /* Checksum start */
+                       __le16 special;
+               } fields;
+       } upper;
+};
+
+/* Offload Context Descriptor */
+struct e1000_context_desc {
+       union {
+               __le32 ip_config;
+               struct {
+                       u8 ipcss;         /* IP checksum start */
+                       u8 ipcso;         /* IP checksum offset */
+                       __le16 ipcse;     /* IP checksum end */
+               } ip_fields;
+       } lower_setup;
+       union {
+               __le32 tcp_config;
+               struct {
+                       u8 tucss;         /* TCP checksum start */
+                       u8 tucso;         /* TCP checksum offset */
+                       __le16 tucse;     /* TCP checksum end */
+               } tcp_fields;
+       } upper_setup;
+       __le32 cmd_and_length;
+       union {
+               __le32 data;
+               struct {
+                       u8 status;        /* Descriptor status */
+                       u8 hdr_len;       /* Header length */
+                       __le16 mss;       /* Maximum segment size */
+               } fields;
+       } tcp_seg_setup;
+};
+
+/* Offload data descriptor */
+struct e1000_data_desc {
+       __le64 buffer_addr;   /* Address of the descriptor's data buffer */
+       union {
+               __le32 data;
+               struct {
+                       __le16 length;    /* Data buffer length */
+                       u8 typ_len_ext;
+                       u8 cmd;
+               } flags;
+       } lower;
+       union {
+               __le32 data;
+               struct {
+                       u8 status;        /* Descriptor status */
+                       u8 popts;         /* Packet Options */
+                       __le16 special;
+               } fields;
+       } upper;
+};
+
+/* Statistics counters collected by the MAC */
+struct e1000_hw_stats {
+       u64 crcerrs;
+       u64 algnerrc;
+       u64 symerrs;
+       u64 rxerrc;
+       u64 mpc;
+       u64 scc;
+       u64 ecol;
+       u64 mcc;
+       u64 latecol;
+       u64 colc;
+       u64 dc;
+       u64 tncrs;
+       u64 sec;
+       u64 cexterr;
+       u64 rlec;
+       u64 xonrxc;
+       u64 xontxc;
+       u64 xoffrxc;
+       u64 xofftxc;
+       u64 fcruc;
+       u64 prc64;
+       u64 prc127;
+       u64 prc255;
+       u64 prc511;
+       u64 prc1023;
+       u64 prc1522;
+       u64 gprc;
+       u64 bprc;
+       u64 mprc;
+       u64 gptc;
+       u64 gorc;
+       u64 gotc;
+       u64 rnbc;
+       u64 ruc;
+       u64 rfc;
+       u64 roc;
+       u64 rjc;
+       u64 mgprc;
+       u64 mgpdc;
+       u64 mgptc;
+       u64 tor;
+       u64 tot;
+       u64 tpr;
+       u64 tpt;
+       u64 ptc64;
+       u64 ptc127;
+       u64 ptc255;
+       u64 ptc511;
+       u64 ptc1023;
+       u64 ptc1522;
+       u64 mptc;
+       u64 bptc;
+       u64 tsctc;
+       u64 tsctfc;
+       u64 iac;
+       u64 icrxptc;
+       u64 icrxatc;
+       u64 ictxptc;
+       u64 ictxatc;
+       u64 ictxqec;
+       u64 ictxqmtc;
+       u64 icrxdmtc;
+       u64 icrxoc;
+       u64 cbtmpc;
+       u64 htdpmc;
+       u64 cbrdpc;
+       u64 cbrmpc;
+       u64 rpthc;
+       u64 hgptc;
+       u64 htcbdpc;
+       u64 hgorc;
+       u64 hgotc;
+       u64 lenerrs;
+       u64 scvpc;
+       u64 hrmpc;
+       u64 doosync;
+};
+
+struct e1000_vf_stats {
+       u64 base_gprc;
+       u64 base_gptc;
+       u64 base_gorc;
+       u64 base_gotc;
+       u64 base_mprc;
+       u64 base_gotlbc;
+       u64 base_gptlbc;
+       u64 base_gorlbc;
+       u64 base_gprlbc;
+
+       u32 last_gprc;
+       u32 last_gptc;
+       u32 last_gorc;
+       u32 last_gotc;
+       u32 last_mprc;
+       u32 last_gotlbc;
+       u32 last_gptlbc;
+       u32 last_gorlbc;
+       u32 last_gprlbc;
+
+       u64 gprc;
+       u64 gptc;
+       u64 gorc;
+       u64 gotc;
+       u64 mprc;
+       u64 gotlbc;
+       u64 gptlbc;
+       u64 gorlbc;
+       u64 gprlbc;
+};
+
+struct e1000_phy_stats {
+       u32 idle_errors;
+       u32 receive_errors;
+};
+
+struct e1000_host_mng_dhcp_cookie {
+       u32 signature;
+       u8  status;
+       u8  reserved0;
+       u16 vlan_id;
+       u32 reserved1;
+       u16 reserved2;
+       u8  reserved3;
+       u8  checksum;
+};
+
+/* Host Interface "Rev 1" */
+struct e1000_host_command_header {
+       u8 command_id;
+       u8 command_length;
+       u8 command_options;
+       u8 checksum;
+};
+
+#define E1000_HI_MAX_DATA_LENGTH     252
+struct e1000_host_command_info {
+       struct e1000_host_command_header command_header;
+       u8 command_data[E1000_HI_MAX_DATA_LENGTH];
+};
+
+/* Host Interface "Rev 2" */
+struct e1000_host_mng_command_header {
+       u8  command_id;
+       u8  checksum;
+       u16 reserved1;
+       u16 reserved2;
+       u16 command_length;
+};
+
+#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8
+struct e1000_host_mng_command_info {
+       struct e1000_host_mng_command_header command_header;
+       u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH];
+};
+
+#include "e1000_mac.h"
+#include "e1000_phy.h"
+#include "e1000_nvm.h"
+#include "e1000_manage.h"
+#include "e1000_mbx.h"
+
+struct e1000_mac_operations {
+       /* Function pointers for the MAC. */
+       s32  (*init_params)(struct e1000_hw *);
+       s32  (*id_led_init)(struct e1000_hw *);
+       s32  (*blink_led)(struct e1000_hw *);
+       s32  (*check_for_link)(struct e1000_hw *);
+       bool (*check_mng_mode)(struct e1000_hw *hw);
+       s32  (*cleanup_led)(struct e1000_hw *);
+       void (*clear_hw_cntrs)(struct e1000_hw *);
+       void (*clear_vfta)(struct e1000_hw *);
+       s32  (*get_bus_info)(struct e1000_hw *);
+       void (*set_lan_id)(struct e1000_hw *);
+       s32  (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *);
+       s32  (*led_on)(struct e1000_hw *);
+       s32  (*led_off)(struct e1000_hw *);
+       void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32);
+       s32  (*reset_hw)(struct e1000_hw *);
+       s32  (*init_hw)(struct e1000_hw *);
+       void (*shutdown_serdes)(struct e1000_hw *);
+       void (*power_up_serdes)(struct e1000_hw *);
+       s32  (*setup_link)(struct e1000_hw *);
+       s32  (*setup_physical_interface)(struct e1000_hw *);
+       s32  (*setup_led)(struct e1000_hw *);
+       void (*write_vfta)(struct e1000_hw *, u32, u32);
+       void (*config_collision_dist)(struct e1000_hw *);
+       void (*rar_set)(struct e1000_hw *, u8*, u32);
+       s32  (*read_mac_addr)(struct e1000_hw *);
+       s32  (*validate_mdi_setting)(struct e1000_hw *);
+       s32  (*mng_host_if_write)(struct e1000_hw *, u8*, u16, u16, u8*);
+       s32  (*mng_write_cmd_header)(struct e1000_hw *hw,
+                      struct e1000_host_mng_command_header*);
+       s32  (*mng_enable_host_if)(struct e1000_hw *);
+       s32  (*wait_autoneg)(struct e1000_hw *);
+};
+
+struct e1000_phy_operations {
+       s32  (*init_params)(struct e1000_hw *);
+       s32  (*acquire)(struct e1000_hw *);
+       s32  (*check_polarity)(struct e1000_hw *);
+       s32  (*check_reset_block)(struct e1000_hw *);
+       s32  (*commit)(struct e1000_hw *);
+       s32  (*force_speed_duplex)(struct e1000_hw *);
+       s32  (*get_cfg_done)(struct e1000_hw *hw);
+       s32  (*get_cable_length)(struct e1000_hw *);
+       s32  (*get_info)(struct e1000_hw *);
+       s32  (*read_reg)(struct e1000_hw *, u32, u16 *);
+       s32  (*read_reg_locked)(struct e1000_hw *, u32, u16 *);
+       void (*release)(struct e1000_hw *);
+       s32  (*reset)(struct e1000_hw *);
+       s32  (*set_d0_lplu_state)(struct e1000_hw *, bool);
+       s32  (*set_d3_lplu_state)(struct e1000_hw *, bool);
+       s32  (*write_reg)(struct e1000_hw *, u32, u16);
+       s32  (*write_reg_locked)(struct e1000_hw *, u32, u16);
+       void (*power_up)(struct e1000_hw *);
+       void (*power_down)(struct e1000_hw *);
+};
+
+struct e1000_nvm_operations {
+       s32  (*init_params)(struct e1000_hw *);
+       s32  (*acquire)(struct e1000_hw *);
+       s32  (*read)(struct e1000_hw *, u16, u16, u16 *);
+       void (*release)(struct e1000_hw *);
+       void (*reload)(struct e1000_hw *);
+       s32  (*update)(struct e1000_hw *);
+       s32  (*valid_led_default)(struct e1000_hw *, u16 *);
+       s32  (*validate)(struct e1000_hw *);
+       s32  (*write)(struct e1000_hw *, u16, u16, u16 *);
+};
+
+struct e1000_mac_info {
+       struct e1000_mac_operations ops;
+       u8 addr[ETH_ADDR_LEN];
+       u8 perm_addr[ETH_ADDR_LEN];
+
+       enum e1000_mac_type type;
+
+       u32 collision_delta;
+       u32 ledctl_default;
+       u32 ledctl_mode1;
+       u32 ledctl_mode2;
+       u32 mc_filter_type;
+       u32 tx_packet_delta;
+       u32 txcw;
+
+       u16 current_ifs_val;
+       u16 ifs_max_val;
+       u16 ifs_min_val;
+       u16 ifs_ratio;
+       u16 ifs_step_size;
+       u16 mta_reg_count;
+       u16 uta_reg_count;
+
+       /* Maximum size of the MTA register table in all supported adapters */
+       #define MAX_MTA_REG 128
+       u32 mta_shadow[MAX_MTA_REG];
+       u16 rar_entry_count;
+
+       u8  forced_speed_duplex;
+
+       bool adaptive_ifs;
+       bool has_fwsm;
+       bool arc_subsystem_valid;
+       bool asf_firmware_present;
+       bool autoneg;
+       bool autoneg_failed;
+       bool get_link_status;
+       bool in_ifs_mode;
+       enum e1000_serdes_link_state serdes_link_state;
+       bool serdes_has_link;
+       bool tx_pkt_filtering;
+};
+
+struct e1000_phy_info {
+       struct e1000_phy_operations ops;
+       enum e1000_phy_type type;
+
+       enum e1000_1000t_rx_status local_rx;
+       enum e1000_1000t_rx_status remote_rx;
+       enum e1000_ms_type ms_type;
+       enum e1000_ms_type original_ms_type;
+       enum e1000_rev_polarity cable_polarity;
+       enum e1000_smart_speed smart_speed;
+
+       u32 addr;
+       u32 id;
+       u32 reset_delay_us; /* in usec */
+       u32 revision;
+
+       enum e1000_media_type media_type;
+
+       u16 autoneg_advertised;
+       u16 autoneg_mask;
+       u16 cable_length;
+       u16 max_cable_length;
+       u16 min_cable_length;
+
+       u8 mdix;
+
+       bool disable_polarity_correction;
+       bool is_mdix;
+       bool polarity_correction;
+       bool reset_disable;
+       bool speed_downgraded;
+       bool autoneg_wait_to_complete;
+};
+
+struct e1000_nvm_info {
+       struct e1000_nvm_operations ops;
+       enum e1000_nvm_type type;
+       enum e1000_nvm_override override;
+
+       u32 flash_bank_size;
+       u32 flash_base_addr;
+
+       u16 word_size;
+       u16 delay_usec;
+       u16 address_bits;
+       u16 opcode_bits;
+       u16 page_size;
+};
+
+struct e1000_bus_info {
+       enum e1000_bus_type type;
+       enum e1000_bus_speed speed;
+       enum e1000_bus_width width;
+
+       u16 func;
+       u16 pci_cmd_word;
+};
+
+struct e1000_fc_info {
+       u32 high_water;          /* Flow control high-water mark */
+       u32 low_water;           /* Flow control low-water mark */
+       u16 pause_time;          /* Flow control pause timer */
+       u16 refresh_time;        /* Flow control refresh timer */
+       bool send_xon;           /* Flow control send XON */
+       bool strict_ieee;        /* Strict IEEE mode */
+       enum e1000_fc_mode current_mode; /* FC mode in effect */
+       enum e1000_fc_mode requested_mode; /* FC mode requested by caller */
+};
+
+struct e1000_mbx_operations {
+       s32 (*init_params)(struct e1000_hw *hw);
+       s32 (*read)(struct e1000_hw *, u32 *, u16,  u16);
+       s32 (*write)(struct e1000_hw *, u32 *, u16, u16);
+       s32 (*read_posted)(struct e1000_hw *, u32 *, u16,  u16);
+       s32 (*write_posted)(struct e1000_hw *, u32 *, u16, u16);
+       s32 (*check_for_msg)(struct e1000_hw *, u16);
+       s32 (*check_for_ack)(struct e1000_hw *, u16);
+       s32 (*check_for_rst)(struct e1000_hw *, u16);
+};
+
+struct e1000_mbx_stats {
+       u32 msgs_tx;
+       u32 msgs_rx;
+
+       u32 acks;
+       u32 reqs;
+       u32 rsts;
+};
+
+struct e1000_mbx_info {
+       struct e1000_mbx_operations ops;
+       struct e1000_mbx_stats stats;
+       u32 timeout;
+       u32 usec_delay;
+       u16 size;
+};
+
+struct e1000_dev_spec_82575 {
+       bool sgmii_active;
+       bool global_device_reset;
+       bool eee_disable;
+};
+
+struct e1000_dev_spec_vf {
+       u32 vf_number;
+       u32 v2p_mailbox;
+};
+
+struct e1000_hw {
+       void *back;
+
+       u8 *hw_addr;
+       u8 *flash_address;
+       unsigned long io_base;
+
+       struct e1000_mac_info  mac;
+       struct e1000_fc_info   fc;
+       struct e1000_phy_info  phy;
+       struct e1000_nvm_info  nvm;
+       struct e1000_bus_info  bus;
+       struct e1000_mbx_info mbx;
+       struct e1000_host_mng_dhcp_cookie mng_cookie;
+
+       union {
+               struct e1000_dev_spec_82575 _82575;
+               struct e1000_dev_spec_vf vf;
+       } dev_spec;
+
+       u16 device_id;
+       u16 subsystem_vendor_id;
+       u16 subsystem_device_id;
+       u16 vendor_id;
+
+       u8  revision_id;
+};
+
+#include "e1000_82575.h"
+
+/* These functions must be implemented by drivers */
+s32  e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
+s32  e1000_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
+void e1000_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value);
+void e1000_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value);
+
+#endif
diff --git a/lib/librte_pmd_igb/igb/e1000_mac.c b/lib/librte_pmd_igb/igb/e1000_mac.c
new file mode 100644 (file)
index 0000000..1fff576
--- /dev/null
@@ -0,0 +1,2170 @@
+/******************************************************************************
+
+  Copyright (c) 2001-2011, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#include "e1000_api.h"
+
+static s32 e1000_set_default_fc_generic(struct e1000_hw *hw);
+static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw);
+static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw);
+static s32 e1000_validate_mdi_setting_generic(struct e1000_hw *hw);
+static void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw);
+
+/**
+ *  e1000_init_mac_ops_generic - Initialize MAC function pointers
+ *  @hw: pointer to the HW structure
+ *
+ *  Sets up the function pointers to no-op functions.  Device-specific init
+ *  code is expected to overwrite the entries it actually implements; the
+ *  stubs guarantee every pointer is callable.
+ **/
+void e1000_init_mac_ops_generic(struct e1000_hw *hw)
+{
+       struct e1000_mac_info *mac = &hw->mac;
+       DEBUGFUNC("e1000_init_mac_ops_generic");
+
+       /* General Setup */
+       mac->ops.init_params = e1000_null_ops_generic;
+       mac->ops.init_hw = e1000_null_ops_generic;
+       mac->ops.reset_hw = e1000_null_ops_generic;
+       mac->ops.setup_physical_interface = e1000_null_ops_generic;
+       mac->ops.get_bus_info = e1000_null_ops_generic;
+       /* set_lan_id and the entries below get real generic implementations */
+       mac->ops.set_lan_id = e1000_set_lan_id_multi_port_pcie;
+       mac->ops.read_mac_addr = e1000_read_mac_addr_generic;
+       mac->ops.config_collision_dist = e1000_config_collision_dist_generic;
+       mac->ops.clear_hw_cntrs = e1000_null_mac_generic;
+       /* LED */
+       mac->ops.cleanup_led = e1000_null_ops_generic;
+       mac->ops.setup_led = e1000_null_ops_generic;
+       mac->ops.blink_led = e1000_null_ops_generic;
+       mac->ops.led_on = e1000_null_ops_generic;
+       mac->ops.led_off = e1000_null_ops_generic;
+       /* LINK */
+       mac->ops.setup_link = e1000_null_ops_generic;
+       mac->ops.get_link_up_info = e1000_null_link_info;
+       mac->ops.check_for_link = e1000_null_ops_generic;
+       mac->ops.wait_autoneg = e1000_wait_autoneg_generic;
+       /* Management */
+       mac->ops.check_mng_mode = e1000_null_mng_mode;
+       mac->ops.mng_host_if_write = e1000_mng_host_if_write_generic;
+       mac->ops.mng_write_cmd_header = e1000_mng_write_cmd_header_generic;
+       mac->ops.mng_enable_host_if = e1000_mng_enable_host_if_generic;
+       /* VLAN, MC, etc. */
+       mac->ops.update_mc_addr_list = e1000_null_update_mc;
+       mac->ops.clear_vfta = e1000_null_mac_generic;
+       mac->ops.write_vfta = e1000_null_write_vfta;
+       mac->ops.rar_set = e1000_rar_set_generic;
+       mac->ops.validate_mdi_setting = e1000_validate_mdi_setting_generic;
+}
+
+/**
+ *  e1000_null_ops_generic - No-op function, returns 0
+ *  @hw: pointer to the HW structure
+ *
+ *  Default stub for s32-returning MAC ops; always succeeds.
+ **/
+s32 e1000_null_ops_generic(struct e1000_hw *hw)
+{
+       DEBUGFUNC("e1000_null_ops_generic");
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_null_mac_generic - No-op function, return void
+ *  @hw: pointer to the HW structure
+ *
+ *  Default stub for void MAC ops; does nothing.
+ **/
+void e1000_null_mac_generic(struct e1000_hw *hw)
+{
+       DEBUGFUNC("e1000_null_mac_generic");
+       return;
+}
+
+/**
+ *  e1000_null_link_info - No-op function, return 0
+ *  @hw: pointer to the HW structure
+ *  @s: speed output (unused)
+ *  @d: duplex output (unused)
+ *
+ *  Default stub for get_link_up_info; outputs are left untouched.
+ **/
+s32 e1000_null_link_info(struct e1000_hw *hw, u16 *s, u16 *d)
+{
+       DEBUGFUNC("e1000_null_link_info");
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_null_mng_mode - No-op function, return FALSE
+ *  @hw: pointer to the HW structure
+ *
+ *  Default stub for check_mng_mode; reports management mode disabled.
+ **/
+bool e1000_null_mng_mode(struct e1000_hw *hw)
+{
+       DEBUGFUNC("e1000_null_mng_mode");
+       return FALSE;
+}
+
+/**
+ *  e1000_null_update_mc - No-op function, return void
+ *  @hw: pointer to the HW structure
+ *  @h: multicast address list (unused)
+ *  @a: address count (unused)
+ *
+ *  Default stub for update_mc_addr_list; does nothing.
+ **/
+void e1000_null_update_mc(struct e1000_hw *hw, u8 *h, u32 a)
+{
+       DEBUGFUNC("e1000_null_update_mc");
+       return;
+}
+
+/**
+ *  e1000_null_write_vfta - No-op function, return void
+ *  @hw: pointer to the HW structure
+ *  @a: VFTA offset (unused)
+ *  @b: VFTA value (unused)
+ *
+ *  Default stub for write_vfta; does nothing.
+ **/
+void e1000_null_write_vfta(struct e1000_hw *hw, u32 a, u32 b)
+{
+       DEBUGFUNC("e1000_null_write_vfta");
+       return;
+}
+
+/**
+ *  e1000_null_rar_set - No-op function, return void
+ *  @hw: pointer to the HW structure
+ *  @h: address to program (unused)
+ *  @a: RAR index (unused)
+ *
+ *  Default stub for rar_set; does nothing.
+ **/
+void e1000_null_rar_set(struct e1000_hw *hw, u8 *h, u32 a)
+{
+       DEBUGFUNC("e1000_null_rar_set");
+       return;
+}
+
+/**
+ *  e1000_get_bus_info_pci_generic - Get PCI(x) bus information
+ *  @hw: pointer to the HW structure
+ *
+ *  Determines and stores the system bus information for a particular
+ *  network interface.  The following bus information is determined and stored:
+ *  bus speed, bus width, type (PCI/PCIx), and PCI(-x) function.
+ *  All values are decoded from the device STATUS register.
+ *  Always returns E1000_SUCCESS.
+ **/
+s32 e1000_get_bus_info_pci_generic(struct e1000_hw *hw)
+{
+       struct e1000_mac_info *mac = &hw->mac;
+       struct e1000_bus_info *bus = &hw->bus;
+       u32 status = E1000_READ_REG(hw, E1000_STATUS);
+       s32 ret_val = E1000_SUCCESS;
+
+       DEBUGFUNC("e1000_get_bus_info_pci_generic");
+
+       /* PCI or PCI-X? */
+       bus->type = (status & E1000_STATUS_PCIX_MODE)
+                       ? e1000_bus_type_pcix
+                       : e1000_bus_type_pci;
+
+       /* Bus speed */
+       if (bus->type == e1000_bus_type_pci) {
+               /* Plain PCI: only 33 or 66 MHz are possible */
+               bus->speed = (status & E1000_STATUS_PCI66)
+                            ? e1000_bus_speed_66
+                            : e1000_bus_speed_33;
+       } else {
+               /* PCI-X: speed is a multi-bit field in STATUS */
+               switch (status & E1000_STATUS_PCIX_SPEED) {
+               case E1000_STATUS_PCIX_SPEED_66:
+                       bus->speed = e1000_bus_speed_66;
+                       break;
+               case E1000_STATUS_PCIX_SPEED_100:
+                       bus->speed = e1000_bus_speed_100;
+                       break;
+               case E1000_STATUS_PCIX_SPEED_133:
+                       bus->speed = e1000_bus_speed_133;
+                       break;
+               default:
+                       bus->speed = e1000_bus_speed_reserved;
+                       break;
+               }
+       }
+
+       /* Bus width */
+       bus->width = (status & E1000_STATUS_BUS64)
+                    ? e1000_bus_width_64
+                    : e1000_bus_width_32;
+
+       /* Which PCI(-X) function? */
+       mac->ops.set_lan_id(hw);
+
+       return ret_val;
+}
+
+/**
+ *  e1000_get_bus_info_pcie_generic - Get PCIe bus information
+ *  @hw: pointer to the HW structure
+ *
+ *  Determines and stores the system bus information for a particular
+ *  network interface.  The following bus information is determined and stored:
+ *  bus speed, bus width, type (PCIe), and PCIe function.
+ *  Values come from the PCIe capability Link Status register; if that read
+ *  fails, speed and width are recorded as unknown.  Always returns
+ *  E1000_SUCCESS.
+ **/
+s32 e1000_get_bus_info_pcie_generic(struct e1000_hw *hw)
+{
+       struct e1000_mac_info *mac = &hw->mac;
+       struct e1000_bus_info *bus = &hw->bus;
+       s32 ret_val;
+       u16 pcie_link_status;
+
+       DEBUGFUNC("e1000_get_bus_info_pcie_generic");
+
+       bus->type = e1000_bus_type_pci_express;
+
+       ret_val = e1000_read_pcie_cap_reg(hw,
+                                         PCIE_LINK_STATUS,
+                                         &pcie_link_status);
+       if (ret_val) {
+               /* Cannot read the capability; don't guess */
+               bus->width = e1000_bus_width_unknown;
+               bus->speed = e1000_bus_speed_unknown;
+       } else {
+               switch (pcie_link_status & PCIE_LINK_SPEED_MASK) {
+               case PCIE_LINK_SPEED_2500:
+                       bus->speed = e1000_bus_speed_2500;
+                       break;
+               case PCIE_LINK_SPEED_5000:
+                       bus->speed = e1000_bus_speed_5000;
+                       break;
+               default:
+                       bus->speed = e1000_bus_speed_unknown;
+                       break;
+               }
+
+               /* Link width field maps directly onto the enum values */
+               bus->width = (enum e1000_bus_width)((pcie_link_status &
+                                               PCIE_LINK_WIDTH_MASK) >>
+                                              PCIE_LINK_WIDTH_SHIFT);
+       }
+
+       mac->ops.set_lan_id(hw);
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
+ *
+ *  @hw: pointer to the HW structure
+ *
+ *  Determines the LAN function id by reading memory-mapped registers
+ *  and swaps the port value if requested.
+ **/
+static void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw)
+{
+       struct e1000_bus_info *bus = &hw->bus;
+       u32 reg;
+
+       /*
+        * The status register reports the correct function number
+        * for the device regardless of function swap state.
+        */
+       reg = E1000_READ_REG(hw, E1000_STATUS);
+       bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT;
+}
+
+/**
+ *  e1000_set_lan_id_multi_port_pci - Set LAN id for PCI multiple port devices
+ *  @hw: pointer to the HW structure
+ *
+ *  Determines the LAN function id by reading PCI config space.  On a
+ *  single-function device the STATUS register function field is not
+ *  meaningful, so the id is forced to zero.
+ **/
+void e1000_set_lan_id_multi_port_pci(struct e1000_hw *hw)
+{
+       struct e1000_bus_info *bus = &hw->bus;
+       u16 pci_header_type;
+       u32 status;
+
+       e1000_read_pci_cfg(hw, PCI_HEADER_TYPE_REGISTER, &pci_header_type);
+       if (pci_header_type & PCI_HEADER_TYPE_MULTIFUNC) {
+               /* Multi-function device: STATUS holds the function number */
+               status = E1000_READ_REG(hw, E1000_STATUS);
+               bus->func = (status & E1000_STATUS_FUNC_MASK)
+                           >> E1000_STATUS_FUNC_SHIFT;
+       } else {
+               bus->func = 0;
+       }
+}
+
+/**
+ *  e1000_set_lan_id_single_port - Set LAN id for a single port device
+ *  @hw: pointer to the HW structure
+ *
+ *  Sets the LAN function id to zero for a single port device.
+ **/
+void e1000_set_lan_id_single_port(struct e1000_hw *hw)
+{
+       struct e1000_bus_info *bus = &hw->bus;
+
+       /* Only one function exists, so it is always function 0 */
+       bus->func = 0;
+}
+
+/**
+ *  e1000_clear_vfta_generic - Clear VLAN filter table
+ *  @hw: pointer to the HW structure
+ *
+ *  Clears the register array which contains the VLAN filter table by
+ *  setting all the values to 0.
+ **/
+void e1000_clear_vfta_generic(struct e1000_hw *hw)
+{
+       u32 offset;
+
+       DEBUGFUNC("e1000_clear_vfta_generic");
+
+       for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
+               E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, 0);
+               /* Flush each write so they are not coalesced by a bridge */
+               E1000_WRITE_FLUSH(hw);
+       }
+}
+
+/**
+ *  e1000_write_vfta_generic - Write value to VLAN filter table
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset in VLAN filter table
+ *  @value: register value written to VLAN filter table
+ *
+ *  Writes value at the given offset in the register array which stores
+ *  the VLAN filter table.  The write is flushed to the hardware.
+ **/
+void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value)
+{
+       DEBUGFUNC("e1000_write_vfta_generic");
+
+       E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value);
+       E1000_WRITE_FLUSH(hw);
+}
+
+/**
+ *  e1000_init_rx_addrs_generic - Initialize receive address's
+ *  @hw: pointer to the HW structure
+ *  @rar_count: receive address registers
+ *
+ *  Setup the receive address registers by setting the base receive address
+ *  register to the devices MAC address and clearing all the other receive
+ *  address registers to 0.
+ **/
+void e1000_init_rx_addrs_generic(struct e1000_hw *hw, u16 rar_count)
+{
+       u32 i;
+       /* All-zero address used to invalidate the remaining RAR entries */
+       u8 mac_addr[ETH_ADDR_LEN] = {0};
+
+       DEBUGFUNC("e1000_init_rx_addrs_generic");
+
+       /* Setup the receive address */
+       DEBUGOUT("Programming MAC Address into RAR[0]\n");
+
+       hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
+
+       /* Zero out the other (rar_entry_count - 1) receive addresses */
+       DEBUGOUT1("Clearing RAR[1-%u]\n", rar_count-1);
+       for (i = 1; i < rar_count; i++)
+               hw->mac.ops.rar_set(hw, mac_addr, i);
+}
+
+/**
+ *  e1000_check_alt_mac_addr_generic - Check for alternate MAC addr
+ *  @hw: pointer to the HW structure
+ *
+ *  Checks the nvm for an alternate MAC address.  An alternate MAC address
+ *  can be setup by pre-boot software and must be treated like a permanent
+ *  address and must override the actual permanent MAC address. If an
+ *  alternate MAC address is found it is programmed into RAR0, replacing
+ *  the permanent address that was installed into RAR0 by the Si on reset.
+ *  This function will return SUCCESS unless it encounters an error while
+ *  reading the EEPROM.
+ **/
+s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
+{
+       u32 i;
+       s32 ret_val = E1000_SUCCESS;
+       u16 offset, nvm_alt_mac_addr_offset, nvm_data;
+       u8 alt_mac_addr[ETH_ADDR_LEN];
+
+       DEBUGFUNC("e1000_check_alt_mac_addr_generic");
+
+       ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &nvm_data);
+       if (ret_val)
+               goto out;
+
+       /* Alternate MAC address handling only applies to LOM parts */
+       if (!(nvm_data & NVM_COMPAT_LOM))
+               goto out;
+
+       ret_val = hw->nvm.ops.read(hw, NVM_ALT_MAC_ADDR_PTR, 1,
+                                &nvm_alt_mac_addr_offset);
+       if (ret_val) {
+               DEBUGOUT("NVM Read Error\n");
+               goto out;
+       }
+
+       if (nvm_alt_mac_addr_offset == 0xFFFF) {
+               /* There is no Alternate MAC Address */
+               goto out;
+       }
+
+       /* Each LAN function has its own address block in the NVM */
+       if (hw->bus.func == E1000_FUNC_1)
+               nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1;
+       if (hw->bus.func == E1000_FUNC_2)
+               nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN2;
+
+       if (hw->bus.func == E1000_FUNC_3)
+               nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN3;
+       /* NVM words are 16 bits, so read the address two bytes at a time */
+       for (i = 0; i < ETH_ADDR_LEN; i += 2) {
+               offset = nvm_alt_mac_addr_offset + (i >> 1);
+               ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data);
+               if (ret_val) {
+                       DEBUGOUT("NVM Read Error\n");
+                       goto out;
+               }
+
+               alt_mac_addr[i] = (u8)(nvm_data & 0xFF);
+               alt_mac_addr[i + 1] = (u8)(nvm_data >> 8);
+       }
+
+       /* if multicast bit is set, the alternate address will not be used */
+       if (alt_mac_addr[0] & 0x01) {
+               DEBUGOUT("Ignoring Alternate Mac Address with MC bit set\n");
+               goto out;
+       }
+
+       /*
+        * We have a valid alternate MAC address, and we want to treat it the
+        * same as the normal permanent MAC address stored by the HW into the
+        * RAR. Do this by mapping this address into RAR0.
+        */
+       hw->mac.ops.rar_set(hw, alt_mac_addr, 0);
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_rar_set_generic - Set receive address register
+ *  @hw: pointer to the HW structure
+ *  @addr: pointer to the receive address (6 bytes, network byte order)
+ *  @index: receive address array register
+ *
+ *  Sets the receive address array register at index to the address passed
+ *  in by addr.  The Address Valid (AV) bit is set only for a non-zero
+ *  address.
+ **/
+void e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index)
+{
+       u32 rar_low, rar_high;
+
+       DEBUGFUNC("e1000_rar_set_generic");
+
+       /*
+        * HW expects these in little endian so we reverse the byte order
+        * from network order (big endian) to little endian
+        */
+       rar_low = ((u32) addr[0] |
+                  ((u32) addr[1] << 8) |
+                  ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
+
+       rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
+
+       /* If MAC address zero, no need to set the AV bit */
+       if (rar_low || rar_high)
+               rar_high |= E1000_RAH_AV;
+
+       /*
+        * Some bridges will combine consecutive 32-bit writes into
+        * a single burst write, which will malfunction on some parts.
+        * The flushes avoid this.
+        */
+       E1000_WRITE_REG(hw, E1000_RAL(index), rar_low);
+       E1000_WRITE_FLUSH(hw);
+       E1000_WRITE_REG(hw, E1000_RAH(index), rar_high);
+       E1000_WRITE_FLUSH(hw);
+}
+
+/**
+ *  e1000_update_mc_addr_list_generic - Update Multicast addresses
+ *  @hw: pointer to the HW structure
+ *  @mc_addr_list: array of multicast addresses to program (packed, 6 bytes
+ *                 per address)
+ *  @mc_addr_count: number of multicast addresses to program
+ *
+ *  Updates entire Multicast Table Array.
+ *  The caller must have a packed mc_addr_list of multicast addresses.
+ **/
+void e1000_update_mc_addr_list_generic(struct e1000_hw *hw,
+                                       u8 *mc_addr_list, u32 mc_addr_count)
+{
+       u32 hash_value, hash_bit, hash_reg;
+       int i;
+
+       DEBUGFUNC("e1000_update_mc_addr_list_generic");
+
+       /* clear mta_shadow */
+       memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
+
+       /* update mta_shadow from mc_addr_list */
+       for (i = 0; (u32) i < mc_addr_count; i++) {
+               hash_value = e1000_hash_mc_addr_generic(hw, mc_addr_list);
+
+               /* hash selects register (upper bits) and bit (lower 5 bits) */
+               hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
+               hash_bit = hash_value & 0x1F;
+
+               hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit);
+               mc_addr_list += (ETH_ADDR_LEN);
+       }
+
+       /* replace the entire MTA table */
+       for (i = hw->mac.mta_reg_count - 1; i >= 0; i--)
+               E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, hw->mac.mta_shadow[i]);
+       E1000_WRITE_FLUSH(hw);
+}
+
+/**
+ *  e1000_hash_mc_addr_generic - Generate a multicast hash value
+ *  @hw: pointer to the HW structure
+ *  @mc_addr: pointer to a multicast address
+ *
+ *  Generates a multicast address hash value which is used to determine
+ *  the multicast filter table array address and new table value.  The
+ *  hash is built from the top bytes of the address, shifted according to
+ *  hw->mac.mc_filter_type and masked to the MTA size.
+ **/
+u32 e1000_hash_mc_addr_generic(struct e1000_hw *hw, u8 *mc_addr)
+{
+       u32 hash_value, hash_mask;
+       u8 bit_shift = 0;
+
+       DEBUGFUNC("e1000_hash_mc_addr_generic");
+
+       /* Register count multiplied by bits per register */
+       hash_mask = (hw->mac.mta_reg_count * 32) - 1;
+
+       /*
+        * For a mc_filter_type of 0, bit_shift is the number of left-shifts
+        * where 0xFF would still fall within the hash mask.
+        */
+       while (hash_mask >> bit_shift != 0xFF)
+               bit_shift++;
+
+       /*
+        * The portion of the address that is used for the hash table
+        * is determined by the mc_filter_type setting.
+        * The algorithm is such that there is a total of 8 bits of shifting.
+        * The bit_shift for a mc_filter_type of 0 represents the number of
+        * left-shifts where the MSB of mc_addr[5] would still fall within
+        * the hash_mask.  Case 0 does this exactly.  Since there are a total
+        * of 8 bits of shifting, then mc_addr[4] will shift right the
+        * remaining number of bits. Thus 8 - bit_shift.  The rest of the
+        * cases are a variation of this algorithm...essentially raising the
+        * number of bits to shift mc_addr[5] left, while still keeping the
+        * 8-bit shifting total.
+        *
+        * For example, given the following Destination MAC Address and an
+        * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask),
+        * we can see that the bit_shift for case 0 is 4.  These are the hash
+        * values resulting from each mc_filter_type...
+        * [0] [1] [2] [3] [4] [5]
+        * 01  AA  00  12  34  56
+        * LSB                 MSB
+        *
+        * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563
+        * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6
+        * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163
+        * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634
+        */
+       switch (hw->mac.mc_filter_type) {
+       default:
+       case 0:
+               break;
+       case 1:
+               bit_shift += 1;
+               break;
+       case 2:
+               bit_shift += 2;
+               break;
+       case 3:
+               bit_shift += 4;
+               break;
+       }
+
+       /* Combine the two address bytes and clip to the table size */
+       hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
+                                 (((u16) mc_addr[5]) << bit_shift)));
+
+       return hash_value;
+}
+
+/**
+ *  e1000_pcix_mmrbc_workaround_generic - Fix incorrect MMRBC value
+ *  @hw: pointer to the HW structure
+ *
+ *  In certain situations, a system BIOS may report that the PCIx maximum
+ *  memory read byte count (MMRBC) value is higher than than the actual
+ *  value. We check the PCIx command register with the current PCIx status
+ *  register.  If the command value exceeds what the status register
+ *  reports, the command register is lowered to match.
+ **/
+void e1000_pcix_mmrbc_workaround_generic(struct e1000_hw *hw)
+{
+       u16 cmd_mmrbc;
+       u16 pcix_cmd;
+       u16 pcix_stat_hi_word;
+       u16 stat_mmrbc;
+
+       DEBUGFUNC("e1000_pcix_mmrbc_workaround_generic");
+
+       /* Workaround for PCI-X issue when BIOS sets MMRBC incorrectly */
+       if (hw->bus.type != e1000_bus_type_pcix)
+               return;
+
+       e1000_read_pci_cfg(hw, PCIX_COMMAND_REGISTER, &pcix_cmd);
+       e1000_read_pci_cfg(hw, PCIX_STATUS_REGISTER_HI, &pcix_stat_hi_word);
+       cmd_mmrbc = (pcix_cmd & PCIX_COMMAND_MMRBC_MASK) >>
+                    PCIX_COMMAND_MMRBC_SHIFT;
+       stat_mmrbc = (pcix_stat_hi_word & PCIX_STATUS_HI_MMRBC_MASK) >>
+                     PCIX_STATUS_HI_MMRBC_SHIFT;
+       /* Cap 4K status readings at 2K before comparing */
+       if (stat_mmrbc == PCIX_STATUS_HI_MMRBC_4K)
+               stat_mmrbc = PCIX_STATUS_HI_MMRBC_2K;
+       if (cmd_mmrbc > stat_mmrbc) {
+               pcix_cmd &= ~PCIX_COMMAND_MMRBC_MASK;
+               pcix_cmd |= stat_mmrbc << PCIX_COMMAND_MMRBC_SHIFT;
+               e1000_write_pci_cfg(hw, PCIX_COMMAND_REGISTER, &pcix_cmd);
+       }
+}
+
+/**
+ *  e1000_clear_hw_cntrs_base_generic - Clear base hardware counters
+ *  @hw: pointer to the HW structure
+ *
+ *  Clears the base hardware counters by reading the counter registers.
+ *  The statistics registers are clear-on-read; the returned values are
+ *  intentionally discarded.
+ **/
+void e1000_clear_hw_cntrs_base_generic(struct e1000_hw *hw)
+{
+       DEBUGFUNC("e1000_clear_hw_cntrs_base_generic");
+
+       E1000_READ_REG(hw, E1000_CRCERRS);
+       E1000_READ_REG(hw, E1000_SYMERRS);
+       E1000_READ_REG(hw, E1000_MPC);
+       E1000_READ_REG(hw, E1000_SCC);
+       E1000_READ_REG(hw, E1000_ECOL);
+       E1000_READ_REG(hw, E1000_MCC);
+       E1000_READ_REG(hw, E1000_LATECOL);
+       E1000_READ_REG(hw, E1000_COLC);
+       E1000_READ_REG(hw, E1000_DC);
+       E1000_READ_REG(hw, E1000_SEC);
+       E1000_READ_REG(hw, E1000_RLEC);
+       E1000_READ_REG(hw, E1000_XONRXC);
+       E1000_READ_REG(hw, E1000_XONTXC);
+       E1000_READ_REG(hw, E1000_XOFFRXC);
+       E1000_READ_REG(hw, E1000_XOFFTXC);
+       E1000_READ_REG(hw, E1000_FCRUC);
+       E1000_READ_REG(hw, E1000_GPRC);
+       E1000_READ_REG(hw, E1000_BPRC);
+       E1000_READ_REG(hw, E1000_MPRC);
+       E1000_READ_REG(hw, E1000_GPTC);
+       E1000_READ_REG(hw, E1000_GORCL);
+       E1000_READ_REG(hw, E1000_GORCH);
+       E1000_READ_REG(hw, E1000_GOTCL);
+       E1000_READ_REG(hw, E1000_GOTCH);
+       E1000_READ_REG(hw, E1000_RNBC);
+       E1000_READ_REG(hw, E1000_RUC);
+       E1000_READ_REG(hw, E1000_RFC);
+       E1000_READ_REG(hw, E1000_ROC);
+       E1000_READ_REG(hw, E1000_RJC);
+       E1000_READ_REG(hw, E1000_TORL);
+       E1000_READ_REG(hw, E1000_TORH);
+       E1000_READ_REG(hw, E1000_TOTL);
+       E1000_READ_REG(hw, E1000_TOTH);
+       E1000_READ_REG(hw, E1000_TPR);
+       E1000_READ_REG(hw, E1000_TPT);
+       E1000_READ_REG(hw, E1000_MPTC);
+       E1000_READ_REG(hw, E1000_BPTC);
+}
+
+/**
+ *  e1000_check_for_copper_link_generic - Check for link (Copper)
+ *  @hw: pointer to the HW structure
+ *
+ *  Checks to see of the link status of the hardware has changed.  If a
+ *  change in link status has been detected, then we read the PHY registers
+ *  to get the current speed/duplex if link exists.
+ *
+ *  Returns E1000_SUCCESS when there is nothing to do (no pending status
+ *  change, or no link), -E1000_ERR_CONFIG when speed/duplex is forced,
+ *  or the error from flow-control configuration.
+ **/
+s32 e1000_check_for_copper_link_generic(struct e1000_hw *hw)
+{
+       struct e1000_mac_info *mac = &hw->mac;
+       s32 ret_val;
+       bool link;
+
+       /* NOTE(review): string omits the _generic suffix used elsewhere */
+       DEBUGFUNC("e1000_check_for_copper_link");
+
+       /*
+        * We only want to go out to the PHY registers to see if Auto-Neg
+        * has completed and/or if our link status has changed.  The
+        * get_link_status flag is set upon receiving a Link Status
+        * Change or Rx Sequence Error interrupt.
+        */
+       if (!mac->get_link_status) {
+               ret_val = E1000_SUCCESS;
+               goto out;
+       }
+
+       /*
+        * First we want to see if the MII Status Register reports
+        * link.  If so, then we want to get the current speed/duplex
+        * of the PHY.
+        */
+       ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
+       if (ret_val)
+               goto out;
+
+       if (!link)
+               goto out; /* No link detected */
+
+       mac->get_link_status = FALSE;
+
+       /*
+        * Check if there was DownShift, must be checked
+        * immediately after link-up
+        */
+       e1000_check_downshift_generic(hw);
+
+       /*
+        * If we are forcing speed/duplex, then we simply return since
+        * we have already determined whether we have link or not.
+        */
+       if (!mac->autoneg) {
+               ret_val = -E1000_ERR_CONFIG;
+               goto out;
+       }
+
+       /*
+        * Auto-Neg is enabled.  Auto Speed Detection takes care
+        * of MAC speed/duplex configuration.  So we only need to
+        * configure Collision Distance in the MAC.
+        */
+       mac->ops.config_collision_dist(hw);
+
+       /*
+        * Configure Flow Control now that Auto-Neg has completed.
+        * First, we need to restore the desired flow control
+        * settings because we may have had to re-autoneg with a
+        * different link partner.
+        */
+       ret_val = e1000_config_fc_after_link_up_generic(hw);
+       if (ret_val)
+               DEBUGOUT("Error configuring flow control\n");
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_check_for_fiber_link_generic - Check for link (Fiber)
+ *  @hw: pointer to the HW structure
+ *
+ *  Checks for link up on the hardware.  If link is not up and we have
+ *  a signal, then we need to force link up.  Conversely, if link was
+ *  forced and the partner starts sending /C/ ordered sets, re-enable
+ *  auto-negotiation.
+ **/
+s32 e1000_check_for_fiber_link_generic(struct e1000_hw *hw)
+{
+       struct e1000_mac_info *mac = &hw->mac;
+       u32 rxcw;
+       u32 ctrl;
+       u32 status;
+       s32 ret_val = E1000_SUCCESS;
+
+       DEBUGFUNC("e1000_check_for_fiber_link_generic");
+
+       ctrl = E1000_READ_REG(hw, E1000_CTRL);
+       status = E1000_READ_REG(hw, E1000_STATUS);
+       rxcw = E1000_READ_REG(hw, E1000_RXCW);
+
+       /*
+        * If we don't have link (auto-negotiation failed or link partner
+        * cannot auto-negotiate), the cable is plugged in (we have signal),
+        * and our link partner is not trying to auto-negotiate with us (we
+        * are receiving idles or data), we need to force link up. We also
+        * need to give auto-negotiation time to complete, in case the cable
+        * was just plugged in. The autoneg_failed flag does this.
+        */
+       /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */
+       if ((ctrl & E1000_CTRL_SWDPIN1) && (!(status & E1000_STATUS_LU)) &&
+           (!(rxcw & E1000_RXCW_C))) {
+               if (mac->autoneg_failed == 0) {
+                       /* First failure: give autoneg one more pass */
+                       mac->autoneg_failed = 1;
+                       goto out;
+               }
+               DEBUGOUT("NOT Rx'ing /C/, disable AutoNeg and force link.\n");
+
+               /* Disable auto-negotiation in the TXCW register */
+               E1000_WRITE_REG(hw, E1000_TXCW, (mac->txcw & ~E1000_TXCW_ANE));
+
+               /* Force link-up and also force full-duplex. */
+               ctrl = E1000_READ_REG(hw, E1000_CTRL);
+               ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
+               E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+               /* Configure Flow Control after forcing link up. */
+               ret_val = e1000_config_fc_after_link_up_generic(hw);
+               if (ret_val) {
+                       DEBUGOUT("Error configuring flow control\n");
+                       goto out;
+               }
+       } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
+               /*
+                * If we are forcing link and we are receiving /C/ ordered
+                * sets, re-enable auto-negotiation in the TXCW register
+                * and disable forced link in the Device Control register
+                * in an attempt to auto-negotiate with our link partner.
+                */
+               DEBUGOUT("Rx'ing /C/, enable AutoNeg and stop forcing link.\n");
+               E1000_WRITE_REG(hw, E1000_TXCW, mac->txcw);
+               E1000_WRITE_REG(hw, E1000_CTRL, (ctrl & ~E1000_CTRL_SLU));
+
+               mac->serdes_has_link = TRUE;
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_check_for_serdes_link_generic - Check for link (Serdes)
+ *  @hw: pointer to the HW structure
+ *
+ *  Checks for link up on the hardware.  If link is not up and we have
+ *  a signal, then we need to force link up.
+ **/
+s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
+{
+       struct e1000_mac_info *mac = &hw->mac;
+       u32 rxcw;
+       u32 ctrl;
+       u32 status;
+       s32 ret_val = E1000_SUCCESS;
+
+       DEBUGFUNC("e1000_check_for_serdes_link_generic");
+
+       ctrl = E1000_READ_REG(hw, E1000_CTRL);
+       status = E1000_READ_REG(hw, E1000_STATUS);
+       rxcw = E1000_READ_REG(hw, E1000_RXCW);
+
+       /*
+        * If we don't have link (auto-negotiation failed or link partner
+        * cannot auto-negotiate), and our link partner is not trying to
+        * auto-negotiate with us (we are receiving idles or data),
+        * we need to force link up. We also need to give auto-negotiation
+        * time to complete.
+        */
+       /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */
+       if ((!(status & E1000_STATUS_LU)) && (!(rxcw & E1000_RXCW_C))) {
+               if (mac->autoneg_failed == 0) {
+                       mac->autoneg_failed = 1;
+                       goto out;
+               }
+               DEBUGOUT("NOT Rx'ing /C/, disable AutoNeg and force link.\n");
+
+               /* Disable auto-negotiation in the TXCW register */
+               E1000_WRITE_REG(hw, E1000_TXCW, (mac->txcw & ~E1000_TXCW_ANE));
+
+               /* Force link-up and also force full-duplex. */
+               ctrl = E1000_READ_REG(hw, E1000_CTRL);
+               ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
+               E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+               /* Configure Flow Control after forcing link up. */
+               ret_val = e1000_config_fc_after_link_up_generic(hw);
+               if (ret_val) {
+                       DEBUGOUT("Error configuring flow control\n");
+                       goto out;
+               }
+       } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
+               /*
+                * If we are forcing link and we are receiving /C/ ordered
+                * sets, re-enable auto-negotiation in the TXCW register
+                * and disable forced link in the Device Control register
+                * in an attempt to auto-negotiate with our link partner.
+                */
+               DEBUGOUT("Rx'ing /C/, enable AutoNeg and stop forcing link.\n");
+               E1000_WRITE_REG(hw, E1000_TXCW, mac->txcw);
+               E1000_WRITE_REG(hw, E1000_CTRL, (ctrl & ~E1000_CTRL_SLU));
+
+               mac->serdes_has_link = TRUE;
+       } else if (!(E1000_TXCW_ANE & E1000_READ_REG(hw, E1000_TXCW))) {
+               /*
+                * If we force link for non-auto-negotiation switch, check
+                * link status based on MAC synchronization for internal
+                * serdes media type.
+                */
+               /* SYNCH bit and IV bit are sticky. */
+               usec_delay(10);
+               rxcw = E1000_READ_REG(hw, E1000_RXCW);
+               if (rxcw & E1000_RXCW_SYNCH) {
+                       if (!(rxcw & E1000_RXCW_IV)) {
+                               mac->serdes_has_link = TRUE;
+                               DEBUGOUT("SERDES: Link up - forced.\n");
+                       }
+               } else {
+                       mac->serdes_has_link = FALSE;
+                       DEBUGOUT("SERDES: Link down - force failed.\n");
+               }
+       }
+
+       if (E1000_TXCW_ANE & E1000_READ_REG(hw, E1000_TXCW)) {
+               status = E1000_READ_REG(hw, E1000_STATUS);
+               if (status & E1000_STATUS_LU) {
+                       /* SYNCH bit and IV bit are sticky, so reread rxcw. */
+                       usec_delay(10);
+                       rxcw = E1000_READ_REG(hw, E1000_RXCW);
+                       if (rxcw & E1000_RXCW_SYNCH) {
+                               if (!(rxcw & E1000_RXCW_IV)) {
+                                       mac->serdes_has_link = TRUE;
+                                       DEBUGOUT("SERDES: Link up - autoneg "
+                                          "completed sucessfully.\n");
+                               } else {
+                                       mac->serdes_has_link = FALSE;
+                                       DEBUGOUT("SERDES: Link down - invalid"
+                                          "codewords detected in autoneg.\n");
+                               }
+                       } else {
+                               mac->serdes_has_link = FALSE;
+                               DEBUGOUT("SERDES: Link down - no sync.\n");
+                       }
+               } else {
+                       mac->serdes_has_link = FALSE;
+                       DEBUGOUT("SERDES: Link down - autoneg failed\n");
+               }
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_setup_link_generic - Setup flow control and link settings
+ *  @hw: pointer to the HW structure
+ *
+ *  Determines which flow control settings to use, then configures flow
+ *  control.  Calls the appropriate media-specific link configuration
+ *  function.  Assuming the adapter has a valid link partner, a valid link
+ *  should be established.  Assumes the hardware has previously been reset
+ *  and the transmitter and receiver are not enabled.
+ **/
+s32 e1000_setup_link_generic(struct e1000_hw *hw)
+{
+       s32 ret_val = E1000_SUCCESS;
+
+       DEBUGFUNC("e1000_setup_link_generic");
+
+       /*
+        * In the case of the phy reset being blocked, we already have a link.
+        * We do not need to set it up again.
+        */
+       if (e1000_check_reset_block(hw))
+               goto out;
+
+       /*
+        * If requested flow control is set to default, set flow control
+        * based on the EEPROM flow control settings.
+        */
+       if (hw->fc.requested_mode == e1000_fc_default) {
+               ret_val = e1000_set_default_fc_generic(hw);
+               if (ret_val)
+                       goto out;
+       }
+
+       /*
+        * Save off the requested flow control mode for use later.  Depending
+        * on the link partner's capabilities, we may or may not use this mode.
+        */
+       hw->fc.current_mode = hw->fc.requested_mode;
+
+       DEBUGOUT1("After fix-ups FlowControl is now = %x\n",
+               hw->fc.current_mode);
+
+       /* Call the necessary media_type subroutine to configure the link. */
+       ret_val = hw->mac.ops.setup_physical_interface(hw);
+       if (ret_val)
+               goto out;
+
+       /*
+        * Initialize the flow control address, type, and PAUSE timer
+        * registers to their default values.  This is done even if flow
+        * control is disabled, because it does not hurt anything to
+        * initialize these registers.
+        */
+       DEBUGOUT("Initializing the Flow Control address, type and timer regs\n");
+       E1000_WRITE_REG(hw, E1000_FCT, FLOW_CONTROL_TYPE);
+       E1000_WRITE_REG(hw, E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH);
+       E1000_WRITE_REG(hw, E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW);
+
+       E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
+
+       ret_val = e1000_set_fc_watermarks_generic(hw);
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_setup_fiber_serdes_link_generic - Setup link for fiber/serdes
+ *  @hw: pointer to the HW structure
+ *
+ *  Configures collision distance and flow control for fiber and serdes
+ *  links.  Upon successful setup, poll for link.
+ **/
+s32 e1000_setup_fiber_serdes_link_generic(struct e1000_hw *hw)
+{
+       struct e1000_mac_info *mac = &hw->mac;
+       u32 ctrl;
+       s32 ret_val = E1000_SUCCESS;
+
+       DEBUGFUNC("e1000_setup_fiber_serdes_link_generic");
+
+       ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+       /* Take the link out of reset */
+       ctrl &= ~E1000_CTRL_LRST;
+
+       mac->ops.config_collision_dist(hw);
+
+       ret_val = e1000_commit_fc_settings_generic(hw);
+       if (ret_val)
+               goto out;
+
+       /*
+        * Since auto-negotiation is enabled, take the link out of reset (the
+        * link will be in reset, because we previously reset the chip). This
+        * will restart auto-negotiation.  If auto-negotiation is successful
+        * then the link-up status bit will be set and the flow control enable
+        * bits (RFCE and TFCE) will be set according to their negotiated value.
+        */
+       DEBUGOUT("Auto-negotiation enabled\n");
+
+       E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+       E1000_WRITE_FLUSH(hw);
+       msec_delay(1);
+
+       /*
+        * For these adapters, the SW definable pin 1 is set when the optics
+        * detect a signal.  If we have a signal, then poll for a "Link-Up"
+        * indication.
+        */
+       if (hw->phy.media_type == e1000_media_type_internal_serdes ||
+           (E1000_READ_REG(hw, E1000_CTRL) & E1000_CTRL_SWDPIN1)) {
+               ret_val = e1000_poll_fiber_serdes_link_generic(hw);
+       } else {
+               DEBUGOUT("No signal detected\n");
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_config_collision_dist_generic - Configure collision distance
+ *  @hw: pointer to the HW structure
+ *
+ *  Configures the collision distance to the default value and is used
+ *  during link setup.
+ **/
+void e1000_config_collision_dist_generic(struct e1000_hw *hw)
+{
+       u32 tctl;
+
+       DEBUGFUNC("e1000_config_collision_dist_generic");
+
+       tctl = E1000_READ_REG(hw, E1000_TCTL);
+
+       tctl &= ~E1000_TCTL_COLD;
+       tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;
+
+       E1000_WRITE_REG(hw, E1000_TCTL, tctl);
+       E1000_WRITE_FLUSH(hw);
+}
+
+/**
+ *  e1000_poll_fiber_serdes_link_generic - Poll for link up
+ *  @hw: pointer to the HW structure
+ *
+ *  Polls for link up by reading the status register, if link fails to come
+ *  up with auto-negotiation, then the link is forced if a signal is detected.
+ **/
+static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw)
+{
+       struct e1000_mac_info *mac = &hw->mac;
+       u32 i, status;
+       s32 ret_val = E1000_SUCCESS;
+
+       DEBUGFUNC("e1000_poll_fiber_serdes_link_generic");
+
+       /*
+        * If we have a signal (the cable is plugged in, or assumed TRUE for
+        * serdes media) then poll for a "Link-Up" indication in the Device
+        * Status Register.  Time-out if a link isn't seen in 500 milliseconds
+        * seconds (Auto-negotiation should complete in less than 500
+        * milliseconds even if the other end is doing it in SW).
+        */
+       for (i = 0; i < FIBER_LINK_UP_LIMIT; i++) {
+               msec_delay(10);
+               status = E1000_READ_REG(hw, E1000_STATUS);
+               if (status & E1000_STATUS_LU)
+                       break;
+       }
+       if (i == FIBER_LINK_UP_LIMIT) {
+               DEBUGOUT("Never got a valid link from auto-neg!!!\n");
+               mac->autoneg_failed = 1;
+               /*
+                * AutoNeg failed to achieve a link, so we'll call
+                * mac->check_for_link. This routine will force the
+                * link up if we detect a signal. This will allow us to
+                * communicate with non-autonegotiating link partners.
+                */
+               ret_val = mac->ops.check_for_link(hw);
+               if (ret_val) {
+                       DEBUGOUT("Error while checking for link\n");
+                       goto out;
+               }
+               mac->autoneg_failed = 0;
+       } else {
+               mac->autoneg_failed = 0;
+               DEBUGOUT("Valid Link Found\n");
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_commit_fc_settings_generic - Configure flow control
+ *  @hw: pointer to the HW structure
+ *
+ *  Write the flow control settings to the Transmit Config Word Register (TXCW)
+ *  base on the flow control settings in e1000_mac_info.
+ **/
+static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw)
+{
+       struct e1000_mac_info *mac = &hw->mac;
+       u32 txcw;
+       s32 ret_val = E1000_SUCCESS;
+
+       DEBUGFUNC("e1000_commit_fc_settings_generic");
+
+       /*
+        * Check for a software override of the flow control settings, and
+        * setup the device accordingly.  If auto-negotiation is enabled, then
+        * software will have to set the "PAUSE" bits to the correct value in
+        * the Transmit Config Word Register (TXCW) and re-start auto-
+        * negotiation.  However, if auto-negotiation is disabled, then
+        * software will have to manually configure the two flow control enable
+        * bits in the CTRL register.
+        *
+        * The possible values of the "fc" parameter are:
+        *      0:  Flow control is completely disabled
+        *      1:  Rx flow control is enabled (we can receive pause frames,
+        *          but not send pause frames).
+        *      2:  Tx flow control is enabled (we can send pause frames but we
+        *          do not support receiving pause frames).
+        *      3:  Both Rx and Tx flow control (symmetric) are enabled.
+        */
+       switch (hw->fc.current_mode) {
+       case e1000_fc_none:
+               /* Flow control completely disabled by a software over-ride. */
+               txcw = (E1000_TXCW_ANE | E1000_TXCW_FD);
+               break;
+       case e1000_fc_rx_pause:
+               /*
+                * Rx Flow control is enabled and Tx Flow control is disabled
+                * by a software over-ride. Since there really isn't a way to
+                * advertise that we are capable of Rx Pause ONLY, we will
+                * advertise that we support both symmetric and asymmetric Rx
+                * PAUSE.  Later, we will disable the adapter's ability to send
+                * PAUSE frames.
+                */
+               txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
+               break;
+       case e1000_fc_tx_pause:
+               /*
+                * Tx Flow control is enabled, and Rx Flow control is disabled,
+                * by a software over-ride.
+                */
+               txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR);
+               break;
+       case e1000_fc_full:
+               /*
+                * Flow control (both Rx and Tx) is enabled by a software
+                * over-ride.
+                */
+               txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
+               break;
+       default:
+               DEBUGOUT("Flow control param set incorrectly\n");
+               ret_val = -E1000_ERR_CONFIG;
+               goto out;
+               break;
+       }
+
+       E1000_WRITE_REG(hw, E1000_TXCW, txcw);
+       mac->txcw = txcw;
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_set_fc_watermarks_generic - Set flow control high/low watermarks
+ *  @hw: pointer to the HW structure
+ *
+ *  Sets the flow control high/low threshold (watermark) registers.  If
+ *  flow control XON frame transmission is enabled, then set XON frame
+ *  transmission as well.
+ **/
+s32 e1000_set_fc_watermarks_generic(struct e1000_hw *hw)
+{
+       u32 fcrtl = 0, fcrth = 0;
+
+       DEBUGFUNC("e1000_set_fc_watermarks_generic");
+
+       /*
+        * Set the flow control receive threshold registers.  Normally,
+        * these registers will be set to a default threshold that may be
+        * adjusted later by the driver's runtime code.  However, if the
+        * ability to transmit pause frames is not enabled, then these
+        * registers will be set to 0.
+        */
+       if (hw->fc.current_mode & e1000_fc_tx_pause) {
+               /*
+                * We need to set up the Receive Threshold high and low water
+                * marks as well as (optionally) enabling the transmission of
+                * XON frames.
+                */
+               fcrtl = hw->fc.low_water;
+               if (hw->fc.send_xon)
+                       fcrtl |= E1000_FCRTL_XONE;
+
+               fcrth = hw->fc.high_water;
+       }
+       E1000_WRITE_REG(hw, E1000_FCRTL, fcrtl);
+       E1000_WRITE_REG(hw, E1000_FCRTH, fcrth);
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_set_default_fc_generic - Set flow control default values
+ *  @hw: pointer to the HW structure
+ *
+ *  Read the EEPROM for the default values for flow control and store the
+ *  values.
+ **/
+static s32 e1000_set_default_fc_generic(struct e1000_hw *hw)
+{
+       s32 ret_val = E1000_SUCCESS;
+       u16 nvm_data;
+
+       DEBUGFUNC("e1000_set_default_fc_generic");
+
+       /*
+        * Read and store word 0x0F of the EEPROM. This word contains bits
+        * that determine the hardware's default PAUSE (flow control) mode,
+        * a bit that determines whether the HW defaults to enabling or
+        * disabling auto-negotiation, and the direction of the
+        * SW defined pins. If there is no SW over-ride of the flow
+        * control setting, then the variable hw->fc will
+        * be initialized based on a value in the EEPROM.
+        */
+       ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data);
+
+       if (ret_val) {
+               DEBUGOUT("NVM Read Error\n");
+               goto out;
+       }
+
+       if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0)
+               hw->fc.requested_mode = e1000_fc_none;
+       else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) ==
+                NVM_WORD0F_ASM_DIR)
+               hw->fc.requested_mode = e1000_fc_tx_pause;
+       else
+               hw->fc.requested_mode = e1000_fc_full;
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_force_mac_fc_generic - Force the MAC's flow control settings
+ *  @hw: pointer to the HW structure
+ *
+ *  Force the MAC's flow control settings.  Sets the TFCE and RFCE bits in the
+ *  device control register to reflect the adapter settings.  TFCE and RFCE
+ *  need to be explicitly set by software when a copper PHY is used because
+ *  autonegotiation is managed by the PHY rather than the MAC.  Software must
+ *  also configure these bits when link is forced on a fiber connection.
+ **/
+s32 e1000_force_mac_fc_generic(struct e1000_hw *hw)
+{
+       u32 ctrl;
+       s32 ret_val = E1000_SUCCESS;
+
+       DEBUGFUNC("e1000_force_mac_fc_generic");
+
+       ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+       /*
+        * Because we didn't get link via the internal auto-negotiation
+        * mechanism (we either forced link or we got link via PHY
+        * auto-neg), we have to manually enable/disable transmit an
+        * receive flow control.
+        *
+        * The "Case" statement below enables/disable flow control
+        * according to the "hw->fc.current_mode" parameter.
+        *
+        * The possible values of the "fc" parameter are:
+        *      0:  Flow control is completely disabled
+        *      1:  Rx flow control is enabled (we can receive pause
+        *          frames but not send pause frames).
+        *      2:  Tx flow control is enabled (we can send pause frames
+        *          frames but we do not receive pause frames).
+        *      3:  Both Rx and Tx flow control (symmetric) is enabled.
+        *  other:  No other values should be possible at this point.
+        */
+       DEBUGOUT1("hw->fc.current_mode = %u\n", hw->fc.current_mode);
+
+       switch (hw->fc.current_mode) {
+       case e1000_fc_none:
+               ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
+               break;
+       case e1000_fc_rx_pause:
+               ctrl &= (~E1000_CTRL_TFCE);
+               ctrl |= E1000_CTRL_RFCE;
+               break;
+       case e1000_fc_tx_pause:
+               ctrl &= (~E1000_CTRL_RFCE);
+               ctrl |= E1000_CTRL_TFCE;
+               break;
+       case e1000_fc_full:
+               ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
+               break;
+       default:
+               DEBUGOUT("Flow control param set incorrectly\n");
+               ret_val = -E1000_ERR_CONFIG;
+               goto out;
+       }
+
+       E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_config_fc_after_link_up_generic - Configures flow control after link
+ *  @hw: pointer to the HW structure
+ *
+ *  Checks the status of auto-negotiation after link up to ensure that the
+ *  speed and duplex were not forced.  If the link needed to be forced, then
+ *  flow control needs to be forced also.  If auto-negotiation is enabled
+ *  and did not fail, then we configure flow control based on our link
+ *  partner.
+ **/
+s32 e1000_config_fc_after_link_up_generic(struct e1000_hw *hw)
+{
+       struct e1000_mac_info *mac = &hw->mac;
+       s32 ret_val = E1000_SUCCESS;
+       u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
+       u16 speed, duplex;
+
+       DEBUGFUNC("e1000_config_fc_after_link_up_generic");
+
+       /*
+        * Check for the case where we have fiber media and auto-neg failed
+        * so we had to force link.  In this case, we need to force the
+        * configuration of the MAC to match the "fc" parameter.
+        */
+       if (mac->autoneg_failed) {
+               if (hw->phy.media_type == e1000_media_type_fiber ||
+                   hw->phy.media_type == e1000_media_type_internal_serdes)
+                       ret_val = e1000_force_mac_fc_generic(hw);
+       } else {
+               if (hw->phy.media_type == e1000_media_type_copper)
+                       ret_val = e1000_force_mac_fc_generic(hw);
+       }
+
+       if (ret_val) {
+               DEBUGOUT("Error forcing flow control settings\n");
+               goto out;
+       }
+
+       /*
+        * Check for the case where we have copper media and auto-neg is
+        * enabled.  In this case, we need to check and see if Auto-Neg
+        * has completed, and if so, how the PHY and link partner has
+        * flow control configured.
+        */
+       if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) {
+               /*
+                * Read the MII Status Register and check to see if AutoNeg
+                * has completed.  We read this twice because this reg has
+                * some "sticky" (latched) bits.
+                */
+               ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &mii_status_reg);
+               if (ret_val)
+                       goto out;
+               ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &mii_status_reg);
+               if (ret_val)
+                       goto out;
+
+               if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
+                       DEBUGOUT("Copper PHY and Auto Neg "
+                                "has not completed.\n");
+                       goto out;
+               }
+
+               /*
+                * The AutoNeg process has completed, so we now need to
+                * read both the Auto Negotiation Advertisement
+                * Register (Address 4) and the Auto_Negotiation Base
+                * Page Ability Register (Address 5) to determine how
+                * flow control was negotiated.
+                */
+               ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV,
+                                            &mii_nway_adv_reg);
+               if (ret_val)
+                       goto out;
+               ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY,
+                                            &mii_nway_lp_ability_reg);
+               if (ret_val)
+                       goto out;
+
+               /*
+                * Two bits in the Auto Negotiation Advertisement Register
+                * (Address 4) and two bits in the Auto Negotiation Base
+                * Page Ability Register (Address 5) determine flow control
+                * for both the PHY and the link partner.  The following
+                * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
+                * 1999, describes these PAUSE resolution bits and how flow
+                * control is determined based upon these settings.
+                * NOTE:  DC = Don't Care
+                *
+                *   LOCAL DEVICE  |   LINK PARTNER
+                * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
+                *-------|---------|-------|---------|--------------------
+                *   0   |    0    |  DC   |   DC    | e1000_fc_none
+                *   0   |    1    |   0   |   DC    | e1000_fc_none
+                *   0   |    1    |   1   |    0    | e1000_fc_none
+                *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
+                *   1   |    0    |   0   |   DC    | e1000_fc_none
+                *   1   |   DC    |   1   |   DC    | e1000_fc_full
+                *   1   |    1    |   0   |    0    | e1000_fc_none
+                *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
+                *
+                * Are both PAUSE bits set to 1?  If so, this implies
+                * Symmetric Flow Control is enabled at both ends.  The
+                * ASM_DIR bits are irrelevant per the spec.
+                *
+                * For Symmetric Flow Control:
+                *
+                *   LOCAL DEVICE  |   LINK PARTNER
+                * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+                *-------|---------|-------|---------|--------------------
+                *   1   |   DC    |   1   |   DC    | E1000_fc_full
+                *
+                */
+               if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+                   (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
+                       /*
+                        * Now we need to check if the user selected Rx ONLY
+                        * of pause frames.  In this case, we had to advertise
+                        * FULL flow control because we could not advertise Rx
+                        * ONLY. Hence, we must now check to see if we need to
+                        * turn OFF the TRANSMISSION of PAUSE frames.
+                        */
+                       if (hw->fc.requested_mode == e1000_fc_full) {
+                               hw->fc.current_mode = e1000_fc_full;
+                               DEBUGOUT("Flow Control = FULL.\r\n");
+                       } else {
+                               hw->fc.current_mode = e1000_fc_rx_pause;
+                               DEBUGOUT("Flow Control = "
+                                        "Rx PAUSE frames only.\r\n");
+                       }
+               }
+               /*
+                * For receiving PAUSE frames ONLY.
+                *
+                *   LOCAL DEVICE  |   LINK PARTNER
+                * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+                *-------|---------|-------|---------|--------------------
+                *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
+                */
+               else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+                         (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
+                         (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
+                         (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
+                       hw->fc.current_mode = e1000_fc_tx_pause;
+                       DEBUGOUT("Flow Control = Tx PAUSE frames only.\r\n");
+               }
+               /*
+                * For transmitting PAUSE frames ONLY.
+                *
+                *   LOCAL DEVICE  |   LINK PARTNER
+                * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+                *-------|---------|-------|---------|--------------------
+                *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
+                */
+               else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+                        (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
+                        !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
+                        (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
+                       hw->fc.current_mode = e1000_fc_rx_pause;
+                       DEBUGOUT("Flow Control = Rx PAUSE frames only.\r\n");
+               } else {
+                       /*
+                        * Per the IEEE spec, at this point flow control
+                        * should be disabled.
+                        */
+                       hw->fc.current_mode = e1000_fc_none;
+                       DEBUGOUT("Flow Control = NONE.\r\n");
+               }
+
+               /*
+                * Now we need to do one last check...  If we auto-
+                * negotiated to HALF DUPLEX, flow control should not be
+                * enabled per IEEE 802.3 spec.
+                */
+               ret_val = mac->ops.get_link_up_info(hw, &speed, &duplex);
+               if (ret_val) {
+                       DEBUGOUT("Error getting link speed and duplex\n");
+                       goto out;
+               }
+
+               if (duplex == HALF_DUPLEX)
+                       hw->fc.current_mode = e1000_fc_none;
+
+               /*
+                * Now we call a subroutine to actually force the MAC
+                * controller to use the correct flow control settings.
+                */
+               ret_val = e1000_force_mac_fc_generic(hw);
+               if (ret_val) {
+                       DEBUGOUT("Error forcing flow control settings\n");
+                       goto out;
+               }
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_get_speed_and_duplex_copper_generic - Retrieve current speed/duplex
+ *  @hw: pointer to the HW structure
+ *  @speed: stores the current speed
+ *  @duplex: stores the current duplex
+ *
+ *  Read the status register for the current speed/duplex and store the current
+ *  speed and duplex for copper connections.
+ **/
+s32 e1000_get_speed_and_duplex_copper_generic(struct e1000_hw *hw, u16 *speed,
+                                              u16 *duplex)
+{
+       u32 status;
+
+       DEBUGFUNC("e1000_get_speed_and_duplex_copper_generic");
+
+       status = E1000_READ_REG(hw, E1000_STATUS);
+       if (status & E1000_STATUS_SPEED_1000) {
+               *speed = SPEED_1000;
+               DEBUGOUT("1000 Mbs, ");
+       } else if (status & E1000_STATUS_SPEED_100) {
+               *speed = SPEED_100;
+               DEBUGOUT("100 Mbs, ");
+       } else {
+               *speed = SPEED_10;
+               DEBUGOUT("10 Mbs, ");
+       }
+
+       if (status & E1000_STATUS_FD) {
+               *duplex = FULL_DUPLEX;
+               DEBUGOUT("Full Duplex\n");
+       } else {
+               *duplex = HALF_DUPLEX;
+               DEBUGOUT("Half Duplex\n");
+       }
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_get_speed_and_duplex_fiber_generic - Retrieve current speed/duplex
+ *  @hw: pointer to the HW structure
+ *  @speed: stores the current speed
+ *  @duplex: stores the current duplex
+ *
+ *  Sets the speed and duplex to gigabit full duplex (the only possible option)
+ *  for fiber/serdes links.
+ **/
+s32 e1000_get_speed_and_duplex_fiber_serdes_generic(struct e1000_hw *hw,
+                                                    u16 *speed, u16 *duplex)
+{
+       DEBUGFUNC("e1000_get_speed_and_duplex_fiber_serdes_generic");
+
+       *speed = SPEED_1000;
+       *duplex = FULL_DUPLEX;
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_get_hw_semaphore_generic - Acquire hardware semaphore
+ *  @hw: pointer to the HW structure
+ *
+ *  Acquire the HW semaphore to access the PHY or NVM
+ **/
+s32 e1000_get_hw_semaphore_generic(struct e1000_hw *hw)
+{
+       u32 swsm;
+       s32 ret_val = E1000_SUCCESS;
+       s32 timeout = hw->nvm.word_size + 1;
+       s32 i = 0;
+
+       DEBUGFUNC("e1000_get_hw_semaphore_generic");
+
+       /* Get the SW semaphore */
+       while (i < timeout) {
+               swsm = E1000_READ_REG(hw, E1000_SWSM);
+               if (!(swsm & E1000_SWSM_SMBI))
+                       break;
+
+               usec_delay(50);
+               i++;
+       }
+
+       if (i == timeout) {
+               DEBUGOUT("Driver can't access device - SMBI bit is set.\n");
+               ret_val = -E1000_ERR_NVM;
+               goto out;
+       }
+
+       /* Get the FW semaphore. */
+       for (i = 0; i < timeout; i++) {
+               swsm = E1000_READ_REG(hw, E1000_SWSM);
+               E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_SWESMBI);
+
+               /* Semaphore acquired if bit latched */
+               if (E1000_READ_REG(hw, E1000_SWSM) & E1000_SWSM_SWESMBI)
+                       break;
+
+               usec_delay(50);
+       }
+
+       if (i == timeout) {
+               /* Release semaphores */
+               e1000_put_hw_semaphore_generic(hw);
+               DEBUGOUT("Driver can't access the NVM\n");
+               ret_val = -E1000_ERR_NVM;
+               goto out;
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_put_hw_semaphore_generic - Release hardware semaphore
+ *  @hw: pointer to the HW structure
+ *
+ *  Release hardware semaphore used to access the PHY or NVM
+ **/
+void e1000_put_hw_semaphore_generic(struct e1000_hw *hw)
+{
+       u32 swsm;
+
+       DEBUGFUNC("e1000_put_hw_semaphore_generic");
+
+       swsm = E1000_READ_REG(hw, E1000_SWSM);
+
+       swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
+
+       E1000_WRITE_REG(hw, E1000_SWSM, swsm);
+}
+
+/**
+ *  e1000_get_auto_rd_done_generic - Check for auto read completion
+ *  @hw: pointer to the HW structure
+ *
+ *  Check EEPROM for Auto Read done bit.
+ **/
+s32 e1000_get_auto_rd_done_generic(struct e1000_hw *hw)
+{
+       s32 i = 0;
+       s32 ret_val = E1000_SUCCESS;
+
+       DEBUGFUNC("e1000_get_auto_rd_done_generic");
+
+       while (i < AUTO_READ_DONE_TIMEOUT) {
+               if (E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_AUTO_RD)
+                       break;
+               msec_delay(1);
+               i++;
+       }
+
+       if (i == AUTO_READ_DONE_TIMEOUT) {
+               DEBUGOUT("Auto read by HW from NVM has not completed.\n");
+               ret_val = -E1000_ERR_RESET;
+               goto out;
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_valid_led_default_generic - Verify a valid default LED config
+ *  @hw: pointer to the HW structure
+ *  @data: pointer to the NVM (EEPROM)
+ *
+ *  Read the EEPROM for the current default LED configuration.  If the
+ *  LED configuration is not valid, set to a valid LED configuration.
+ **/
+s32 e1000_valid_led_default_generic(struct e1000_hw *hw, u16 *data)
+{
+       s32 ret_val;
+
+       DEBUGFUNC("e1000_valid_led_default_generic");
+
+       ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
+       if (ret_val) {
+               DEBUGOUT("NVM Read Error\n");
+               goto out;
+       }
+
+       if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
+               *data = ID_LED_DEFAULT;
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_id_led_init_generic -
+ *  @hw: pointer to the HW structure
+ *
+ **/
+s32 e1000_id_led_init_generic(struct e1000_hw *hw)
+{
+       struct e1000_mac_info *mac = &hw->mac;
+       s32 ret_val;
+       const u32 ledctl_mask = 0x000000FF;
+       const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
+       const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
+       u16 data, i, temp;
+       const u16 led_mask = 0x0F;
+
+       DEBUGFUNC("e1000_id_led_init_generic");
+
+       ret_val = hw->nvm.ops.valid_led_default(hw, &data);
+       if (ret_val)
+               goto out;
+
+       mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL);
+       mac->ledctl_mode1 = mac->ledctl_default;
+       mac->ledctl_mode2 = mac->ledctl_default;
+
+       for (i = 0; i < 4; i++) {
+               temp = (data >> (i << 2)) & led_mask;
+               switch (temp) {
+               case ID_LED_ON1_DEF2:
+               case ID_LED_ON1_ON2:
+               case ID_LED_ON1_OFF2:
+                       mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
+                       mac->ledctl_mode1 |= ledctl_on << (i << 3);
+                       break;
+               case ID_LED_OFF1_DEF2:
+               case ID_LED_OFF1_ON2:
+               case ID_LED_OFF1_OFF2:
+                       mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
+                       mac->ledctl_mode1 |= ledctl_off << (i << 3);
+                       break;
+               default:
+                       /* Do nothing */
+                       break;
+               }
+               switch (temp) {
+               case ID_LED_DEF1_ON2:
+               case ID_LED_ON1_ON2:
+               case ID_LED_OFF1_ON2:
+                       mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
+                       mac->ledctl_mode2 |= ledctl_on << (i << 3);
+                       break;
+               case ID_LED_DEF1_OFF2:
+               case ID_LED_ON1_OFF2:
+               case ID_LED_OFF1_OFF2:
+                       mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
+                       mac->ledctl_mode2 |= ledctl_off << (i << 3);
+                       break;
+               default:
+                       /* Do nothing */
+                       break;
+               }
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_setup_led_generic - Configures SW controllable LED
+ *  @hw: pointer to the HW structure
+ *
+ *  This prepares the SW controllable LED for use and saves the current state
+ *  of the LED so it can be later restored.
+ **/
+s32 e1000_setup_led_generic(struct e1000_hw *hw)
+{
+       u32 ledctl;
+       s32 ret_val = E1000_SUCCESS;
+
+       DEBUGFUNC("e1000_setup_led_generic");
+
+       if (hw->mac.ops.setup_led != e1000_setup_led_generic) {
+               ret_val = -E1000_ERR_CONFIG;
+               goto out;
+       }
+
+       if (hw->phy.media_type == e1000_media_type_fiber) {
+               ledctl = E1000_READ_REG(hw, E1000_LEDCTL);
+               hw->mac.ledctl_default = ledctl;
+               /* Turn off LED0 */
+               ledctl &= ~(E1000_LEDCTL_LED0_IVRT |
+                           E1000_LEDCTL_LED0_BLINK |
+                           E1000_LEDCTL_LED0_MODE_MASK);
+               ledctl |= (E1000_LEDCTL_MODE_LED_OFF <<
+                          E1000_LEDCTL_LED0_MODE_SHIFT);
+               E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl);
+       } else if (hw->phy.media_type == e1000_media_type_copper) {
+               E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_cleanup_led_generic - Set LED config to default operation
+ *  @hw: pointer to the HW structure
+ *
+ *  Remove the current LED configuration and set the LED configuration
+ *  to the default value, saved from the EEPROM.
+ **/
+s32 e1000_cleanup_led_generic(struct e1000_hw *hw)
+{
+       DEBUGFUNC("e1000_cleanup_led_generic");
+
+       E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_blink_led_generic - Blink LED
+ *  @hw: pointer to the HW structure
+ *
+ *  Blink the LEDs which are set to be on.
+ **/
+s32 e1000_blink_led_generic(struct e1000_hw *hw)
+{
+       u32 ledctl_blink = 0;
+       u32 i;
+
+       DEBUGFUNC("e1000_blink_led_generic");
+
+       if (hw->phy.media_type == e1000_media_type_fiber) {
+               /* always blink LED0 for PCI-E fiber */
+               ledctl_blink = E1000_LEDCTL_LED0_BLINK |
+                    (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
+       } else {
+               /*
+                * set the blink bit for each LED that's "on" (0x0E)
+                * in ledctl_mode2
+                */
+               ledctl_blink = hw->mac.ledctl_mode2;
+               for (i = 0; i < 4; i++)
+                       if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) ==
+                           E1000_LEDCTL_MODE_LED_ON)
+                               ledctl_blink |= (E1000_LEDCTL_LED0_BLINK <<
+                                                (i * 8));
+       }
+
+       E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl_blink);
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_led_on_generic - Turn LED on
+ *  @hw: pointer to the HW structure
+ *
+ *  Turn LED on.
+ **/
+s32 e1000_led_on_generic(struct e1000_hw *hw)
+{
+       u32 ctrl;
+
+       DEBUGFUNC("e1000_led_on_generic");
+
+       switch (hw->phy.media_type) {
+       case e1000_media_type_fiber:
+               ctrl = E1000_READ_REG(hw, E1000_CTRL);
+               ctrl &= ~E1000_CTRL_SWDPIN0;
+               ctrl |= E1000_CTRL_SWDPIO0;
+               E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+               break;
+       case e1000_media_type_copper:
+               E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
+               break;
+       default:
+               break;
+       }
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_led_off_generic - Turn LED off
+ *  @hw: pointer to the HW structure
+ *
+ *  Turn LED off.
+ **/
+s32 e1000_led_off_generic(struct e1000_hw *hw)
+{
+       u32 ctrl;
+
+       DEBUGFUNC("e1000_led_off_generic");
+
+       switch (hw->phy.media_type) {
+       case e1000_media_type_fiber:
+               ctrl = E1000_READ_REG(hw, E1000_CTRL);
+               ctrl |= E1000_CTRL_SWDPIN0;
+               ctrl |= E1000_CTRL_SWDPIO0;
+               E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+               break;
+       case e1000_media_type_copper:
+               E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
+               break;
+       default:
+               break;
+       }
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_set_pcie_no_snoop_generic - Set PCI-express capabilities
+ *  @hw: pointer to the HW structure
+ *  @no_snoop: bitmap of snoop events
+ *
+ *  Set the PCI-express register to snoop for events enabled in 'no_snoop'.
+ **/
+void e1000_set_pcie_no_snoop_generic(struct e1000_hw *hw, u32 no_snoop)
+{
+       u32 gcr;
+
+       DEBUGFUNC("e1000_set_pcie_no_snoop_generic");
+
+       if (hw->bus.type != e1000_bus_type_pci_express)
+               goto out;
+
+       if (no_snoop) {
+               gcr = E1000_READ_REG(hw, E1000_GCR);
+               gcr &= ~(PCIE_NO_SNOOP_ALL);
+               gcr |= no_snoop;
+               E1000_WRITE_REG(hw, E1000_GCR, gcr);
+       }
+out:
+       return;
+}
+
+/**
+ *  e1000_disable_pcie_master_generic - Disables PCI-express master access
+ *  @hw: pointer to the HW structure
+ *
+ *  Returns E1000_SUCCESS if successful, else returns -10
+ *  (-E1000_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not caused
+ *  the master requests to be disabled.
+ *
+ *  Disables PCI-Express master access and verifies there are no pending
+ *  requests.
+ **/
+s32 e1000_disable_pcie_master_generic(struct e1000_hw *hw)
+{
+       u32 ctrl;
+       s32 timeout = MASTER_DISABLE_TIMEOUT;
+       s32 ret_val = E1000_SUCCESS;
+
+       DEBUGFUNC("e1000_disable_pcie_master_generic");
+
+       if (hw->bus.type != e1000_bus_type_pci_express)
+               goto out;
+
+       ctrl = E1000_READ_REG(hw, E1000_CTRL);
+       ctrl |= E1000_CTRL_GIO_MASTER_DISABLE;
+       E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+       while (timeout) {
+               if (!(E1000_READ_REG(hw, E1000_STATUS) &
+                     E1000_STATUS_GIO_MASTER_ENABLE))
+                       break;
+               usec_delay(100);
+               timeout--;
+       }
+
+       if (!timeout) {
+               DEBUGOUT("Master requests are pending.\n");
+               ret_val = -E1000_ERR_MASTER_REQUESTS_PENDING;
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_reset_adaptive_generic - Reset Adaptive Interframe Spacing
+ *  @hw: pointer to the HW structure
+ *
+ *  Reset the Adaptive Interframe Spacing throttle to default values.
+ **/
+void e1000_reset_adaptive_generic(struct e1000_hw *hw)
+{
+       struct e1000_mac_info *mac = &hw->mac;
+
+       DEBUGFUNC("e1000_reset_adaptive_generic");
+
+       if (!mac->adaptive_ifs) {
+               DEBUGOUT("Not in Adaptive IFS mode!\n");
+               goto out;
+       }
+
+       mac->current_ifs_val = 0;
+       mac->ifs_min_val = IFS_MIN;
+       mac->ifs_max_val = IFS_MAX;
+       mac->ifs_step_size = IFS_STEP;
+       mac->ifs_ratio = IFS_RATIO;
+
+       mac->in_ifs_mode = FALSE;
+       E1000_WRITE_REG(hw, E1000_AIT, 0);
+out:
+       return;
+}
+
+/**
+ *  e1000_update_adaptive_generic - Update Adaptive Interframe Spacing
+ *  @hw: pointer to the HW structure
+ *
+ *  Update the Adaptive Interframe Spacing Throttle value based on the
+ *  time between transmitted packets and time between collisions.
+ **/
+void e1000_update_adaptive_generic(struct e1000_hw *hw)
+{
+       struct e1000_mac_info *mac = &hw->mac;
+
+       DEBUGFUNC("e1000_update_adaptive_generic");
+
+       if (!mac->adaptive_ifs) {
+               DEBUGOUT("Not in Adaptive IFS mode!\n");
+               goto out;
+       }
+
+       if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) {
+               if (mac->tx_packet_delta > MIN_NUM_XMITS) {
+                       mac->in_ifs_mode = TRUE;
+                       if (mac->current_ifs_val < mac->ifs_max_val) {
+                               if (!mac->current_ifs_val)
+                                       mac->current_ifs_val = mac->ifs_min_val;
+                               else
+                                       mac->current_ifs_val +=
+                                               mac->ifs_step_size;
+                               E1000_WRITE_REG(hw, E1000_AIT, mac->current_ifs_val);
+                       }
+               }
+       } else {
+               if (mac->in_ifs_mode &&
+                   (mac->tx_packet_delta <= MIN_NUM_XMITS)) {
+                       mac->current_ifs_val = 0;
+                       mac->in_ifs_mode = FALSE;
+                       E1000_WRITE_REG(hw, E1000_AIT, 0);
+               }
+       }
+out:
+       return;
+}
+
+/**
+ *  e1000_validate_mdi_setting_generic - Verify MDI/MDIx settings
+ *  @hw: pointer to the HW structure
+ *
+ *  Verify that when not using auto-negotiation that MDI/MDIx is correctly
+ *  set, which is forced to MDI mode only.
+ **/
+static s32 e1000_validate_mdi_setting_generic(struct e1000_hw *hw)
+{
+       s32 ret_val = E1000_SUCCESS;
+
+       DEBUGFUNC("e1000_validate_mdi_setting_generic");
+
+       if (!hw->mac.autoneg && (hw->phy.mdix == 0 || hw->phy.mdix == 3)) {
+               DEBUGOUT("Invalid MDI setting detected\n");
+               hw->phy.mdix = 1;
+               ret_val = -E1000_ERR_CONFIG;
+               goto out;
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_write_8bit_ctrl_reg_generic - Write a 8bit CTRL register
+ *  @hw: pointer to the HW structure
+ *  @reg: 32bit register offset such as E1000_SCTL
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *
+ *  Writes an address/data control type register.  There are several of these
+ *  and they all have the format address << 8 | data and bit 31 is polled for
+ *  completion.
+ **/
+s32 e1000_write_8bit_ctrl_reg_generic(struct e1000_hw *hw, u32 reg,
+                                      u32 offset, u8 data)
+{
+       u32 i, regvalue = 0;
+       s32 ret_val = E1000_SUCCESS;
+
+       DEBUGFUNC("e1000_write_8bit_ctrl_reg_generic");
+
+       /* Set up the address and data */
+       regvalue = ((u32)data) | (offset << E1000_GEN_CTL_ADDRESS_SHIFT);
+       E1000_WRITE_REG(hw, reg, regvalue);
+
+       /* Poll the ready bit to see if the MDI read completed */
+       for (i = 0; i < E1000_GEN_POLL_TIMEOUT; i++) {
+               usec_delay(5);
+               regvalue = E1000_READ_REG(hw, reg);
+               if (regvalue & E1000_GEN_CTL_READY)
+                       break;
+       }
+       if (!(regvalue & E1000_GEN_CTL_READY)) {
+               DEBUGOUT1("Reg %08x did not indicate ready\n", reg);
+               ret_val = -E1000_ERR_PHY;
+               goto out;
+       }
+
+out:
+       return ret_val;
+}
diff --git a/lib/librte_pmd_igb/igb/e1000_mac.h b/lib/librte_pmd_igb/igb/e1000_mac.h
new file mode 100644 (file)
index 0000000..a5a98d0
--- /dev/null
@@ -0,0 +1,95 @@
+/******************************************************************************
+
+  Copyright (c) 2001-2011, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#ifndef _E1000_MAC_H_
+#define _E1000_MAC_H_
+
+/*
+ * Functions that should not be called directly from drivers but can be used
+ * by other files in this 'shared code'
+ */
+void e1000_init_mac_ops_generic(struct e1000_hw *hw);
+/* Null/stub implementations used to populate unimplemented ops slots. */
+void e1000_null_mac_generic(struct e1000_hw *hw);
+s32  e1000_null_ops_generic(struct e1000_hw *hw);
+s32  e1000_null_link_info(struct e1000_hw *hw, u16 *s, u16 *d);
+bool e1000_null_mng_mode(struct e1000_hw *hw);
+void e1000_null_update_mc(struct e1000_hw *hw, u8 *h, u32 a);
+void e1000_null_write_vfta(struct e1000_hw *hw, u32 a, u32 b);
+void e1000_null_rar_set(struct e1000_hw *hw, u8 *h, u32 a);
+s32  e1000_blink_led_generic(struct e1000_hw *hw);
+s32  e1000_check_for_copper_link_generic(struct e1000_hw *hw);
+s32  e1000_check_for_fiber_link_generic(struct e1000_hw *hw);
+s32  e1000_check_for_serdes_link_generic(struct e1000_hw *hw);
+s32  e1000_cleanup_led_generic(struct e1000_hw *hw);
+s32  e1000_config_fc_after_link_up_generic(struct e1000_hw *hw);
+s32  e1000_disable_pcie_master_generic(struct e1000_hw *hw);
+s32  e1000_force_mac_fc_generic(struct e1000_hw *hw);
+s32  e1000_get_auto_rd_done_generic(struct e1000_hw *hw);
+s32  e1000_get_bus_info_pci_generic(struct e1000_hw *hw);
+s32  e1000_get_bus_info_pcie_generic(struct e1000_hw *hw);
+void e1000_set_lan_id_single_port(struct e1000_hw *hw);
+void e1000_set_lan_id_multi_port_pci(struct e1000_hw *hw);
+s32  e1000_get_hw_semaphore_generic(struct e1000_hw *hw);
+s32  e1000_get_speed_and_duplex_copper_generic(struct e1000_hw *hw, u16 *speed,
+                                               u16 *duplex);
+s32  e1000_get_speed_and_duplex_fiber_serdes_generic(struct e1000_hw *hw,
+                                                     u16 *speed, u16 *duplex);
+s32  e1000_id_led_init_generic(struct e1000_hw *hw);
+s32  e1000_led_on_generic(struct e1000_hw *hw);
+s32  e1000_led_off_generic(struct e1000_hw *hw);
+void e1000_update_mc_addr_list_generic(struct e1000_hw *hw,
+                                       u8 *mc_addr_list, u32 mc_addr_count);
+s32  e1000_set_fc_watermarks_generic(struct e1000_hw *hw);
+s32  e1000_setup_fiber_serdes_link_generic(struct e1000_hw *hw);
+s32  e1000_setup_led_generic(struct e1000_hw *hw);
+s32  e1000_setup_link_generic(struct e1000_hw *hw);
+s32  e1000_write_8bit_ctrl_reg_generic(struct e1000_hw *hw, u32 reg,
+                                       u32 offset, u8 data);
+
+u32  e1000_hash_mc_addr_generic(struct e1000_hw *hw, u8 *mc_addr);
+
+void e1000_clear_hw_cntrs_base_generic(struct e1000_hw *hw);
+void e1000_clear_vfta_generic(struct e1000_hw *hw);
+void e1000_config_collision_dist_generic(struct e1000_hw *hw);
+void e1000_init_rx_addrs_generic(struct e1000_hw *hw, u16 rar_count);
+void e1000_pcix_mmrbc_workaround_generic(struct e1000_hw *hw);
+void e1000_put_hw_semaphore_generic(struct e1000_hw *hw);
+void e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index);
+s32  e1000_check_alt_mac_addr_generic(struct e1000_hw *hw);
+void e1000_reset_adaptive_generic(struct e1000_hw *hw);
+void e1000_set_pcie_no_snoop_generic(struct e1000_hw *hw, u32 no_snoop);
+void e1000_update_adaptive_generic(struct e1000_hw *hw);
+void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value);
+
+#endif /* _E1000_MAC_H_ */
diff --git a/lib/librte_pmd_igb/igb/e1000_manage.c b/lib/librte_pmd_igb/igb/e1000_manage.c
new file mode 100644 (file)
index 0000000..bb0a10b
--- /dev/null
@@ -0,0 +1,472 @@
+/******************************************************************************
+
+  Copyright (c) 2001-2011, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#include "e1000_api.h"
+
+/**
+ *  e1000_calculate_checksum - Calculate checksum for buffer
+ *  @buffer: pointer to EEPROM
+ *  @length: size of EEPROM to calculate a checksum for
+ *
+ *  Calculates the checksum for some buffer on a specified length.  The
+ *  checksum calculated is returned.
+ **/
+u8 e1000_calculate_checksum(u8 *buffer, u32 length)
+{
+       u32 i;
+       u8 sum = 0;
+
+       DEBUGFUNC("e1000_calculate_checksum");
+
+       if (!buffer)
+               return 0;
+
+       for (i = 0; i < length; i++)
+               sum += buffer[i];
+
+       return (u8) (0 - sum);
+}
+
+/**
+ *  e1000_mng_enable_host_if_generic - Checks host interface is enabled
+ *  @hw: pointer to the HW structure
+ *
+ *  Returns E1000_success upon success, else E1000_ERR_HOST_INTERFACE_COMMAND
+ *
+ *  This function checks whether the HOST IF is enabled for command operation
+ *  and also checks whether the previous command is completed.  It busy waits
+ *  in case of previous command is not completed.
+ **/
+s32 e1000_mng_enable_host_if_generic(struct e1000_hw *hw)
+{
+       u32 hicr;
+       s32 ret_val = E1000_SUCCESS;
+       u8 i;
+
+       DEBUGFUNC("e1000_mng_enable_host_if_generic");
+
+       if (!(hw->mac.arc_subsystem_valid)) {
+               DEBUGOUT("ARC subsystem not valid.\n");
+               ret_val = -E1000_ERR_HOST_INTERFACE_COMMAND;
+               goto out;
+       }
+
+       /* Check that the host interface is enabled. */
+       hicr = E1000_READ_REG(hw, E1000_HICR);
+       if ((hicr & E1000_HICR_EN) == 0) {
+               DEBUGOUT("E1000_HOST_EN bit disabled.\n");
+               ret_val = -E1000_ERR_HOST_INTERFACE_COMMAND;
+               goto out;
+       }
+       /* check the previous command is completed */
+       for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) {
+               hicr = E1000_READ_REG(hw, E1000_HICR);
+               if (!(hicr & E1000_HICR_C))
+                       break;
+               msec_delay_irq(1);
+       }
+
+       if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) {
+               DEBUGOUT("Previous command timeout failed .\n");
+               ret_val = -E1000_ERR_HOST_INTERFACE_COMMAND;
+               goto out;
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_check_mng_mode_generic - Generic check management mode
+ *  @hw: pointer to the HW structure
+ *
+ *  Reads the firmware semaphore register and returns TRUE (>0) if
+ *  manageability is enabled, else FALSE (0).
+ **/
+bool e1000_check_mng_mode_generic(struct e1000_hw *hw)
+{
+       u32 fwsm = E1000_READ_REG(hw, E1000_FWSM);
+
+       DEBUGFUNC("e1000_check_mng_mode_generic");
+
+
+       return (fwsm & E1000_FWSM_MODE_MASK) ==
+               (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT);
+}
+
+/**
+ *  e1000_enable_tx_pkt_filtering_generic - Enable packet filtering on Tx
+ *  @hw: pointer to the HW structure
+ *
+ *  Enables packet filtering on transmit packets if manageability is enabled
+ *  and host interface is enabled.
+ **/
+bool e1000_enable_tx_pkt_filtering_generic(struct e1000_hw *hw)
+{
+       struct e1000_host_mng_dhcp_cookie *hdr = &hw->mng_cookie;
+       u32 *buffer = (u32 *)&hw->mng_cookie;
+       u32 offset;
+       s32 ret_val, hdr_csum, csum;
+       u8 i, len;
+
+       DEBUGFUNC("e1000_enable_tx_pkt_filtering_generic");
+
+       hw->mac.tx_pkt_filtering = TRUE;
+
+       /* No manageability, no filtering */
+       if (!hw->mac.ops.check_mng_mode(hw)) {
+               hw->mac.tx_pkt_filtering = FALSE;
+               goto out;
+       }
+
+       /*
+        * If we can't read from the host interface for whatever
+        * reason, disable filtering.
+        */
+       ret_val = hw->mac.ops.mng_enable_host_if(hw);
+       if (ret_val != E1000_SUCCESS) {
+               hw->mac.tx_pkt_filtering = FALSE;
+               goto out;
+       }
+
+       /* Read in the header.  Length and offset are in dwords. */
+       len    = E1000_MNG_DHCP_COOKIE_LENGTH >> 2;
+       offset = E1000_MNG_DHCP_COOKIE_OFFSET >> 2;
+       for (i = 0; i < len; i++)
+               *(buffer + i) = E1000_READ_REG_ARRAY_DWORD(hw, E1000_HOST_IF,
+                                                          offset + i);
+       hdr_csum = hdr->checksum;
+       hdr->checksum = 0;
+       csum = e1000_calculate_checksum((u8 *)hdr,
+                                       E1000_MNG_DHCP_COOKIE_LENGTH);
+       /*
+        * If either the checksums or signature don't match, then
+        * the cookie area isn't considered valid, in which case we
+        * take the safe route of assuming Tx filtering is enabled.
+        */
+       if ((hdr_csum != csum) || (hdr->signature != E1000_IAMT_SIGNATURE)) {
+               hw->mac.tx_pkt_filtering = TRUE;
+               goto out;
+       }
+
+       /* Cookie area is valid, make the final check for filtering. */
+       if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING)) {
+               hw->mac.tx_pkt_filtering = FALSE;
+               goto out;
+       }
+
+out:
+       return hw->mac.tx_pkt_filtering;
+}
+
+/**
+ *  e1000_mng_write_dhcp_info_generic - Writes DHCP info to host interface
+ *  @hw: pointer to the HW structure
+ *  @buffer: pointer to the host interface
+ *  @length: size of the buffer
+ *
+ *  Writes the DHCP information to the host interface.
+ **/
+s32 e1000_mng_write_dhcp_info_generic(struct e1000_hw *hw, u8 *buffer,
+                                      u16 length)
+{
+       struct e1000_host_mng_command_header hdr;
+       s32 ret_val;
+       u32 hicr;
+
+       DEBUGFUNC("e1000_mng_write_dhcp_info_generic");
+
+       hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD;
+       hdr.command_length = length;
+       hdr.reserved1 = 0;
+       hdr.reserved2 = 0;
+       hdr.checksum = 0;
+
+       /* Enable the host interface */
+       ret_val = hw->mac.ops.mng_enable_host_if(hw);
+       if (ret_val)
+               goto out;
+
+       /* Populate the host interface with the contents of "buffer". */
+       ret_val = hw->mac.ops.mng_host_if_write(hw, buffer, length,
+                                         sizeof(hdr), &(hdr.checksum));
+       if (ret_val)
+               goto out;
+
+       /* Write the manageability command header */
+       ret_val = hw->mac.ops.mng_write_cmd_header(hw, &hdr);
+       if (ret_val)
+               goto out;
+
+       /* Tell the ARC a new command is pending. */
+       hicr = E1000_READ_REG(hw, E1000_HICR);
+       E1000_WRITE_REG(hw, E1000_HICR, hicr | E1000_HICR_C);
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_mng_write_cmd_header_generic - Writes manageability command header
+ *  @hw: pointer to the HW structure
+ *  @hdr: pointer to the host interface command header
+ *
+ *  Writes the command header after does the checksum calculation.
+ **/
+s32 e1000_mng_write_cmd_header_generic(struct e1000_hw *hw,
+                                    struct e1000_host_mng_command_header *hdr)
+{
+       u16 i, length = sizeof(struct e1000_host_mng_command_header);
+
+       DEBUGFUNC("e1000_mng_write_cmd_header_generic");
+
+       /* Write the whole command header structure with new checksum. */
+
+       hdr->checksum = e1000_calculate_checksum((u8 *)hdr, length);
+
+       length >>= 2;
+       /* Write the relevant command block into the ram area. */
+       for (i = 0; i < length; i++) {
+               E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, i,
+                                           *((u32 *) hdr + i));
+               E1000_WRITE_FLUSH(hw);
+       }
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_mng_host_if_write_generic - Write to the manageability host interface
+ *  @hw: pointer to the HW structure
+ *  @buffer: pointer to the host interface buffer
+ *  @length: size of the buffer
+ *  @offset: location in the buffer to write to
+ *  @sum: sum of the data (not checksum)
+ *
+ *  This function writes the buffer content at the offset given on the host if.
+ *  It also does alignment considerations to do the writes in most efficient
+ *  way.  Also fills up the sum of the buffer in *buffer parameter.
+ **/
+s32 e1000_mng_host_if_write_generic(struct e1000_hw *hw, u8 *buffer,
+                                    u16 length, u16 offset, u8 *sum)
+{
+       u8 *tmp;
+       u8 *bufptr = buffer;
+       u32 data = 0;
+       s32 ret_val = E1000_SUCCESS;
+       u16 remaining, i, j, prev_bytes;
+
+       DEBUGFUNC("e1000_mng_host_if_write_generic");
+
+       /* sum = only sum of the data and it is not checksum */
+
+       if (length == 0 || offset + length > E1000_HI_MAX_MNG_DATA_LENGTH) {
+               ret_val = -E1000_ERR_PARAM;
+               goto out;
+       }
+
+       tmp = (u8 *)&data;
+       prev_bytes = offset & 0x3;
+       offset >>= 2;
+
+       if (prev_bytes) {
+               data = E1000_READ_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset);
+               for (j = prev_bytes; j < sizeof(u32); j++) {
+                       *(tmp + j) = *bufptr++;
+                       *sum += *(tmp + j);
+               }
+               E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset, data);
+               length -= j - prev_bytes;
+               offset++;
+       }
+
+       remaining = length & 0x3;
+       length -= remaining;
+
+       /* Calculate length in DWORDs */
+       length >>= 2;
+
+       /*
+        * The device driver writes the relevant command block into the
+        * ram area.
+        */
+       for (i = 0; i < length; i++) {
+               for (j = 0; j < sizeof(u32); j++) {
+                       *(tmp + j) = *bufptr++;
+                       *sum += *(tmp + j);
+               }
+
+               E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset + i,
+                                           data);
+       }
+       if (remaining) {
+               for (j = 0; j < sizeof(u32); j++) {
+                       if (j < remaining)
+                               *(tmp + j) = *bufptr++;
+                       else
+                               *(tmp + j) = 0;
+
+                       *sum += *(tmp + j);
+               }
+               E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset + i, data);
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_enable_mng_pass_thru - Check if management passthrough is needed
+ *  @hw: pointer to the HW structure
+ *
+ *  Verifies the hardware needs to leave interface enabled so that frames can
+ *  be directed to and from the management interface.
+ *
+ *  Returns TRUE only when ASF firmware is present, MANC.RCV_TCO_EN is set,
+ *  and either (parts with FWSM) firmware reports pass-through mode with
+ *  manageability clock gating off, or (parts without FWSM) SMBus management
+ *  is enabled while ASF is not.
+ **/
+bool e1000_enable_mng_pass_thru(struct e1000_hw *hw)
+{
+       u32 manc;
+       u32 fwsm, factps;
+       bool ret_val = FALSE;
+
+       DEBUGFUNC("e1000_enable_mng_pass_thru");
+
+       if (!hw->mac.asf_firmware_present)
+               goto out;
+
+       manc = E1000_READ_REG(hw, E1000_MANC);
+
+       /* TCO receive must be enabled for any management pass-through */
+       if (!(manc & E1000_MANC_RCV_TCO_EN))
+               goto out;
+
+       if (hw->mac.has_fwsm) {
+               fwsm = E1000_READ_REG(hw, E1000_FWSM);
+               factps = E1000_READ_REG(hw, E1000_FACTPS);
+
+               /* pass-through requires manageability clock gating disabled
+                * and firmware operating in pass-through mode */
+               if (!(factps & E1000_FACTPS_MNGCG) &&
+                   ((fwsm & E1000_FWSM_MODE_MASK) ==
+                    (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) {
+                       ret_val = TRUE;
+                       goto out;
+               }
+       } else if ((manc & E1000_MANC_SMBUS_EN) &&
+                   !(manc & E1000_MANC_ASF_EN)) {
+                       ret_val = TRUE;
+                       goto out;
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_host_interface_command - Writes buffer to host interface
+ *  @hw: pointer to the HW structure
+ *  @buffer: contains a command to write
+ *  @length: the byte length of the buffer, must be multiple of 4 bytes
+ *
+ *  Writes a buffer to the Host Interface.  Upon success, returns E1000_SUCCESS
+ *  else returns E1000_ERR_HOST_INTERFACE_COMMAND.
+ *
+ *  NOTE(review): @buffer is accessed as u32 words (cast below), so it is
+ *  presumably expected to be 4-byte aligned — confirm at call sites.
+ *  Also note the function returns E1000_SUCCESS without doing anything when
+ *  the ARC subsystem or firmware is absent (first two guards below).
+ **/
+s32 e1000_host_interface_command(struct e1000_hw *hw, u8 *buffer, u32 length)
+{
+       u32 hicr, i;
+       s32 ret_val = E1000_SUCCESS;
+
+       DEBUGFUNC("e1000_host_interface_command");
+
+       /* silently succeed if the hardware has no host interface */
+       if (!(hw->mac.arc_subsystem_valid)) {
+               DEBUGOUT("Hardware doesn't support host interface command.\n");
+               goto out;
+       }
+
+       if (!hw->mac.asf_firmware_present) {
+               DEBUGOUT("Firmware is not present.\n");
+               goto out;
+       }
+
+       /* length must be a non-zero multiple of 4 within the HI block limit */
+       if (length == 0 || length & 0x3 ||
+           length > E1000_HI_MAX_BLOCK_BYTE_LENGTH) {
+               DEBUGOUT("Buffer length failure.\n");
+               ret_val = -E1000_ERR_HOST_INTERFACE_COMMAND;
+               goto out;
+       }
+
+       /* Check that the host interface is enabled. */
+       hicr = E1000_READ_REG(hw, E1000_HICR);
+       if ((hicr & E1000_HICR_EN) == 0) {
+               DEBUGOUT("E1000_HOST_EN bit disabled.\n");
+               ret_val = -E1000_ERR_HOST_INTERFACE_COMMAND;
+               goto out;
+       }
+
+       /* Calculate length in DWORDs */
+       length >>= 2;
+
+       /*
+        * The device driver writes the relevant command block
+        * into the ram area.
+        */
+       for (i = 0; i < length; i++)
+               E1000_WRITE_REG_ARRAY_DWORD(hw,
+                                           E1000_HOST_IF,
+                                           i,
+                                           *((u32 *)buffer + i));
+
+       /* Setting this bit tells the ARC that a new command is pending. */
+       E1000_WRITE_REG(hw, E1000_HICR, hicr | E1000_HICR_C);
+
+       /* poll (1 ms per iteration) until firmware clears the command bit */
+       for (i = 0; i < E1000_HI_COMMAND_TIMEOUT; i++) {
+               hicr = E1000_READ_REG(hw, E1000_HICR);
+               if (!(hicr & E1000_HICR_C))
+                       break;
+               msec_delay(1);
+       }
+
+       /* Check command successful completion. */
+       if (i == E1000_HI_COMMAND_TIMEOUT ||
+           (!(E1000_READ_REG(hw, E1000_HICR) & E1000_HICR_SV))) {
+               DEBUGOUT("Command has failed with no status valid.\n");
+               ret_val = -E1000_ERR_HOST_INTERFACE_COMMAND;
+               goto out;
+       }
+
+       /* read the response back into the caller's buffer, in place */
+       for (i = 0; i < length; i++)
+               *((u32 *)buffer + i) = E1000_READ_REG_ARRAY_DWORD(hw,
+                                                                 E1000_HOST_IF,
+                                                                 i);
+
+out:
+       return ret_val;
+}
+
diff --git a/lib/librte_pmd_igb/igb/e1000_manage.h b/lib/librte_pmd_igb/igb/e1000_manage.h
new file mode 100644 (file)
index 0000000..9a8d756
--- /dev/null
@@ -0,0 +1,90 @@
+/******************************************************************************
+
+  Copyright (c) 2001-2011, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#ifndef _E1000_MANAGE_H_
+#define _E1000_MANAGE_H_
+
+bool e1000_check_mng_mode_generic(struct e1000_hw *hw);
+bool e1000_enable_tx_pkt_filtering_generic(struct e1000_hw *hw);
+s32  e1000_mng_enable_host_if_generic(struct e1000_hw *hw);
+s32  e1000_mng_host_if_write_generic(struct e1000_hw *hw, u8 *buffer,
+                                     u16 length, u16 offset, u8 *sum);
+s32  e1000_mng_write_cmd_header_generic(struct e1000_hw *hw,
+                                    struct e1000_host_mng_command_header *hdr);
+s32  e1000_mng_write_dhcp_info_generic(struct e1000_hw *hw,
+                                       u8 *buffer, u16 length);
+bool e1000_enable_mng_pass_thru(struct e1000_hw *hw);
+u8 e1000_calculate_checksum(u8 *buffer, u32 length);
+s32 e1000_host_interface_command(struct e1000_hw *hw, u8 *buffer, u32 length);
+
+/* Manageability operating modes; compared against the FWSM MODE field
+ * (see E1000_FWSM_MODE_MASK / E1000_FWSM_MODE_SHIFT below). */
+enum e1000_mng_mode {
+       e1000_mng_mode_none = 0,
+       e1000_mng_mode_asf,
+       e1000_mng_mode_pt,
+       e1000_mng_mode_ipmi,
+       e1000_mng_mode_host_if_only
+};
+
+#define E1000_FACTPS_MNGCG    0x20000000
+
+#define E1000_FWSM_MODE_MASK  0xE
+#define E1000_FWSM_MODE_SHIFT 1
+
+#define E1000_MNG_IAMT_MODE                  0x3
+#define E1000_MNG_DHCP_COOKIE_LENGTH         0x10
+#define E1000_MNG_DHCP_COOKIE_OFFSET         0x6F0
+#define E1000_MNG_DHCP_COMMAND_TIMEOUT       10
+#define E1000_MNG_DHCP_TX_PAYLOAD_CMD        64
+#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING 0x1
+#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN    0x2
+
+#define E1000_VFTA_ENTRY_SHIFT               5
+#define E1000_VFTA_ENTRY_MASK                0x7F
+#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK      0x1F
+
+#define E1000_HI_MAX_BLOCK_BYTE_LENGTH       1792 /* Num of bytes in range */
+#define E1000_HI_MAX_BLOCK_DWORD_LENGTH      448 /* Num of dwords in range */
+#define E1000_HI_COMMAND_TIMEOUT             500 /* Process HI command limit */
+
+#define E1000_HICR_EN              0x01  /* Enable bit - RO */
+/* Driver sets this bit when done to put command in RAM */
+#define E1000_HICR_C               0x02
+#define E1000_HICR_SV              0x04  /* Status Validity */
+#define E1000_HICR_FW_RESET_ENABLE 0x40
+#define E1000_HICR_FW_RESET        0x80
+
+/* Intel(R) Active Management Technology signature */
+#define E1000_IAMT_SIGNATURE  0x544D4149
+
+#endif
diff --git a/lib/librte_pmd_igb/igb/e1000_mbx.c b/lib/librte_pmd_igb/igb/e1000_mbx.c
new file mode 100644 (file)
index 0000000..67dbc64
--- /dev/null
@@ -0,0 +1,764 @@
+/******************************************************************************
+
+  Copyright (c) 2001-2011, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#include "e1000_mbx.h"
+
+/**
+ *  e1000_null_mbx_check_for_flag - No-op function, return 0
+ *  @hw: pointer to the HW structure
+ *  @mbx_id: id of mailbox to check (unused)
+ **/
+static s32 e1000_null_mbx_check_for_flag(struct e1000_hw *hw, u16 mbx_id)
+{
+       DEBUGFUNC("e1000_null_mbx_check_flag");
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_null_mbx_transact - No-op function, return 0
+ *  @hw: pointer to the HW structure
+ *  @msg: message buffer (unused)
+ *  @size: length of buffer (unused)
+ *  @mbx_id: id of mailbox (unused)
+ **/
+static s32 e1000_null_mbx_transact(struct e1000_hw *hw, u32 *msg, u16 size,
+                            u16 mbx_id)
+{
+       DEBUGFUNC("e1000_null_mbx_rw_msg");
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_read_mbx - Reads a message from the mailbox
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer
+ *  @mbx_id: id of mailbox to read
+ *
+ *  returns SUCCESS if it successfully read message from buffer;
+ *  returns ERR_MBX if no read op is installed.
+ **/
+s32 e1000_read_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+{
+       struct e1000_mbx_info *mbx = &hw->mbx;
+       s32 ret_val = -E1000_ERR_MBX;
+
+       DEBUGFUNC("e1000_read_mbx");
+
+       /* limit read to size of mailbox */
+       if (size > mbx->size)
+               size = mbx->size;
+
+       if (mbx->ops.read)
+               ret_val = mbx->ops.read(hw, msg, size, mbx_id);
+
+       return ret_val;
+}
+
+/**
+ *  e1000_write_mbx - Write a message to the mailbox
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer
+ *  @mbx_id: id of mailbox to write
+ *
+ *  returns SUCCESS if it successfully copied message into the buffer;
+ *  ERR_MBX if the message is larger than the mailbox.  Note: unlike the
+ *  read path, an oversized message is rejected rather than truncated.
+ **/
+s32 e1000_write_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+{
+       struct e1000_mbx_info *mbx = &hw->mbx;
+       s32 ret_val = E1000_SUCCESS;
+
+       DEBUGFUNC("e1000_write_mbx");
+
+       if (size > mbx->size)
+               ret_val = -E1000_ERR_MBX;
+
+       else if (mbx->ops.write)
+               ret_val = mbx->ops.write(hw, msg, size, mbx_id);
+
+       return ret_val;
+}
+
+/**
+ *  e1000_check_for_msg - checks to see if someone sent us mail
+ *  @hw: pointer to the HW structure
+ *  @mbx_id: id of mailbox to check
+ *
+ *  returns SUCCESS if the Status bit was found or else ERR_MBX
+ *  (also ERR_MBX when no check_for_msg op is installed).
+ **/
+s32 e1000_check_for_msg(struct e1000_hw *hw, u16 mbx_id)
+{
+       struct e1000_mbx_info *mbx = &hw->mbx;
+       s32 ret_val = -E1000_ERR_MBX;
+
+       DEBUGFUNC("e1000_check_for_msg");
+
+       if (mbx->ops.check_for_msg)
+               ret_val = mbx->ops.check_for_msg(hw, mbx_id);
+
+       return ret_val;
+}
+
+/**
+ *  e1000_check_for_ack - checks to see if someone sent us ACK
+ *  @hw: pointer to the HW structure
+ *  @mbx_id: id of mailbox to check
+ *
+ *  returns SUCCESS if the Status bit was found or else ERR_MBX
+ *  (also ERR_MBX when no check_for_ack op is installed).
+ **/
+s32 e1000_check_for_ack(struct e1000_hw *hw, u16 mbx_id)
+{
+       struct e1000_mbx_info *mbx = &hw->mbx;
+       s32 ret_val = -E1000_ERR_MBX;
+
+       DEBUGFUNC("e1000_check_for_ack");
+
+       if (mbx->ops.check_for_ack)
+               ret_val = mbx->ops.check_for_ack(hw, mbx_id);
+
+       return ret_val;
+}
+
+/**
+ *  e1000_check_for_rst - checks to see if other side has reset
+ *  @hw: pointer to the HW structure
+ *  @mbx_id: id of mailbox to check
+ *
+ *  returns SUCCESS if the Status bit was found or else ERR_MBX
+ *  (also ERR_MBX when no check_for_rst op is installed).
+ **/
+s32 e1000_check_for_rst(struct e1000_hw *hw, u16 mbx_id)
+{
+       struct e1000_mbx_info *mbx = &hw->mbx;
+       s32 ret_val = -E1000_ERR_MBX;
+
+       DEBUGFUNC("e1000_check_for_rst");
+
+       if (mbx->ops.check_for_rst)
+               ret_val = mbx->ops.check_for_rst(hw, mbx_id);
+
+       return ret_val;
+}
+
+/**
+ *  e1000_poll_for_msg - Wait for message notification
+ *  @hw: pointer to the HW structure
+ *  @mbx_id: id of mailbox to write
+ *
+ *  returns SUCCESS if it successfully received a message notification
+ *  within mbx->timeout iterations of mbx->usec_delay each.
+ **/
+static s32 e1000_poll_for_msg(struct e1000_hw *hw, u16 mbx_id)
+{
+       struct e1000_mbx_info *mbx = &hw->mbx;
+       int countdown = mbx->timeout;
+
+       DEBUGFUNC("e1000_poll_for_msg");
+
+       /* countdown == 0 here means mailbox already marked timed out */
+       if (!countdown || !mbx->ops.check_for_msg)
+               goto out;
+
+       while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) {
+               countdown--;
+               if (!countdown)
+                       break;
+               usec_delay(mbx->usec_delay);
+       }
+
+       /* if we failed, all future posted messages fail until reset */
+       if (!countdown)
+               mbx->timeout = 0;
+out:
+       return countdown ? E1000_SUCCESS : -E1000_ERR_MBX;
+}
+
+/**
+ *  e1000_poll_for_ack - Wait for message acknowledgement
+ *  @hw: pointer to the HW structure
+ *  @mbx_id: id of mailbox to write
+ *
+ *  returns SUCCESS if it successfully received a message acknowledgement
+ *  within mbx->timeout iterations of mbx->usec_delay each.
+ **/
+static s32 e1000_poll_for_ack(struct e1000_hw *hw, u16 mbx_id)
+{
+       struct e1000_mbx_info *mbx = &hw->mbx;
+       int countdown = mbx->timeout;
+
+       DEBUGFUNC("e1000_poll_for_ack");
+
+       /* countdown == 0 here means mailbox already marked timed out */
+       if (!countdown || !mbx->ops.check_for_ack)
+               goto out;
+
+       while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) {
+               countdown--;
+               if (!countdown)
+                       break;
+               usec_delay(mbx->usec_delay);
+       }
+
+       /* if we failed, all future posted messages fail until reset */
+       if (!countdown)
+               mbx->timeout = 0;
+out:
+       return countdown ? E1000_SUCCESS : -E1000_ERR_MBX;
+}
+
+/**
+ *  e1000_read_posted_mbx - Wait for message notification and receive message
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer
+ *  @mbx_id: id of mailbox to write
+ *
+ *  returns SUCCESS if it successfully received a message notification and
+ *  copied it into the receive buffer.
+ **/
+s32 e1000_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+{
+       struct e1000_mbx_info *mbx = &hw->mbx;
+       s32 ret_val = -E1000_ERR_MBX;
+
+       DEBUGFUNC("e1000_read_posted_mbx");
+
+       if (!mbx->ops.read)
+               goto out;
+
+       ret_val = e1000_poll_for_msg(hw, mbx_id);
+
+       /* if notification received read message, otherwise we timed out */
+       if (!ret_val)
+               ret_val = mbx->ops.read(hw, msg, size, mbx_id);
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_write_posted_mbx - Write a message to the mailbox, wait for ack
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer
+ *  @mbx_id: id of mailbox to write
+ *
+ *  returns SUCCESS if it successfully copied message into the buffer and
+ *  received an ack to that message within delay * timeout period
+ **/
+s32 e1000_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+{
+       struct e1000_mbx_info *mbx = &hw->mbx;
+       s32 ret_val = -E1000_ERR_MBX;
+
+       DEBUGFUNC("e1000_write_posted_mbx");
+
+       /* exit if either we can't write or there isn't a defined timeout */
+       if (!mbx->ops.write || !mbx->timeout)
+               goto out;
+
+       /* send msg */
+       ret_val = mbx->ops.write(hw, msg, size, mbx_id);
+
+       /* if msg sent wait until we receive an ack */
+       if (!ret_val)
+               ret_val = e1000_poll_for_ack(hw, mbx_id);
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_init_mbx_ops_generic - Initialize mbx function pointers
+ *  @hw: pointer to the HW structure
+ *
+ *  Sets the function pointers to no-op functions, except the posted
+ *  read/write helpers which are always the generic implementations.
+ **/
+void e1000_init_mbx_ops_generic(struct e1000_hw *hw)
+{
+       struct e1000_mbx_info *mbx = &hw->mbx;
+       mbx->ops.init_params = e1000_null_ops_generic;
+       mbx->ops.read = e1000_null_mbx_transact;
+       mbx->ops.write = e1000_null_mbx_transact;
+       mbx->ops.check_for_msg = e1000_null_mbx_check_for_flag;
+       mbx->ops.check_for_ack = e1000_null_mbx_check_for_flag;
+       mbx->ops.check_for_rst = e1000_null_mbx_check_for_flag;
+       mbx->ops.read_posted = e1000_read_posted_mbx;
+       mbx->ops.write_posted = e1000_write_posted_mbx;
+}
+
+/**
+ *  e1000_read_v2p_mailbox - read v2p mailbox
+ *  @hw: pointer to the HW structure
+ *
+ *  This function is used to read the v2p mailbox without losing the read to
+ *  clear status bits.  Read-to-clear bits observed here are cached in
+ *  hw->dev_spec.vf.v2p_mailbox so a later e1000_check_for_bit_vf() still
+ *  sees them even though the hardware has already cleared them.
+ **/
+static u32 e1000_read_v2p_mailbox(struct e1000_hw *hw)
+{
+       u32 v2p_mailbox = E1000_READ_REG(hw, E1000_V2PMAILBOX(0));
+
+       /* merge previously-cached R2C bits, then refresh the cache */
+       v2p_mailbox |= hw->dev_spec.vf.v2p_mailbox;
+       hw->dev_spec.vf.v2p_mailbox |= v2p_mailbox & E1000_V2PMAILBOX_R2C_BITS;
+
+       return v2p_mailbox;
+}
+
+/**
+ *  e1000_check_for_bit_vf - Determine if a status bit was set
+ *  @hw: pointer to the HW structure
+ *  @mask: bitmask for bits to be tested and cleared
+ *
+ *  This function is used to check for the read to clear bits within
+ *  the V2P mailbox.  Returns SUCCESS if any bit in @mask was set;
+ *  the tested bits are removed from the cached copy either way.
+ **/
+static s32 e1000_check_for_bit_vf(struct e1000_hw *hw, u32 mask)
+{
+       u32 v2p_mailbox = e1000_read_v2p_mailbox(hw);
+       s32 ret_val = -E1000_ERR_MBX;
+
+       if (v2p_mailbox & mask)
+               ret_val = E1000_SUCCESS;
+
+       /* consume the bits from the software cache */
+       hw->dev_spec.vf.v2p_mailbox &= ~mask;
+
+       return ret_val;
+}
+
+/**
+ *  e1000_check_for_msg_vf - checks to see if the PF has sent mail
+ *  @hw: pointer to the HW structure
+ *  @mbx_id: id of mailbox to check (unused; VF has a single mailbox)
+ *
+ *  returns SUCCESS if the PF has set the Status bit or else ERR_MBX
+ **/
+static s32 e1000_check_for_msg_vf(struct e1000_hw *hw, u16 mbx_id)
+{
+       s32 ret_val = -E1000_ERR_MBX;
+
+       DEBUGFUNC("e1000_check_for_msg_vf");
+
+       if (!e1000_check_for_bit_vf(hw, E1000_V2PMAILBOX_PFSTS)) {
+               ret_val = E1000_SUCCESS;
+               hw->mbx.stats.reqs++;
+       }
+
+       return ret_val;
+}
+
+/**
+ *  e1000_check_for_ack_vf - checks to see if the PF has ACK'd
+ *  @hw: pointer to the HW structure
+ *  @mbx_id: id of mailbox to check (unused; VF has a single mailbox)
+ *
+ *  returns SUCCESS if the PF has set the ACK bit or else ERR_MBX
+ **/
+static s32 e1000_check_for_ack_vf(struct e1000_hw *hw, u16 mbx_id)
+{
+       s32 ret_val = -E1000_ERR_MBX;
+
+       DEBUGFUNC("e1000_check_for_ack_vf");
+
+       if (!e1000_check_for_bit_vf(hw, E1000_V2PMAILBOX_PFACK)) {
+               ret_val = E1000_SUCCESS;
+               hw->mbx.stats.acks++;
+       }
+
+       return ret_val;
+}
+
+/**
+ *  e1000_check_for_rst_vf - checks to see if the PF has reset
+ *  @hw: pointer to the HW structure
+ *  @mbx_id: id of mailbox to check (unused; VF has a single mailbox)
+ *
+ *  returns SUCCESS if the PF has set the reset-done or reset-indication
+ *  bit or else ERR_MBX (the function returns s32, not bool).
+ **/
+static s32 e1000_check_for_rst_vf(struct e1000_hw *hw, u16 mbx_id)
+{
+       s32 ret_val = -E1000_ERR_MBX;
+
+       DEBUGFUNC("e1000_check_for_rst_vf");
+
+       if (!e1000_check_for_bit_vf(hw, (E1000_V2PMAILBOX_RSTD |
+                                        E1000_V2PMAILBOX_RSTI))) {
+               ret_val = E1000_SUCCESS;
+               hw->mbx.stats.rsts++;
+       }
+
+       return ret_val;
+}
+
+/**
+ *  e1000_obtain_mbx_lock_vf - obtain mailbox lock
+ *  @hw: pointer to the HW structure
+ *
+ *  return SUCCESS if we obtained the mailbox lock.  Single attempt,
+ *  no retry: if the PF currently owns the buffer the VFU bit does not
+ *  stick and ERR_MBX is returned.
+ **/
+static s32 e1000_obtain_mbx_lock_vf(struct e1000_hw *hw)
+{
+       s32 ret_val = -E1000_ERR_MBX;
+
+       DEBUGFUNC("e1000_obtain_mbx_lock_vf");
+
+       /* Take ownership of the buffer */
+       E1000_WRITE_REG(hw, E1000_V2PMAILBOX(0), E1000_V2PMAILBOX_VFU);
+
+       /* reserve mailbox for vf use */
+       if (e1000_read_v2p_mailbox(hw) & E1000_V2PMAILBOX_VFU)
+               ret_val = E1000_SUCCESS;
+
+       return ret_val;
+}
+
+/**
+ *  e1000_write_mbx_vf - Write a message to the mailbox
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer (in dwords; caller bounds it to mbx->size)
+ *  @mbx_id: id of mailbox to write (unused; VF has a single mailbox)
+ *
+ *  returns SUCCESS if it successfully copied message into the buffer
+ **/
+static s32 e1000_write_mbx_vf(struct e1000_hw *hw, u32 *msg, u16 size,
+                              u16 mbx_id)
+{
+       s32 ret_val;
+       u16 i;
+
+
+       DEBUGFUNC("e1000_write_mbx_vf");
+
+       /* lock the mailbox to prevent pf/vf race condition */
+       ret_val = e1000_obtain_mbx_lock_vf(hw);
+       if (ret_val)
+               goto out_no_write;
+
+       /* flush msg and acks as we are overwriting the message buffer */
+       e1000_check_for_msg_vf(hw, 0);
+       e1000_check_for_ack_vf(hw, 0);
+
+       /* copy the caller specified message to the mailbox memory buffer */
+       for (i = 0; i < size; i++)
+               E1000_WRITE_REG_ARRAY(hw, E1000_VMBMEM(0), i, msg[i]);
+
+       /* update stats */
+       hw->mbx.stats.msgs_tx++;
+
+       /* Drop VFU and interrupt the PF to tell it a message has been sent */
+       E1000_WRITE_REG(hw, E1000_V2PMAILBOX(0), E1000_V2PMAILBOX_REQ);
+
+out_no_write:
+       return ret_val;
+}
+
+/**
+ *  e1000_read_mbx_vf - Reads a message from the inbox intended for vf
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer (in dwords; caller bounds it to mbx->size)
+ *  @mbx_id: id of mailbox to read (unused; VF has a single mailbox)
+ *
+ *  returns SUCCESS if it successfully read message from buffer
+ **/
+static s32 e1000_read_mbx_vf(struct e1000_hw *hw, u32 *msg, u16 size,
+                             u16 mbx_id)
+{
+       s32 ret_val = E1000_SUCCESS;
+       u16 i;
+
+       DEBUGFUNC("e1000_read_mbx_vf");
+
+       /* lock the mailbox to prevent pf/vf race condition */
+       ret_val = e1000_obtain_mbx_lock_vf(hw);
+       if (ret_val)
+               goto out_no_read;
+
+       /* copy the message from the mailbox memory buffer */
+       for (i = 0; i < size; i++)
+               msg[i] = E1000_READ_REG_ARRAY(hw, E1000_VMBMEM(0), i);
+
+       /* Acknowledge receipt and release mailbox, then we're done */
+       E1000_WRITE_REG(hw, E1000_V2PMAILBOX(0), E1000_V2PMAILBOX_ACK);
+
+       /* update stats */
+       hw->mbx.stats.msgs_rx++;
+
+out_no_read:
+       return ret_val;
+}
+
+/**
+ *  e1000_init_mbx_params_vf - set initial values for vf mailbox
+ *  @hw: pointer to the HW structure
+ *
+ *  Initializes the hw->mbx struct to correct values for vf mailbox.
+ *  Always returns E1000_SUCCESS.
+ */
+s32 e1000_init_mbx_params_vf(struct e1000_hw *hw)
+{
+       struct e1000_mbx_info *mbx = &hw->mbx;
+
+       /* start mailbox as timed out and let the reset_hw call set the timeout
+        * value to begin communications */
+       mbx->timeout = 0;
+       mbx->usec_delay = E1000_VF_MBX_INIT_DELAY;
+
+       mbx->size = E1000_VFMAILBOX_SIZE;
+
+       mbx->ops.read = e1000_read_mbx_vf;
+       mbx->ops.write = e1000_write_mbx_vf;
+       mbx->ops.read_posted = e1000_read_posted_mbx;
+       mbx->ops.write_posted = e1000_write_posted_mbx;
+       mbx->ops.check_for_msg = e1000_check_for_msg_vf;
+       mbx->ops.check_for_ack = e1000_check_for_ack_vf;
+       mbx->ops.check_for_rst = e1000_check_for_rst_vf;
+
+       mbx->stats.msgs_tx = 0;
+       mbx->stats.msgs_rx = 0;
+       mbx->stats.reqs = 0;
+       mbx->stats.acks = 0;
+       mbx->stats.rsts = 0;
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_check_for_bit_pf - Determine if a VF status bit was set
+ *  @hw: pointer to the HW structure
+ *  @mask: bitmask for bits to be tested and cleared
+ *
+ *  PF-side counterpart of e1000_check_for_bit_vf: tests @mask against
+ *  MBVFICR and, when set, writes the mask back to clear it (write-1-to-clear).
+ *  Returns SUCCESS if any bit in @mask was set, else ERR_MBX.
+ **/
+static s32 e1000_check_for_bit_pf(struct e1000_hw *hw, u32 mask)
+{
+       u32 mbvficr = E1000_READ_REG(hw, E1000_MBVFICR);
+       s32 ret_val = -E1000_ERR_MBX;
+
+       if (mbvficr & mask) {
+               ret_val = E1000_SUCCESS;
+               E1000_WRITE_REG(hw, E1000_MBVFICR, mask);
+       }
+
+       return ret_val;
+}
+
+/**
+ *  e1000_check_for_msg_pf - checks to see if the VF has sent mail
+ *  @hw: pointer to the HW structure
+ *  @vf_number: the VF index
+ *
+ *  returns SUCCESS if the VF has set the Status bit or else ERR_MBX
+ **/
+static s32 e1000_check_for_msg_pf(struct e1000_hw *hw, u16 vf_number)
+{
+       s32 ret_val = -E1000_ERR_MBX;
+
+       DEBUGFUNC("e1000_check_for_msg_pf");
+
+       /* per-VF request bit: VF1 bit shifted by the VF index */
+       if (!e1000_check_for_bit_pf(hw, E1000_MBVFICR_VFREQ_VF1 << vf_number)) {
+               ret_val = E1000_SUCCESS;
+               hw->mbx.stats.reqs++;
+       }
+
+       return ret_val;
+}
+
+/**
+ *  e1000_check_for_ack_pf - checks to see if the VF has ACKed
+ *  @hw: pointer to the HW structure
+ *  @vf_number: the VF index
+ *
+ *  returns SUCCESS if the VF has set the ACK bit or else ERR_MBX
+ **/
+static s32 e1000_check_for_ack_pf(struct e1000_hw *hw, u16 vf_number)
+{
+       s32 ret_val = -E1000_ERR_MBX;
+
+       DEBUGFUNC("e1000_check_for_ack_pf");
+
+       /* per-VF ack bit: VF1 bit shifted by the VF index */
+       if (!e1000_check_for_bit_pf(hw, E1000_MBVFICR_VFACK_VF1 << vf_number)) {
+               ret_val = E1000_SUCCESS;
+               hw->mbx.stats.acks++;
+       }
+
+       return ret_val;
+}
+
+/**
+ *  e1000_check_for_rst_pf - checks to see if the VF has reset
+ *  @hw: pointer to the HW structure
+ *  @vf_number: the VF index
+ *
+ *  returns SUCCESS if the VFLRE bit for @vf_number is set or else ERR_MBX
+ **/
+static s32 e1000_check_for_rst_pf(struct e1000_hw *hw, u16 vf_number)
+{
+       u32 vflre = E1000_READ_REG(hw, E1000_VFLRE);
+       s32 ret_val = -E1000_ERR_MBX;
+
+       DEBUGFUNC("e1000_check_for_rst_pf");
+
+       if (vflre & (1 << vf_number)) {
+               ret_val = E1000_SUCCESS;
+               /* write-1-to-clear the VF's function-level-reset bit */
+               E1000_WRITE_REG(hw, E1000_VFLRE, (1 << vf_number));
+               hw->mbx.stats.rsts++;
+       }
+
+       return ret_val;
+}
+
+/**
+ *  e1000_obtain_mbx_lock_pf - obtain mailbox lock
+ *  @hw: pointer to the HW structure
+ *  @vf_number: the VF index
+ *
+ *  return SUCCESS if we obtained the mailbox lock.  Single attempt,
+ *  no retry: if the VF currently owns the buffer the PFU bit does not
+ *  stick and ERR_MBX is returned.
+ **/
+static s32 e1000_obtain_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number)
+{
+       s32 ret_val = -E1000_ERR_MBX;
+       u32 p2v_mailbox;
+
+       DEBUGFUNC("e1000_obtain_mbx_lock_pf");
+
+       /* Take ownership of the buffer */
+       E1000_WRITE_REG(hw, E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU);
+
+       /* reserve mailbox for vf use */
+       p2v_mailbox = E1000_READ_REG(hw, E1000_P2VMAILBOX(vf_number));
+       if (p2v_mailbox & E1000_P2VMAILBOX_PFU)
+               ret_val = E1000_SUCCESS;
+
+       return ret_val;
+}
+
+/**
+ *  e1000_write_mbx_pf - Places a message in the mailbox
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer (in dwords; caller bounds it to mbx->size)
+ *  @vf_number: the VF index
+ *
+ *  returns SUCCESS if it successfully copied message into the buffer
+ **/
+static s32 e1000_write_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size,
+                              u16 vf_number)
+{
+       s32 ret_val;
+       u16 i;
+
+       DEBUGFUNC("e1000_write_mbx_pf");
+
+       /* lock the mailbox to prevent pf/vf race condition */
+       ret_val = e1000_obtain_mbx_lock_pf(hw, vf_number);
+       if (ret_val)
+               goto out_no_write;
+
+       /* flush msg and acks as we are overwriting the message buffer */
+       e1000_check_for_msg_pf(hw, vf_number);
+       e1000_check_for_ack_pf(hw, vf_number);
+
+       /* copy the caller specified message to the mailbox memory buffer */
+       for (i = 0; i < size; i++)
+               E1000_WRITE_REG_ARRAY(hw, E1000_VMBMEM(vf_number), i, msg[i]);
+
+       /* Interrupt VF to tell it a message has been sent and release buffer*/
+       E1000_WRITE_REG(hw, E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_STS);
+
+       /* update stats */
+       hw->mbx.stats.msgs_tx++;
+
+out_no_write:
+       return ret_val;
+
+}
+
+/**
+ *  e1000_read_mbx_pf - Read a message from the mailbox
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer (in dwords; caller bounds it to mbx->size)
+ *  @vf_number: the VF index
+ *
+ *  This function copies a message from the mailbox buffer to the caller's
+ *  memory buffer.  The presumption is that the caller knows that there was
+ *  a message due to a VF request so no polling for message is needed.
+ **/
+static s32 e1000_read_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size,
+                             u16 vf_number)
+{
+       s32 ret_val;
+       u16 i;
+
+       DEBUGFUNC("e1000_read_mbx_pf");
+
+       /* lock the mailbox to prevent pf/vf race condition */
+       ret_val = e1000_obtain_mbx_lock_pf(hw, vf_number);
+       if (ret_val)
+               goto out_no_read;
+
+       /* copy the message to the mailbox memory buffer */
+       for (i = 0; i < size; i++)
+               msg[i] = E1000_READ_REG_ARRAY(hw, E1000_VMBMEM(vf_number), i);
+
+       /* Acknowledge the message and release buffer */
+       E1000_WRITE_REG(hw, E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_ACK);
+
+       /* update stats */
+       hw->mbx.stats.msgs_rx++;
+
+out_no_read:
+       return ret_val;
+}
+
+/**
+ *  e1000_init_mbx_params_pf - set initial values for pf mailbox
+ *  @hw: pointer to the HW structure
+ *
+ *  Initializes the hw->mbx struct to correct values for pf mailbox on
+ *  mac types that support SR-IOV mailboxes (82576, i350); other mac
+ *  types are left untouched.  Always returns E1000_SUCCESS.
+ */
+s32 e1000_init_mbx_params_pf(struct e1000_hw *hw)
+{
+       struct e1000_mbx_info *mbx = &hw->mbx;
+
+       switch (hw->mac.type) {
+       case e1000_82576:
+       case e1000_i350:
+               /* PF side does not poll; no timeout/delay needed */
+               mbx->timeout = 0;
+               mbx->usec_delay = 0;
+
+               mbx->size = E1000_VFMAILBOX_SIZE;
+
+               mbx->ops.read = e1000_read_mbx_pf;
+               mbx->ops.write = e1000_write_mbx_pf;
+               mbx->ops.read_posted = e1000_read_posted_mbx;
+               mbx->ops.write_posted = e1000_write_posted_mbx;
+               mbx->ops.check_for_msg = e1000_check_for_msg_pf;
+               mbx->ops.check_for_ack = e1000_check_for_ack_pf;
+               mbx->ops.check_for_rst = e1000_check_for_rst_pf;
+
+               mbx->stats.msgs_tx = 0;
+               mbx->stats.msgs_rx = 0;
+               mbx->stats.reqs = 0;
+               mbx->stats.acks = 0;
+               mbx->stats.rsts = 0;
+               break;
+       default:
+               /* no mailbox support on this mac type */
+               break;
+       }
+
+       return E1000_SUCCESS;
+}
+
diff --git a/lib/librte_pmd_igb/igb/e1000_mbx.h b/lib/librte_pmd_igb/igb/e1000_mbx.h
new file mode 100644 (file)
index 0000000..6e9d538
--- /dev/null
@@ -0,0 +1,106 @@
+/******************************************************************************
+
+  Copyright (c) 2001-2011, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#ifndef _E1000_MBX_H_
+#define _E1000_MBX_H_
+
+#include "e1000_api.h"
+
+/* Define mailbox register bits */
+#define E1000_V2PMAILBOX_REQ   0x00000001 /* Request for PF Ready bit */
+#define E1000_V2PMAILBOX_ACK   0x00000002 /* Ack PF message received */
+#define E1000_V2PMAILBOX_VFU   0x00000004 /* VF owns the mailbox buffer */
+#define E1000_V2PMAILBOX_PFU   0x00000008 /* PF owns the mailbox buffer */
+#define E1000_V2PMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */
+#define E1000_V2PMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */
+#define E1000_V2PMAILBOX_RSTI  0x00000040 /* PF has reset indication */
+#define E1000_V2PMAILBOX_RSTD  0x00000080 /* PF has indicated reset done */
+#define E1000_V2PMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */
+
+#define E1000_P2VMAILBOX_STS   0x00000001 /* Initiate message send to VF */
+#define E1000_P2VMAILBOX_ACK   0x00000002 /* Ack message recv'd from VF */
+#define E1000_P2VMAILBOX_VFU   0x00000004 /* VF owns the mailbox buffer */
+#define E1000_P2VMAILBOX_PFU   0x00000008 /* PF owns the mailbox buffer */
+#define E1000_P2VMAILBOX_RVFU  0x00000010 /* Reset VFU - used when VF stuck */
+
+#define E1000_MBVFICR_VFREQ_MASK 0x000000FF /* bits for VF messages */
+#define E1000_MBVFICR_VFREQ_VF1  0x00000001 /* bit for VF 1 message */
+#define E1000_MBVFICR_VFACK_MASK 0x00FF0000 /* bits for VF acks */
+#define E1000_MBVFICR_VFACK_VF1  0x00010000 /* bit for VF 1 ack */
+
+#define E1000_VFMAILBOX_SIZE   16 /* 16 32 bit words - 64 bytes */
+
+/* If it's a E1000_VF_* msg then it originates in the VF and is sent to the
+ * PF.  The reverse is TRUE if it is E1000_PF_*.
+ * Message ACK's are the value or'd with 0xF0000000
+ */
+#define E1000_VT_MSGTYPE_ACK      0x80000000  /* Messages below or'd with
+                                               * this are the ACK */
+#define E1000_VT_MSGTYPE_NACK     0x40000000  /* Messages below or'd with
+                                               * this are the NACK */
+#define E1000_VT_MSGTYPE_CTS      0x20000000  /* Indicates that VF is still
+                                                 clear to send requests */
+#define E1000_VT_MSGINFO_SHIFT    16
+/* bits 23:16 are used for extra info for certain messages */
+#define E1000_VT_MSGINFO_MASK     (0xFF << E1000_VT_MSGINFO_SHIFT)
+
+#define E1000_VF_RESET            0x01 /* VF requests reset */
+#define E1000_VF_SET_MAC_ADDR     0x02 /* VF requests to set MAC addr */
+#define E1000_VF_SET_MULTICAST    0x03 /* VF requests to set MC addr */
+#define E1000_VF_SET_MULTICAST_COUNT_MASK (0x1F << E1000_VT_MSGINFO_SHIFT)
+#define E1000_VF_SET_MULTICAST_OVERFLOW   (0x80 << E1000_VT_MSGINFO_SHIFT)
+#define E1000_VF_SET_VLAN         0x04 /* VF requests to set VLAN */
+#define E1000_VF_SET_VLAN_ADD             (0x01 << E1000_VT_MSGINFO_SHIFT)
+#define E1000_VF_SET_LPE          0x05 /* VF requests to set VMOLR.LPE */
+#define E1000_VF_SET_PROMISC      0x06 /*VF requests to clear VMOLR.ROPE/MPME*/
+#define E1000_VF_SET_PROMISC_UNICAST      (0x01 << E1000_VT_MSGINFO_SHIFT)
+#define E1000_VF_SET_PROMISC_MULTICAST    (0x02 << E1000_VT_MSGINFO_SHIFT)
+
+#define E1000_PF_CONTROL_MSG      0x0100 /* PF control message */
+
+#define E1000_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */
+#define E1000_VF_MBX_INIT_DELAY   500  /* microseconds between retries */
+
+s32 e1000_read_mbx(struct e1000_hw *, u32 *, u16, u16);
+s32 e1000_write_mbx(struct e1000_hw *, u32 *, u16, u16);
+s32 e1000_read_posted_mbx(struct e1000_hw *, u32 *, u16, u16);
+s32 e1000_write_posted_mbx(struct e1000_hw *, u32 *, u16, u16);
+s32 e1000_check_for_msg(struct e1000_hw *, u16);
+s32 e1000_check_for_ack(struct e1000_hw *, u16);
+s32 e1000_check_for_rst(struct e1000_hw *, u16);
+void e1000_init_mbx_ops_generic(struct e1000_hw *hw);
+s32 e1000_init_mbx_params_vf(struct e1000_hw *);
+s32 e1000_init_mbx_params_pf(struct e1000_hw *);
+
+#endif /* _E1000_MBX_H_ */
diff --git a/lib/librte_pmd_igb/igb/e1000_nvm.c b/lib/librte_pmd_igb/igb/e1000_nvm.c
new file mode 100644 (file)
index 0000000..1c44270
--- /dev/null
@@ -0,0 +1,1071 @@
+/******************************************************************************
+
+  Copyright (c) 2001-2011, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#include "e1000_api.h"
+
+static void e1000_stop_nvm(struct e1000_hw *hw);
+static void e1000_reload_nvm_generic(struct e1000_hw *hw);
+
+/**
+ *  e1000_init_nvm_ops_generic - Initialize NVM function pointers
+ *  @hw: pointer to the HW structure
+ *
+ *  Sets up the function pointers to no-op functions
+ **/
+void e1000_init_nvm_ops_generic(struct e1000_hw *hw)
+{
+       struct e1000_nvm_info *nvm = &hw->nvm;
+       DEBUGFUNC("e1000_init_nvm_ops_generic");
+
+       /* Initialize function pointers */
+       nvm->ops.init_params = e1000_null_ops_generic;
+       nvm->ops.acquire = e1000_null_ops_generic;
+       nvm->ops.read = e1000_null_read_nvm;
+       nvm->ops.release = e1000_null_nvm_generic;
+       nvm->ops.reload = e1000_reload_nvm_generic;
+       nvm->ops.update = e1000_null_ops_generic;
+       nvm->ops.valid_led_default = e1000_null_led_default;
+       nvm->ops.validate = e1000_null_ops_generic;
+       nvm->ops.write = e1000_null_write_nvm;
+}
+
+/**
+ *  e1000_null_read_nvm - No-op function, return 0
+ *  @hw: pointer to the HW structure
+ **/
+s32 e1000_null_read_nvm(struct e1000_hw *hw, u16 a, u16 b, u16 *c)
+{
+       DEBUGFUNC("e1000_null_read_nvm");
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_null_nvm_generic - No-op function, return void
+ *  @hw: pointer to the HW structure
+ **/
+void e1000_null_nvm_generic(struct e1000_hw *hw)
+{
+       DEBUGFUNC("e1000_null_nvm_generic");
+       return;
+}
+
+/**
+ *  e1000_null_led_default - No-op function, return 0
+ *  @hw: pointer to the HW structure
+ **/
+s32 e1000_null_led_default(struct e1000_hw *hw, u16 *data)
+{
+       DEBUGFUNC("e1000_null_led_default");
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_null_write_nvm - No-op function, return 0
+ *  @hw: pointer to the HW structure
+ **/
+s32 e1000_null_write_nvm(struct e1000_hw *hw, u16 a, u16 b, u16 *c)
+{
+       DEBUGFUNC("e1000_null_write_nvm");
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_raise_eec_clk - Raise EEPROM clock
+ *  @hw: pointer to the HW structure
+ *  @eecd: pointer to the EEPROM
+ *
+ *  Enable/Raise the EEPROM clock bit.
+ **/
+static void e1000_raise_eec_clk(struct e1000_hw *hw, u32 *eecd)
+{
+       *eecd = *eecd | E1000_EECD_SK;
+       E1000_WRITE_REG(hw, E1000_EECD, *eecd);
+       E1000_WRITE_FLUSH(hw);
+       usec_delay(hw->nvm.delay_usec);
+}
+
+/**
+ *  e1000_lower_eec_clk - Lower EEPROM clock
+ *  @hw: pointer to the HW structure
+ *  @eecd: pointer to the EEPROM
+ *
+ *  Clear/Lower the EEPROM clock bit.
+ **/
+static void e1000_lower_eec_clk(struct e1000_hw *hw, u32 *eecd)
+{
+       *eecd = *eecd & ~E1000_EECD_SK;
+       E1000_WRITE_REG(hw, E1000_EECD, *eecd);
+       E1000_WRITE_FLUSH(hw);
+       usec_delay(hw->nvm.delay_usec);
+}
+
+/**
+ *  e1000_shift_out_eec_bits - Shift data bits out to the EEPROM
+ *  @hw: pointer to the HW structure
+ *  @data: data to send to the EEPROM
+ *  @count: number of bits to shift out
+ *
+ *  We need to shift 'count' bits out to the EEPROM.  So, the value in the
+ *  "data" parameter will be shifted out to the EEPROM one bit at a time.
+ *  In order to do this, "data" must be broken down into bits.
+ **/
+static void e1000_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count)
+{
+       struct e1000_nvm_info *nvm = &hw->nvm;
+       u32 eecd = E1000_READ_REG(hw, E1000_EECD);
+       u32 mask;
+
+       DEBUGFUNC("e1000_shift_out_eec_bits");
+
+       mask = 0x01 << (count - 1);
+       if (nvm->type == e1000_nvm_eeprom_microwire)
+               eecd &= ~E1000_EECD_DO;
+       else
+       if (nvm->type == e1000_nvm_eeprom_spi)
+               eecd |= E1000_EECD_DO;
+
+       do {
+               eecd &= ~E1000_EECD_DI;
+
+               if (data & mask)
+                       eecd |= E1000_EECD_DI;
+
+               E1000_WRITE_REG(hw, E1000_EECD, eecd);
+               E1000_WRITE_FLUSH(hw);
+
+               usec_delay(nvm->delay_usec);
+
+               e1000_raise_eec_clk(hw, &eecd);
+               e1000_lower_eec_clk(hw, &eecd);
+
+               mask >>= 1;
+       } while (mask);
+
+       eecd &= ~E1000_EECD_DI;
+       E1000_WRITE_REG(hw, E1000_EECD, eecd);
+}
+
+/**
+ *  e1000_shift_in_eec_bits - Shift data bits in from the EEPROM
+ *  @hw: pointer to the HW structure
+ *  @count: number of bits to shift in
+ *
+ *  In order to read a register from the EEPROM, we need to shift 'count' bits
+ *  in from the EEPROM.  Bits are "shifted in" by raising the clock input to
+ *  the EEPROM (setting the SK bit), and then reading the value of the data out
+ *  "DO" bit.  During this "shifting in" process the data in "DI" bit should
+ *  always be clear.
+ **/
+static u16 e1000_shift_in_eec_bits(struct e1000_hw *hw, u16 count)
+{
+       u32 eecd;
+       u32 i;
+       u16 data;
+
+       DEBUGFUNC("e1000_shift_in_eec_bits");
+
+       eecd = E1000_READ_REG(hw, E1000_EECD);
+
+       eecd &= ~(E1000_EECD_DO | E1000_EECD_DI);
+       data = 0;
+
+       for (i = 0; i < count; i++) {
+               data <<= 1;
+               e1000_raise_eec_clk(hw, &eecd);
+
+               eecd = E1000_READ_REG(hw, E1000_EECD);
+
+               eecd &= ~E1000_EECD_DI;
+               if (eecd & E1000_EECD_DO)
+                       data |= 1;
+
+               e1000_lower_eec_clk(hw, &eecd);
+       }
+
+       return data;
+}
+
+/**
+ *  e1000_poll_eerd_eewr_done - Poll for EEPROM read/write completion
+ *  @hw: pointer to the HW structure
+ *  @ee_reg: EEPROM flag for polling
+ *
+ *  Polls the EEPROM status bit for either read or write completion based
+ *  upon the value of 'ee_reg'.
+ **/
+s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg)
+{
+       u32 attempts = 100000;
+       u32 i, reg = 0;
+       s32 ret_val = -E1000_ERR_NVM;
+
+       DEBUGFUNC("e1000_poll_eerd_eewr_done");
+
+       for (i = 0; i < attempts; i++) {
+               if (ee_reg == E1000_NVM_POLL_READ)
+                       reg = E1000_READ_REG(hw, E1000_EERD);
+               else
+                       reg = E1000_READ_REG(hw, E1000_EEWR);
+
+               if (reg & E1000_NVM_RW_REG_DONE) {
+                       ret_val = E1000_SUCCESS;
+                       break;
+               }
+
+               usec_delay(5);
+       }
+
+       return ret_val;
+}
+
+/**
+ *  e1000_acquire_nvm_generic - Generic request for access to EEPROM
+ *  @hw: pointer to the HW structure
+ *
+ *  Set the EEPROM access request bit and wait for EEPROM access grant bit.
+ *  Return successful if access grant bit set, else clear the request for
+ *  EEPROM access and return -E1000_ERR_NVM (-1).
+ **/
+s32 e1000_acquire_nvm_generic(struct e1000_hw *hw)
+{
+       u32 eecd = E1000_READ_REG(hw, E1000_EECD);
+       s32 timeout = E1000_NVM_GRANT_ATTEMPTS;
+       s32 ret_val = E1000_SUCCESS;
+
+       DEBUGFUNC("e1000_acquire_nvm_generic");
+
+       E1000_WRITE_REG(hw, E1000_EECD, eecd | E1000_EECD_REQ);
+       eecd = E1000_READ_REG(hw, E1000_EECD);
+
+       while (timeout) {
+               if (eecd & E1000_EECD_GNT)
+                       break;
+               usec_delay(5);
+               eecd = E1000_READ_REG(hw, E1000_EECD);
+               timeout--;
+       }
+
+       if (!timeout) {
+               eecd &= ~E1000_EECD_REQ;
+               E1000_WRITE_REG(hw, E1000_EECD, eecd);
+               DEBUGOUT("Could not acquire NVM grant\n");
+               ret_val = -E1000_ERR_NVM;
+       }
+
+       return ret_val;
+}
+
+/**
+ *  e1000_standby_nvm - Return EEPROM to standby state
+ *  @hw: pointer to the HW structure
+ *
+ *  Return the EEPROM to a standby state.
+ **/
+static void e1000_standby_nvm(struct e1000_hw *hw)
+{
+       struct e1000_nvm_info *nvm = &hw->nvm;
+       u32 eecd = E1000_READ_REG(hw, E1000_EECD);
+
+       DEBUGFUNC("e1000_standby_nvm");
+
+       if (nvm->type == e1000_nvm_eeprom_microwire) {
+               eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
+               E1000_WRITE_REG(hw, E1000_EECD, eecd);
+               E1000_WRITE_FLUSH(hw);
+               usec_delay(nvm->delay_usec);
+
+               e1000_raise_eec_clk(hw, &eecd);
+
+               /* Select EEPROM */
+               eecd |= E1000_EECD_CS;
+               E1000_WRITE_REG(hw, E1000_EECD, eecd);
+               E1000_WRITE_FLUSH(hw);
+               usec_delay(nvm->delay_usec);
+
+               e1000_lower_eec_clk(hw, &eecd);
+       } else
+       if (nvm->type == e1000_nvm_eeprom_spi) {
+               /* Toggle CS to flush commands */
+               eecd |= E1000_EECD_CS;
+               E1000_WRITE_REG(hw, E1000_EECD, eecd);
+               E1000_WRITE_FLUSH(hw);
+               usec_delay(nvm->delay_usec);
+               eecd &= ~E1000_EECD_CS;
+               E1000_WRITE_REG(hw, E1000_EECD, eecd);
+               E1000_WRITE_FLUSH(hw);
+               usec_delay(nvm->delay_usec);
+       }
+}
+
+/**
+ *  e1000_stop_nvm - Terminate EEPROM command
+ *  @hw: pointer to the HW structure
+ *
+ *  Terminates the current command by inverting the EEPROM's chip select pin.
+ **/
+static void e1000_stop_nvm(struct e1000_hw *hw)
+{
+       u32 eecd;
+
+       DEBUGFUNC("e1000_stop_nvm");
+
+       eecd = E1000_READ_REG(hw, E1000_EECD);
+       if (hw->nvm.type == e1000_nvm_eeprom_spi) {
+               /* Pull CS high */
+               eecd |= E1000_EECD_CS;
+               e1000_lower_eec_clk(hw, &eecd);
+       } else if (hw->nvm.type == e1000_nvm_eeprom_microwire) {
+               /* CS on Microwire is active-high */
+               eecd &= ~(E1000_EECD_CS | E1000_EECD_DI);
+               E1000_WRITE_REG(hw, E1000_EECD, eecd);
+               e1000_raise_eec_clk(hw, &eecd);
+               e1000_lower_eec_clk(hw, &eecd);
+       }
+}
+
+/**
+ *  e1000_release_nvm_generic - Release exclusive access to EEPROM
+ *  @hw: pointer to the HW structure
+ *
+ *  Stop any current commands to the EEPROM and clear the EEPROM request bit.
+ **/
+void e1000_release_nvm_generic(struct e1000_hw *hw)
+{
+       u32 eecd;
+
+       DEBUGFUNC("e1000_release_nvm_generic");
+
+       e1000_stop_nvm(hw);
+
+       eecd = E1000_READ_REG(hw, E1000_EECD);
+       eecd &= ~E1000_EECD_REQ;
+       E1000_WRITE_REG(hw, E1000_EECD, eecd);
+}
+
+/**
+ *  e1000_ready_nvm_eeprom - Prepares EEPROM for read/write
+ *  @hw: pointer to the HW structure
+ *
+ *  Sets up the EEPROM for reading and writing.
+ **/
+static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw)
+{
+       struct e1000_nvm_info *nvm = &hw->nvm;
+       u32 eecd = E1000_READ_REG(hw, E1000_EECD);
+       s32 ret_val = E1000_SUCCESS;
+       u8 spi_stat_reg;
+
+       DEBUGFUNC("e1000_ready_nvm_eeprom");
+
+       if (nvm->type == e1000_nvm_eeprom_microwire) {
+               /* Clear SK and DI */
+               eecd &= ~(E1000_EECD_DI | E1000_EECD_SK);
+               E1000_WRITE_REG(hw, E1000_EECD, eecd);
+               /* Set CS */
+               eecd |= E1000_EECD_CS;
+               E1000_WRITE_REG(hw, E1000_EECD, eecd);
+       } else
+       if (nvm->type == e1000_nvm_eeprom_spi) {
+               u16 timeout = NVM_MAX_RETRY_SPI;
+
+               /* Clear SK and CS */
+               eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
+               E1000_WRITE_REG(hw, E1000_EECD, eecd);
+               usec_delay(1);
+
+               /*
+                * Read "Status Register" repeatedly until the LSB is cleared.
+                * The EEPROM will signal that the command has been completed
+                * by clearing bit 0 of the internal status register.  If it's
+                * not cleared within 'timeout', then error out.
+                */
+               while (timeout) {
+                       e1000_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI,
+                                                hw->nvm.opcode_bits);
+                       spi_stat_reg = (u8)e1000_shift_in_eec_bits(hw, 8);
+                       if (!(spi_stat_reg & NVM_STATUS_RDY_SPI))
+                               break;
+
+                       usec_delay(5);
+                       e1000_standby_nvm(hw);
+                       timeout--;
+               }
+
+               if (!timeout) {
+                       DEBUGOUT("SPI NVM Status error\n");
+                       ret_val = -E1000_ERR_NVM;
+                       goto out;
+               }
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_read_nvm_spi - Read EEPROM's using SPI
+ *  @hw: pointer to the HW structure
+ *  @offset: offset of word in the EEPROM to read
+ *  @words: number of words to read
+ *  @data: word read from the EEPROM
+ *
+ *  Reads a 16 bit word from the EEPROM.
+ **/
+s32 e1000_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+       struct e1000_nvm_info *nvm = &hw->nvm;
+       u32 i = 0;
+       s32 ret_val;
+       u16 word_in;
+       u8 read_opcode = NVM_READ_OPCODE_SPI;
+
+       DEBUGFUNC("e1000_read_nvm_spi");
+
+       /*
+        * A check for invalid values:  offset too large, too many words,
+        * and not enough words.
+        */
+       if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+           (words == 0)) {
+               DEBUGOUT("nvm parameter(s) out of bounds\n");
+               ret_val = -E1000_ERR_NVM;
+               goto out;
+       }
+
+       ret_val = nvm->ops.acquire(hw);
+       if (ret_val)
+               goto out;
+
+       ret_val = e1000_ready_nvm_eeprom(hw);
+       if (ret_val)
+               goto release;
+
+       e1000_standby_nvm(hw);
+
+       if ((nvm->address_bits == 8) && (offset >= 128))
+               read_opcode |= NVM_A8_OPCODE_SPI;
+
+       /* Send the READ command (opcode + addr) */
+       e1000_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits);
+       e1000_shift_out_eec_bits(hw, (u16)(offset*2), nvm->address_bits);
+
+       /*
+        * Read the data.  SPI NVMs increment the address with each byte
+        * read and will roll over if reading beyond the end.  This allows
+        * us to read the whole NVM from any offset
+        */
+       for (i = 0; i < words; i++) {
+               word_in = e1000_shift_in_eec_bits(hw, 16);
+               data[i] = (word_in >> 8) | (word_in << 8);
+       }
+
+release:
+       nvm->ops.release(hw);
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_read_nvm_microwire - Reads EEPROM's using microwire
+ *  @hw: pointer to the HW structure
+ *  @offset: offset of word in the EEPROM to read
+ *  @words: number of words to read
+ *  @data: word read from the EEPROM
+ *
+ *  Reads a 16 bit word from the EEPROM.
+ **/
+s32 e1000_read_nvm_microwire(struct e1000_hw *hw, u16 offset, u16 words,
+                             u16 *data)
+{
+       struct e1000_nvm_info *nvm = &hw->nvm;
+       u32 i = 0;
+       s32 ret_val;
+       u8 read_opcode = NVM_READ_OPCODE_MICROWIRE;
+
+       DEBUGFUNC("e1000_read_nvm_microwire");
+
+       /*
+        * A check for invalid values:  offset too large, too many words,
+        * and not enough words.
+        */
+       if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+           (words == 0)) {
+               DEBUGOUT("nvm parameter(s) out of bounds\n");
+               ret_val = -E1000_ERR_NVM;
+               goto out;
+       }
+
+       ret_val = nvm->ops.acquire(hw);
+       if (ret_val)
+               goto out;
+
+       ret_val = e1000_ready_nvm_eeprom(hw);
+       if (ret_val)
+               goto release;
+
+       for (i = 0; i < words; i++) {
+               /* Send the READ command (opcode + addr) */
+               e1000_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits);
+               e1000_shift_out_eec_bits(hw, (u16)(offset + i),
+                                       nvm->address_bits);
+
+               /*
+                * Read the data.  For microwire, each word requires the
+                * overhead of setup and tear-down.
+                */
+               data[i] = e1000_shift_in_eec_bits(hw, 16);
+               e1000_standby_nvm(hw);
+       }
+
+release:
+       nvm->ops.release(hw);
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_read_nvm_eerd - Reads EEPROM using EERD register
+ *  @hw: pointer to the HW structure
+ *  @offset: offset of word in the EEPROM to read
+ *  @words: number of words to read
+ *  @data: word read from the EEPROM
+ *
+ *  Reads a 16 bit word from the EEPROM using the EERD register.
+ **/
+s32 e1000_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+       struct e1000_nvm_info *nvm = &hw->nvm;
+       u32 i, eerd = 0;
+       s32 ret_val = E1000_SUCCESS;
+
+       DEBUGFUNC("e1000_read_nvm_eerd");
+
+       /*
+        * A check for invalid values:  offset too large, too many words,
+        * too many words for the offset, and not enough words.
+        */
+       if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+           (words == 0)) {
+               DEBUGOUT("nvm parameter(s) out of bounds\n");
+               ret_val = -E1000_ERR_NVM;
+               goto out;
+       }
+
+       for (i = 0; i < words; i++) {
+               eerd = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) +
+                      E1000_NVM_RW_REG_START;
+
+               E1000_WRITE_REG(hw, E1000_EERD, eerd);
+               ret_val = e1000_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ);
+               if (ret_val)
+                       break;
+
+               data[i] = (E1000_READ_REG(hw, E1000_EERD) >>
+                          E1000_NVM_RW_REG_DATA);
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_write_nvm_spi - Write to EEPROM using SPI
+ *  @hw: pointer to the HW structure
+ *  @offset: offset within the EEPROM to be written to
+ *  @words: number of words to write
+ *  @data: 16 bit word(s) to be written to the EEPROM
+ *
+ *  Writes data to EEPROM at offset using SPI interface.
+ *
+ *  If e1000_update_nvm_checksum is not called after this function, the
+ *  EEPROM will most likely contain an invalid checksum.
+ **/
+s32 e1000_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+       struct e1000_nvm_info *nvm = &hw->nvm;
+       s32 ret_val;
+       u16 widx = 0;
+
+       DEBUGFUNC("e1000_write_nvm_spi");
+
+       /*
+        * A check for invalid values:  offset too large, too many words,
+        * and not enough words.
+        */
+       if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+           (words == 0)) {
+               DEBUGOUT("nvm parameter(s) out of bounds\n");
+               ret_val = -E1000_ERR_NVM;
+               goto out;
+       }
+
+       ret_val = nvm->ops.acquire(hw);
+       if (ret_val)
+               goto out;
+
+       while (widx < words) {
+               u8 write_opcode = NVM_WRITE_OPCODE_SPI;
+
+               ret_val = e1000_ready_nvm_eeprom(hw);
+               if (ret_val)
+                       goto release;
+
+               e1000_standby_nvm(hw);
+
+               /* Send the WRITE ENABLE command (8 bit opcode) */
+               e1000_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI,
+                                        nvm->opcode_bits);
+
+               e1000_standby_nvm(hw);
+
+               /*
+                * Some SPI eeproms use the 8th address bit embedded in the
+                * opcode
+                */
+               if ((nvm->address_bits == 8) && (offset >= 128))
+                       write_opcode |= NVM_A8_OPCODE_SPI;
+
+               /* Send the Write command (8-bit opcode + addr) */
+               e1000_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits);
+               e1000_shift_out_eec_bits(hw, (u16)((offset + widx) * 2),
+                                        nvm->address_bits);
+
+               /* Loop to allow for up to whole page write of eeprom */
+               while (widx < words) {
+                       u16 word_out = data[widx];
+                       word_out = (word_out >> 8) | (word_out << 8);
+                       e1000_shift_out_eec_bits(hw, word_out, 16);
+                       widx++;
+
+                       if ((((offset + widx) * 2) % nvm->page_size) == 0) {
+                               e1000_standby_nvm(hw);
+                               break;
+                       }
+               }
+       }
+
+       msec_delay(10);
+release:
+       nvm->ops.release(hw);
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_write_nvm_microwire - Writes EEPROM using microwire
+ *  @hw: pointer to the HW structure
+ *  @offset: offset within the EEPROM to be written to
+ *  @words: number of words to write
+ *  @data: 16 bit word(s) to be written to the EEPROM
+ *
+ *  Writes data to EEPROM at offset using microwire interface.
+ *
+ *  If e1000_update_nvm_checksum is not called after this function, the
+ *  EEPROM will most likely contain an invalid checksum.
+ **/
+s32 e1000_write_nvm_microwire(struct e1000_hw *hw, u16 offset, u16 words,
+                              u16 *data)
+{
+       struct e1000_nvm_info *nvm = &hw->nvm;
+       s32  ret_val;
+       u32 eecd;
+       u16 words_written = 0;
+       u16 widx = 0;
+
+       DEBUGFUNC("e1000_write_nvm_microwire");
+
+       /*
+        * A check for invalid values:  offset too large, too many words,
+        * and not enough words.
+        */
+       if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+           (words == 0)) {
+               DEBUGOUT("nvm parameter(s) out of bounds\n");
+               ret_val = -E1000_ERR_NVM;
+               goto out;
+       }
+
+       ret_val = nvm->ops.acquire(hw);
+       if (ret_val)
+               goto out;
+
+       ret_val = e1000_ready_nvm_eeprom(hw);
+       if (ret_val)
+               goto release;
+
+       e1000_shift_out_eec_bits(hw, NVM_EWEN_OPCODE_MICROWIRE,
+                                (u16)(nvm->opcode_bits + 2));
+
+       e1000_shift_out_eec_bits(hw, 0, (u16)(nvm->address_bits - 2));
+
+       e1000_standby_nvm(hw);
+
+       while (words_written < words) {
+               e1000_shift_out_eec_bits(hw, NVM_WRITE_OPCODE_MICROWIRE,
+                                        nvm->opcode_bits);
+
+               e1000_shift_out_eec_bits(hw, (u16)(offset + words_written),
+                                        nvm->address_bits);
+
+               e1000_shift_out_eec_bits(hw, data[words_written], 16);
+
+               e1000_standby_nvm(hw);
+
+               for (widx = 0; widx < 200; widx++) {
+                       eecd = E1000_READ_REG(hw, E1000_EECD);
+                       if (eecd & E1000_EECD_DO)
+                               break;
+                       usec_delay(50);
+               }
+
+               if (widx == 200) {
+                       DEBUGOUT("NVM Write did not complete\n");
+                       ret_val = -E1000_ERR_NVM;
+                       goto release;
+               }
+
+               e1000_standby_nvm(hw);
+
+               words_written++;
+       }
+
+       e1000_shift_out_eec_bits(hw, NVM_EWDS_OPCODE_MICROWIRE,
+                                (u16)(nvm->opcode_bits + 2));
+
+       e1000_shift_out_eec_bits(hw, 0, (u16)(nvm->address_bits - 2));
+
+release:
+       nvm->ops.release(hw);
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_read_pba_string_generic - Read device part number
+ *  @hw: pointer to the HW structure
+ *  @pba_num: buffer receiving the NUL-terminated PBA string
+ *  @pba_num_size: size of the pba_num buffer in bytes
+ *
+ *  Reads the product board assembly (PBA) number from the EEPROM and stores
+ *  the value in pba_num.  Supports both the legacy encoding (two hex words
+ *  rendered as "XXXXXX-0XX") and the string encoding (a guard word plus a
+ *  pointer to a length-prefixed character section).
+ *
+ *  Returns E1000_SUCCESS, E1000_ERR_INVALID_ARGUMENT for a NULL buffer,
+ *  E1000_ERR_NO_SPACE if the buffer is too small, E1000_ERR_NVM_PBA_SECTION
+ *  for a corrupt section, or the NVM read error code.
+ **/
+s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num,
+                                  u32 pba_num_size)
+{
+       s32 ret_val;
+       u16 nvm_data;
+       u16 pba_ptr;
+       u16 offset;
+       u16 length;
+
+       DEBUGFUNC("e1000_read_pba_string_generic");
+
+       if (pba_num == NULL) {
+               DEBUGOUT("PBA string buffer was null\n");
+               ret_val = E1000_ERR_INVALID_ARGUMENT;
+               goto out;
+       }
+
+       /* first PBA word: either the first hex word or the string guard */
+       ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
+       if (ret_val) {
+               DEBUGOUT("NVM Read Error\n");
+               goto out;
+       }
+
+       /* second PBA word: second hex word (legacy) or section pointer */
+       ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &pba_ptr);
+       if (ret_val) {
+               DEBUGOUT("NVM Read Error\n");
+               goto out;
+       }
+
+       /*
+        * if nvm_data is not ptr guard the PBA must be in legacy format which
+        * means pba_ptr is actually our second data word for the PBA number
+        * and we can decode it into an ascii string
+        */
+       if (nvm_data != NVM_PBA_PTR_GUARD) {
+               DEBUGOUT("NVM PBA number is not stored as string\n");
+
+               /* we will need 11 characters to store the PBA */
+               if (pba_num_size < 11) {
+                       DEBUGOUT("PBA string buffer too small\n");
+                       return E1000_ERR_NO_SPACE;
+               }
+
+               /* extract hex string from data and pba_ptr */
+               pba_num[0] = (nvm_data >> 12) & 0xF;
+               pba_num[1] = (nvm_data >> 8) & 0xF;
+               pba_num[2] = (nvm_data >> 4) & 0xF;
+               pba_num[3] = nvm_data & 0xF;
+               pba_num[4] = (pba_ptr >> 12) & 0xF;
+               pba_num[5] = (pba_ptr >> 8) & 0xF;
+               pba_num[6] = '-';
+               /* fixed 0 nibble; the loop below renders it as the literal
+                * '0' in "XXXXXX-0XX" */
+               pba_num[7] = 0;
+               pba_num[8] = (pba_ptr >> 4) & 0xF;
+               pba_num[9] = pba_ptr & 0xF;
+
+               /* put a null character on the end of our string */
+               pba_num[10] = '\0';
+
+               /* switch all the data but the '-' to hex char */
+               for (offset = 0; offset < 10; offset++) {
+                       if (pba_num[offset] < 0xA)
+                               pba_num[offset] += '0';
+                       else if (pba_num[offset] < 0x10)
+                               pba_num[offset] += 'A' - 0xA;
+               }
+
+               goto out;
+       }
+
+       /* string format: the word at pba_ptr holds the section length in
+        * words, including the length word itself; the PBA characters
+        * follow, two per word */
+       ret_val = hw->nvm.ops.read(hw, pba_ptr, 1, &length);
+       if (ret_val) {
+               DEBUGOUT("NVM Read Error\n");
+               goto out;
+       }
+
+       if (length == 0xFFFF || length == 0) {
+               DEBUGOUT("NVM PBA number section invalid length\n");
+               ret_val = E1000_ERR_NVM_PBA_SECTION;
+               goto out;
+       }
+       /* check if pba_num buffer is big enough: two chars per data word
+        * plus the terminating NUL, i.e. (length - 1) * 2 + 1 == length * 2 - 1 */
+       if (pba_num_size < (((u32)length * 2) - 1)) {
+               DEBUGOUT("PBA string buffer too small\n");
+               ret_val = E1000_ERR_NO_SPACE;
+               goto out;
+       }
+
+       /* trim pba length from start of string */
+       pba_ptr++;
+       length--;
+
+       for (offset = 0; offset < length; offset++) {
+               ret_val = hw->nvm.ops.read(hw, pba_ptr + offset, 1, &nvm_data);
+               if (ret_val) {
+                       DEBUGOUT("NVM Read Error\n");
+                       goto out;
+               }
+               /* each NVM word stores two ASCII characters, high byte first */
+               pba_num[offset * 2] = (u8)(nvm_data >> 8);
+               pba_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF);
+       }
+       pba_num[offset * 2] = '\0';
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_read_pba_length_generic - Read device part number length
+ *  @hw: pointer to the HW structure
+ *  @pba_num_size: out parameter; receives the buffer size in bytes
+ *                 (including the terminating NUL) needed for the PBA string
+ *
+ *  Reads the product board assembly (PBA) number length from the EEPROM and
+ *  stores the value in pba_num_size.
+ *
+ *  Returns E1000_SUCCESS, E1000_ERR_INVALID_ARGUMENT for a NULL pointer,
+ *  E1000_ERR_NVM_PBA_SECTION for a corrupt section, or the NVM read error.
+ **/
+s32 e1000_read_pba_length_generic(struct e1000_hw *hw, u32 *pba_num_size)
+{
+       s32 ret_val;
+       u16 nvm_data;
+       u16 pba_ptr;
+       u16 length;
+
+       DEBUGFUNC("e1000_read_pba_length_generic");
+
+       if (pba_num_size == NULL) {
+               DEBUGOUT("PBA buffer size was null\n");
+               ret_val = E1000_ERR_INVALID_ARGUMENT;
+               goto out;
+       }
+
+       ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
+       if (ret_val) {
+               DEBUGOUT("NVM Read Error\n");
+               goto out;
+       }
+
+       ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &pba_ptr);
+       if (ret_val) {
+               DEBUGOUT("NVM Read Error\n");
+               goto out;
+       }
+
+       /* if data is not ptr guard the PBA must be in legacy format, which
+        * always needs 11 bytes: "XXXXXX-0XX" plus the terminating NUL */
+       if (nvm_data != NVM_PBA_PTR_GUARD) {
+               *pba_num_size = 11;
+               goto out;
+       }
+
+       /* word at pba_ptr is the section length in words, including itself */
+       ret_val = hw->nvm.ops.read(hw, pba_ptr, 1, &length);
+       if (ret_val) {
+               DEBUGOUT("NVM Read Error\n");
+               goto out;
+       }
+
+       if (length == 0xFFFF || length == 0) {
+               DEBUGOUT("NVM PBA number section invalid length\n");
+               ret_val = E1000_ERR_NVM_PBA_SECTION;
+               goto out;
+       }
+
+       /*
+        * Convert from length in u16 values to u8 chars, add 1 for NULL,
+        * and subtract 2 because length field is included in length.
+        */
+       *pba_num_size = ((u32)length * 2) - 1;
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_read_mac_addr_generic - Read device MAC address
+ *  @hw: pointer to the HW structure
+ *
+ *  Reads the device MAC address from receive address register 0 (RAL/RAH),
+ *  which the hardware loads from the EEPROM at reset, and stores it in both
+ *  hw->mac.perm_addr and hw->mac.addr.
+ *
+ *  NOTE(review): the original comment claimed the EEPROM is read directly
+ *  and the last address bit incremented for the second port; this generic
+ *  version only reads RAR0 — confirm per-port handling is done elsewhere.
+ **/
+s32 e1000_read_mac_addr_generic(struct e1000_hw *hw)
+{
+       u32 rar_high;
+       u32 rar_low;
+       u16 i;
+
+       rar_high = E1000_READ_REG(hw, E1000_RAH(0));
+       rar_low = E1000_READ_REG(hw, E1000_RAL(0));
+
+       /* MAC bytes 0-3 come from RAL, least-significant byte first */
+       for (i = 0; i < E1000_RAL_MAC_ADDR_LEN; i++)
+               hw->mac.perm_addr[i] = (u8)(rar_low >> (i*8));
+
+       /* MAC bytes 4-5 come from the low half of RAH */
+       for (i = 0; i < E1000_RAH_MAC_ADDR_LEN; i++)
+               hw->mac.perm_addr[i+4] = (u8)(rar_high >> (i*8));
+
+       /* the active address starts out identical to the permanent one */
+       for (i = 0; i < ETH_ADDR_LEN; i++)
+               hw->mac.addr[i] = hw->mac.perm_addr[i];
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_validate_nvm_checksum_generic - Validate EEPROM checksum
+ *  @hw: pointer to the HW structure
+ *
+ *  Calculates the EEPROM checksum by reading/adding each word of the EEPROM
+ *  up to and including the checksum word (NVM_CHECKSUM_REG) and then
+ *  verifies that the sum of the EEPROM is equal to 0xBABA (NVM_SUM).
+ *
+ *  Returns E1000_SUCCESS, -E1000_ERR_NVM on a checksum mismatch, or the
+ *  NVM read error code.
+ **/
+s32 e1000_validate_nvm_checksum_generic(struct e1000_hw *hw)
+{
+       s32 ret_val = E1000_SUCCESS;
+       u16 checksum = 0;
+       u16 i, nvm_data;
+
+       DEBUGFUNC("e1000_validate_nvm_checksum_generic");
+
+       /* sum every word including the stored checksum word itself */
+       for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
+               ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
+               if (ret_val) {
+                       DEBUGOUT("NVM Read Error\n");
+                       goto out;
+               }
+               checksum += nvm_data;
+       }
+
+       if (checksum != (u16) NVM_SUM) {
+               DEBUGOUT("NVM Checksum Invalid\n");
+               ret_val = -E1000_ERR_NVM;
+               goto out;
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_update_nvm_checksum_generic - Update EEPROM checksum
+ *  @hw: pointer to the HW structure
+ *
+ *  Updates the EEPROM checksum by reading/adding each word of the EEPROM
+ *  up to the checksum.  Then calculates the EEPROM checksum and writes the
+ *  value to the EEPROM so that the full sum again equals NVM_SUM.
+ *
+ *  Returns E1000_SUCCESS or the NVM read/write error code.
+ **/
+s32 e1000_update_nvm_checksum_generic(struct e1000_hw *hw)
+{
+       s32 ret_val;
+       u16 checksum = 0;
+       u16 i, nvm_data;
+
+       DEBUGFUNC("e1000_update_nvm_checksum");
+
+       /* sum every word before the checksum word */
+       for (i = 0; i < NVM_CHECKSUM_REG; i++) {
+               ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
+               if (ret_val) {
+                       DEBUGOUT("NVM Read Error while updating checksum.\n");
+                       goto out;
+               }
+               checksum += nvm_data;
+       }
+       /* store the complement so the total sums to NVM_SUM */
+       checksum = (u16) NVM_SUM - checksum;
+       ret_val = hw->nvm.ops.write(hw, NVM_CHECKSUM_REG, 1, &checksum);
+       if (ret_val)
+               DEBUGOUT("NVM Write Error while updating checksum.\n");
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_reload_nvm_generic - Reloads EEPROM
+ *  @hw: pointer to the HW structure
+ *
+ *  Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit
+ *  (E1000_CTRL_EXT_EE_RST) in the extended control register.
+ **/
+static void e1000_reload_nvm_generic(struct e1000_hw *hw)
+{
+       u32 ctrl_ext;
+
+       DEBUGFUNC("e1000_reload_nvm_generic");
+
+       /* brief settle delay before triggering the reload */
+       usec_delay(10);
+       ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+       ctrl_ext |= E1000_CTRL_EXT_EE_RST;
+       E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+       /* read back to force the write to post before returning */
+       E1000_WRITE_FLUSH(hw);
+}
+
diff --git a/lib/librte_pmd_igb/igb/e1000_nvm.h b/lib/librte_pmd_igb/igb/e1000_nvm.h
new file mode 100644 (file)
index 0000000..6bba641
--- /dev/null
@@ -0,0 +1,66 @@
+/******************************************************************************
+
+  Copyright (c) 2001-2011, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#ifndef _E1000_NVM_H_
+#define _E1000_NVM_H_
+
+void e1000_init_nvm_ops_generic(struct e1000_hw *hw);
+s32  e1000_null_read_nvm(struct e1000_hw *hw, u16 a, u16 b, u16 *c);
+void e1000_null_nvm_generic(struct e1000_hw *hw);
+s32  e1000_null_led_default(struct e1000_hw *hw, u16 *data);
+s32  e1000_null_write_nvm(struct e1000_hw *hw, u16 a, u16 b, u16 *c);
+s32  e1000_acquire_nvm_generic(struct e1000_hw *hw);
+
+s32  e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg);
+s32  e1000_read_mac_addr_generic(struct e1000_hw *hw);
+s32  e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num,
+                                   u32 pba_num_size);
+s32  e1000_read_pba_length_generic(struct e1000_hw *hw, u32 *pba_num_size);
+s32  e1000_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+s32  e1000_read_nvm_microwire(struct e1000_hw *hw, u16 offset,
+                              u16 words, u16 *data);
+s32  e1000_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words,
+                         u16 *data);
+s32  e1000_valid_led_default_generic(struct e1000_hw *hw, u16 *data);
+s32  e1000_validate_nvm_checksum_generic(struct e1000_hw *hw);
+s32  e1000_write_nvm_microwire(struct e1000_hw *hw, u16 offset,
+                               u16 words, u16 *data);
+s32  e1000_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words,
+                         u16 *data);
+s32  e1000_update_nvm_checksum_generic(struct e1000_hw *hw);
+void e1000_release_nvm_generic(struct e1000_hw *hw);
+
+#define E1000_STM_OPCODE  0xDB00
+
+#endif
diff --git a/lib/librte_pmd_igb/igb/e1000_osdep.c b/lib/librte_pmd_igb/igb/e1000_osdep.c
new file mode 100644 (file)
index 0000000..203dcc8
--- /dev/null
@@ -0,0 +1,72 @@
+/******************************************************************************
+
+  Copyright (c) 2001-2011, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#include "e1000_api.h"
+
+/*
+ * NOTE: the following routines using the e1000 
+ *     naming style are provided to the shared
+ *     code but are OS specific
+ */
+
+/* Intentional no-op: PCI config-space writes are not performed in this
+ * userspace environment; @reg and @value are ignored. */
+void
+e1000_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
+{
+       return;
+}
+
+/* Stub: no PCI config-space access from here; reports 0 so callers see a
+ * deterministic value rather than uninitialized data. */
+void
+e1000_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
+{
+       *value = 0;
+       return;
+}
+
+/*
+ * Read the PCI Express capabilities
+ * (stub: always returns E1000_NOT_IMPLEMENTED; *value is left untouched)
+ */
+int32_t
+e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
+{
+       return E1000_NOT_IMPLEMENTED;
+}
+
+/*
+ * Write the PCI Express capabilities
+ * (stub: always returns E1000_NOT_IMPLEMENTED; nothing is written)
+ */
+int32_t
+e1000_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
+{
+       return E1000_NOT_IMPLEMENTED;
+}
diff --git a/lib/librte_pmd_igb/igb/e1000_osdep.h b/lib/librte_pmd_igb/igb/e1000_osdep.h
new file mode 100644 (file)
index 0000000..cf460d5
--- /dev/null
@@ -0,0 +1,128 @@
+/******************************************************************************
+
+  Copyright (c) 2001-2011, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#ifndef _E1000_OSDEP_H_
+#define _E1000_OSDEP_H_
+
+#include <stdint.h>
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_log.h>
+
+#include "../e1000_logs.h"
+
+/* Remove some compiler warnings for the files in this dir */
+#ifdef __INTEL_COMPILER
+#pragma warning(disable:2259) /* conversion may lose significant bits */
+#pragma warning(disable:869)  /* Parameter was never referenced */
+#pragma warning(disable:181)  /* Arg incompatible with format string */
+#else
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#pragma GCC diagnostic ignored "-Wformat"
+#pragma GCC diagnostic ignored "-Wuninitialized"
+/*
+ * -Wmaybe-uninitialized first appeared in GCC 4.7.  The original test,
+ * ((__GNUC__ >= 4) && (__GNUC_MINOR__ >= 7)), wrongly excluded future
+ * majors with a small minor (e.g. 5.0); compare the full version instead.
+ */
+#if ((__GNUC__ * 100 + __GNUC_MINOR__) >= 407)
+#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
+#endif
+#endif
+
+#define DELAY(x) rte_delay_us(x)
+#define usec_delay(x) DELAY(x)
+#define msec_delay(x) DELAY(1000*(x))
+#define msec_delay_irq(x) DELAY(1000*(x))
+
+#define DEBUGFUNC(F)            DEBUGOUT(F);
+#define DEBUGOUT(S, args...)    PMD_DRV_LOG(DEBUG, S, ##args)
+#define DEBUGOUT1(S, args...)   DEBUGOUT(S, ##args)
+#define DEBUGOUT2(S, args...)   DEBUGOUT(S, ##args)
+#define DEBUGOUT3(S, args...)   DEBUGOUT(S, ##args)
+#define DEBUGOUT6(S, args...)   DEBUGOUT(S, ##args)
+#define DEBUGOUT7(S, args...)   DEBUGOUT(S, ##args)
+
+#define FALSE                  0
+#define TRUE                   1
+
+/* Fixed-width shorthand types used throughout the shared e1000 code. */
+typedef uint64_t       u64;
+typedef uint32_t       u32;
+typedef uint16_t       u16;
+typedef uint8_t                u8;
+typedef int64_t                s64;
+typedef int32_t                s32;
+typedef int16_t                s16;
+typedef int8_t         s8;
+/* NOTE(review): 'typedef int bool' conflicts with <stdbool.h>; confirm no
+ * translation unit in this directory includes stdbool before this header */
+typedef int            bool;
+
+/* Little-endian markers are plain integers here (host is little-endian). */
+#define __le16         u16
+#define __le32         u32
+#define __le64         u64
+
+#define E1000_WRITE_FLUSH(a) E1000_READ_REG(a, E1000_STATUS)
+
+#define E1000_PCI_REG(reg) (*((volatile uint32_t *)(reg)))
+
+#define E1000_PCI_REG_WRITE(reg, value) do { \
+       E1000_PCI_REG((reg)) = (value); \
+} while (0)
+
+#define E1000_PCI_REG_ADDR(hw, reg) \
+       ((volatile uint32_t *)((char *)(hw)->hw_addr + (reg)))
+
+#define E1000_PCI_REG_ARRAY_ADDR(hw, reg, index) \
+       E1000_PCI_REG_ADDR((hw), (reg) + ((index) << 2))
+
+/* Helper behind E1000_READ_REG: dereference a mapped register address as a
+ * volatile 32-bit load. */
+static inline uint32_t e1000_read_addr(volatile void* addr)
+{
+       return E1000_PCI_REG(addr);
+}
+
+/* Register READ/WRITE macros */
+
+#define E1000_READ_REG(hw, reg) \
+       e1000_read_addr(E1000_PCI_REG_ADDR((hw), (reg)))
+
+#define E1000_WRITE_REG(hw, reg, value) \
+       E1000_PCI_REG_WRITE(E1000_PCI_REG_ADDR((hw), (reg)), (value))
+
+#define E1000_READ_REG_ARRAY(hw, reg, index) \
+       E1000_PCI_REG(E1000_PCI_REG_ARRAY_ADDR((hw), (reg), (index)))
+
+#define E1000_WRITE_REG_ARRAY(hw, reg, index, value) \
+       E1000_PCI_REG_WRITE(E1000_PCI_REG_ARRAY_ADDR((hw), (reg), (index)), (value))
+
+#define E1000_READ_REG_ARRAY_DWORD E1000_READ_REG_ARRAY
+#define E1000_WRITE_REG_ARRAY_DWORD E1000_WRITE_REG_ARRAY
+
+#endif /* _E1000_OSDEP_H_ */
diff --git a/lib/librte_pmd_igb/igb/e1000_phy.c b/lib/librte_pmd_igb/igb/e1000_phy.c
new file mode 100644 (file)
index 0000000..aede670
--- /dev/null
@@ -0,0 +1,2988 @@
+/******************************************************************************
+
+  Copyright (c) 2001-2011, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#include "e1000_api.h"
+
+/* Forward declarations for helpers defined later in this file. */
+static s32 e1000_copper_link_autoneg(struct e1000_hw *hw);
+static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw);
+/* Cable length tables: map PHY-reported cable-length codes to lengths
+ * (NOTE(review): presumably meters — confirm against the PHY datasheet).
+ * E1000_CABLE_LENGTH_UNDEFINED terminates the M88 table. */
+static const u16 e1000_m88_cable_length_table[] = {
+       0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED };
+#define M88E1000_CABLE_LENGTH_TABLE_SIZE \
+                (sizeof(e1000_m88_cable_length_table) / \
+                 sizeof(e1000_m88_cable_length_table[0]))
+
+static const u16 e1000_igp_2_cable_length_table[] = {
+       0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, 0, 0, 0, 3,
+       6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41, 6, 10, 14, 18, 22,
+       26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61, 21, 26, 31, 35, 40,
+       44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82, 40, 45, 51, 56, 61,
+       66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104, 60, 66, 72, 77, 82,
+       87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, 83, 89, 95,
+       100, 105, 109, 113, 116, 119, 122, 124, 104, 109, 114, 118, 121,
+       124};
+#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \
+                (sizeof(e1000_igp_2_cable_length_table) / \
+                 sizeof(e1000_igp_2_cable_length_table[0]))
+
+/**
+ *  e1000_init_phy_ops_generic - Initialize PHY function pointers
+ *  @hw: pointer to the HW structure
+ *
+ *  Sets up the function pointers to no-op functions so every PHY op is
+ *  safely callable before a device-specific init installs real handlers.
+ **/
+void e1000_init_phy_ops_generic(struct e1000_hw *hw)
+{
+       struct e1000_phy_info *phy = &hw->phy;
+       DEBUGFUNC("e1000_init_phy_ops_generic");
+
+       /* Initialize function pointers */
+       phy->ops.init_params = e1000_null_ops_generic;
+       phy->ops.acquire = e1000_null_ops_generic;
+       phy->ops.check_polarity = e1000_null_ops_generic;
+       phy->ops.check_reset_block = e1000_null_ops_generic;
+       phy->ops.commit = e1000_null_ops_generic;
+       phy->ops.force_speed_duplex = e1000_null_ops_generic;
+       phy->ops.get_cfg_done = e1000_null_ops_generic;
+       phy->ops.get_cable_length = e1000_null_ops_generic;
+       phy->ops.get_info = e1000_null_ops_generic;
+       phy->ops.read_reg = e1000_null_read_reg;
+       phy->ops.read_reg_locked = e1000_null_read_reg;
+       phy->ops.release = e1000_null_phy_generic;
+       phy->ops.reset = e1000_null_ops_generic;
+       phy->ops.set_d0_lplu_state = e1000_null_lplu_state;
+       phy->ops.set_d3_lplu_state = e1000_null_lplu_state;
+       phy->ops.write_reg = e1000_null_write_reg;
+       phy->ops.write_reg_locked = e1000_null_write_reg;
+       phy->ops.power_up = e1000_null_phy_generic;
+       phy->ops.power_down = e1000_null_phy_generic;
+}
+
+/**
+ *  e1000_null_read_reg - No-op function, return 0
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset (ignored)
+ *  @data: output pointer (left unmodified)
+ **/
+s32 e1000_null_read_reg(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+       DEBUGFUNC("e1000_null_read_reg");
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_null_phy_generic - No-op function, return void
+ *  @hw: pointer to the HW structure (ignored)
+ **/
+void e1000_null_phy_generic(struct e1000_hw *hw)
+{
+       DEBUGFUNC("e1000_null_phy_generic");
+       return;
+}
+
+/**
+ *  e1000_null_lplu_state - No-op function, return 0
+ *  @hw: pointer to the HW structure
+ *  @active: requested low-power link-up state (ignored)
+ **/
+s32 e1000_null_lplu_state(struct e1000_hw *hw, bool active)
+{
+       DEBUGFUNC("e1000_null_lplu_state");
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_null_write_reg - No-op function, return 0
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset (ignored)
+ *  @data: value to write (ignored)
+ **/
+s32 e1000_null_write_reg(struct e1000_hw *hw, u32 offset, u16 data)
+{
+       DEBUGFUNC("e1000_null_write_reg");
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_check_reset_block_generic - Check if PHY reset is blocked
+ *  @hw: pointer to the HW structure
+ *
+ *  Read the PHY management control register and check whether a PHY reset
+ *  is blocked.  If a reset is not blocked return E1000_SUCCESS, otherwise
+ *  return E1000_BLK_PHY_RESET (12).
+ **/
+s32 e1000_check_reset_block_generic(struct e1000_hw *hw)
+{
+       u32 manc;
+
+       DEBUGFUNC("e1000_check_reset_block");
+
+       manc = E1000_READ_REG(hw, E1000_MANC);
+
+       /* reset is blocked whenever the MANC block-PHY-reset bit is set */
+       return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ?
+              E1000_BLK_PHY_RESET : E1000_SUCCESS;
+}
+
+/**
+ *  e1000_get_phy_id - Retrieve the PHY ID and revision
+ *  @hw: pointer to the HW structure
+ *
+ *  Reads the PHY registers and stores the PHY ID and possibly the PHY
+ *  revision in the hardware structure.  Succeeds silently (id untouched)
+ *  if no read_reg op has been installed.
+ **/
+s32 e1000_get_phy_id(struct e1000_hw *hw)
+{
+       struct e1000_phy_info *phy = &hw->phy;
+       s32 ret_val = E1000_SUCCESS;
+       u16 phy_id;
+
+       DEBUGFUNC("e1000_get_phy_id");
+
+       if (!(phy->ops.read_reg))
+               goto out;
+
+       /* PHY_ID1 supplies the upper 16 bits of the 32-bit ID */
+       ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id);
+       if (ret_val)
+               goto out;
+
+       phy->id = (u32)(phy_id << 16);
+       /* NOTE(review): short pause between the two ID reads — presumably
+        * required by some PHYs; confirm before removing */
+       usec_delay(20);
+       ret_val = phy->ops.read_reg(hw, PHY_ID2, &phy_id);
+       if (ret_val)
+               goto out;
+
+       /* PHY_ID2 carries the lower ID bits plus the revision field */
+       phy->id |= (u32)(phy_id & PHY_REVISION_MASK);
+       phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK);
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_phy_reset_dsp_generic - Reset PHY DSP
+ *  @hw: pointer to the HW structure
+ *
+ *  Reset the digital signal processor by writing 0xC1 then 0 to the M88
+ *  general control register.  Succeeds silently if no write_reg op has
+ *  been installed.
+ **/
+s32 e1000_phy_reset_dsp_generic(struct e1000_hw *hw)
+{
+       s32 ret_val = E1000_SUCCESS;
+
+       DEBUGFUNC("e1000_phy_reset_dsp_generic");
+
+       if (!(hw->phy.ops.write_reg))
+               goto out;
+
+       ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xC1);
+       if (ret_val)
+               goto out;
+
+       ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0);
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_read_phy_reg_mdic - Read MDI control register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Reads the MDI control register in the PHY at offset and stores the
+ *  information read to data.
+ *
+ *  Returns E1000_SUCCESS, -E1000_ERR_PARAM for an out-of-range offset, or
+ *  -E1000_ERR_PHY on timeout or an MDIC error indication.
+ **/
+s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+       struct e1000_phy_info *phy = &hw->phy;
+       u32 i, mdic = 0;
+       s32 ret_val = E1000_SUCCESS;
+
+       DEBUGFUNC("e1000_read_phy_reg_mdic");
+
+       if (offset > MAX_PHY_REG_ADDRESS) {
+               DEBUGOUT1("PHY Address %d is out of range\n", offset);
+               return -E1000_ERR_PARAM;
+       }
+
+       /*
+        * Set up Op-code, Phy Address, and register offset in the MDI
+        * Control register.  The MAC will take care of interfacing with the
+        * PHY to retrieve the desired data.
+        */
+       mdic = ((offset << E1000_MDIC_REG_SHIFT) |
+               (phy->addr << E1000_MDIC_PHY_SHIFT) |
+               (E1000_MDIC_OP_READ));
+
+       E1000_WRITE_REG(hw, E1000_MDIC, mdic);
+
+       /*
+        * Poll the ready bit to see if the MDI read completed
+        * Increasing the time out as testing showed failures with
+        * the lower time out
+        */
+       for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
+               usec_delay(50);
+               mdic = E1000_READ_REG(hw, E1000_MDIC);
+               if (mdic & E1000_MDIC_READY)
+                       break;
+       }
+       if (!(mdic & E1000_MDIC_READY)) {
+               DEBUGOUT("MDI Read did not complete\n");
+               ret_val = -E1000_ERR_PHY;
+               goto out;
+       }
+       if (mdic & E1000_MDIC_ERROR) {
+               DEBUGOUT("MDI Error\n");
+               ret_val = -E1000_ERR_PHY;
+               goto out;
+       }
+       /* the low 16 bits of MDIC hold the data returned by the PHY */
+       *data = (u16) mdic;
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_write_phy_reg_mdic - Write MDI control register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write to register at offset
+ *
+ *  Writes data to MDI control register in the PHY at offset.
+ *
+ *  Returns E1000_SUCCESS, -E1000_ERR_PARAM for an out-of-range offset, or
+ *  -E1000_ERR_PHY on timeout or an MDIC error indication.
+ **/
+s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
+{
+       struct e1000_phy_info *phy = &hw->phy;
+       u32 i, mdic = 0;
+       s32 ret_val = E1000_SUCCESS;
+
+       DEBUGFUNC("e1000_write_phy_reg_mdic");
+
+       if (offset > MAX_PHY_REG_ADDRESS) {
+               DEBUGOUT1("PHY Address %d is out of range\n", offset);
+               return -E1000_ERR_PARAM;
+       }
+
+       /*
+        * Set up Op-code, Phy Address, and register offset in the MDI
+        * Control register.  The MAC will take care of interfacing with the
+        * PHY to retrieve the desired data.
+        */
+       mdic = (((u32)data) |
+               (offset << E1000_MDIC_REG_SHIFT) |
+               (phy->addr << E1000_MDIC_PHY_SHIFT) |
+               (E1000_MDIC_OP_WRITE));
+
+       E1000_WRITE_REG(hw, E1000_MDIC, mdic);
+
+       /*
+        * Poll the ready bit to see if the MDI write completed
+        * Increasing the time out as testing showed failures with
+        * the lower time out
+        */
+       for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
+               usec_delay(50);
+               mdic = E1000_READ_REG(hw, E1000_MDIC);
+               if (mdic & E1000_MDIC_READY)
+                       break;
+       }
+       if (!(mdic & E1000_MDIC_READY)) {
+               DEBUGOUT("MDI Write did not complete\n");
+               ret_val = -E1000_ERR_PHY;
+               goto out;
+       }
+       if (mdic & E1000_MDIC_ERROR) {
+               DEBUGOUT("MDI Error\n");
+               ret_val = -E1000_ERR_PHY;
+               goto out;
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_read_phy_reg_i2c - Read PHY register using i2c
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Reads the PHY register at offset using the i2c interface and stores the
+ *  retrieved information in data.
+ *
+ *  Returns E1000_SUCCESS or -E1000_ERR_PHY on timeout or an I2CCMD error.
+ **/
+s32 e1000_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+       struct e1000_phy_info *phy = &hw->phy;
+       u32 i, i2ccmd = 0;
+
+       DEBUGFUNC("e1000_read_phy_reg_i2c");
+
+       /*
+        * Set up Op-code, Phy Address, and register address in the I2CCMD
+        * register.  The MAC will take care of interfacing with the
+        * PHY to retrieve the desired data.
+        */
+       i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
+                 (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) |
+                 (E1000_I2CCMD_OPCODE_READ));
+
+       E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd);
+
+       /* Poll the ready bit to see if the I2C read completed */
+       for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
+               usec_delay(50);
+               i2ccmd = E1000_READ_REG(hw, E1000_I2CCMD);
+               if (i2ccmd & E1000_I2CCMD_READY)
+                       break;
+       }
+       if (!(i2ccmd & E1000_I2CCMD_READY)) {
+               DEBUGOUT("I2CCMD Read did not complete\n");
+               return -E1000_ERR_PHY;
+       }
+       if (i2ccmd & E1000_I2CCMD_ERROR) {
+               DEBUGOUT("I2CCMD Error bit set\n");
+               return -E1000_ERR_PHY;
+       }
+
+       /* Need to byte-swap the 16-bit value: I2C delivers big-endian data */
+       *data = ((i2ccmd >> 8) & 0x00FF) | ((i2ccmd << 8) & 0xFF00);
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_write_phy_reg_i2c - Write PHY register using i2c
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *
+ *  Writes the data to PHY register at the offset using the i2c interface.
+ **/
+s32 e1000_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data)
+{
+       struct e1000_phy_info *phy = &hw->phy;
+       u32 i, i2ccmd = 0;
+       u16 phy_data_swapped;
+
+       DEBUGFUNC("e1000_write_phy_reg_i2c");
+
+       /* Swap the data bytes for the I2C interface */
+       phy_data_swapped = ((data >> 8) & 0x00FF) | ((data << 8) & 0xFF00);
+
+       /*
+        * Set up Op-code, Phy Address, and register address in the I2CCMD
+        * register.  The MAC will take care of interfacing with the
+        * PHY to retrieve the desired data.
+        * The byte-swapped payload occupies the low 16 bits of the
+        * command word.
+        */
+       i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
+                 (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) |
+                 E1000_I2CCMD_OPCODE_WRITE |
+                 phy_data_swapped);
+
+       E1000_WRITE_REG(hw, E1000_I2CCMD, i2ccmd);
+
+       /* Poll the ready bit to see if the I2C write completed */
+       for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
+               usec_delay(50);
+               i2ccmd = E1000_READ_REG(hw, E1000_I2CCMD);
+               if (i2ccmd & E1000_I2CCMD_READY)
+                       break;
+       }
+       if (!(i2ccmd & E1000_I2CCMD_READY)) {
+               DEBUGOUT("I2CCMD Write did not complete\n");
+               return -E1000_ERR_PHY;
+       }
+       if (i2ccmd & E1000_I2CCMD_ERROR) {
+               DEBUGOUT("I2CCMD Error bit set\n");
+               return -E1000_ERR_PHY;
+       }
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_read_phy_reg_m88 - Read m88 PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Takes the PHY semaphore (when an acquire op is provided), performs an
+ *  MDIC read of the masked register offset into @data, then drops the
+ *  semaphore again.  Without an acquire op the call succeeds as a no-op.
+ **/
+s32 e1000_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+       s32 ret_val = E1000_SUCCESS;
+
+       DEBUGFUNC("e1000_read_phy_reg_m88");
+
+       if (hw->phy.ops.acquire) {
+               ret_val = hw->phy.ops.acquire(hw);
+               if (!ret_val) {
+                       /* Only the in-page bits of the offset are used */
+                       ret_val = e1000_read_phy_reg_mdic(hw,
+                                       MAX_PHY_REG_ADDRESS & offset, data);
+                       hw->phy.ops.release(hw);
+               }
+       }
+
+       return ret_val;
+}
+
+/**
+ *  e1000_write_phy_reg_m88 - Write m88 PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *
+ *  Takes the PHY semaphore (when an acquire op is provided), performs an
+ *  MDIC write of @data to the masked register offset, then drops the
+ *  semaphore again.  Without an acquire op the call succeeds as a no-op.
+ **/
+s32 e1000_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data)
+{
+       s32 ret_val = E1000_SUCCESS;
+
+       DEBUGFUNC("e1000_write_phy_reg_m88");
+
+       if (hw->phy.ops.acquire) {
+               ret_val = hw->phy.ops.acquire(hw);
+               if (!ret_val) {
+                       /* Only the in-page bits of the offset are used */
+                       ret_val = e1000_write_phy_reg_mdic(hw,
+                                       MAX_PHY_REG_ADDRESS & offset, data);
+                       hw->phy.ops.release(hw);
+               }
+       }
+
+       return ret_val;
+}
+
+/**
+ *  __e1000_read_phy_reg_igp - Read igp PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *  @locked: semaphore has already been acquired or not
+ *
+ *  Acquires semaphore, if necessary, then reads the PHY register at offset
+ *  and stores the retrieved information in data.  Release any acquired
+ *  semaphores before exiting.
+ **/
+static s32 __e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data,
+                                    bool locked)
+{
+       s32 ret_val = E1000_SUCCESS;
+
+       DEBUGFUNC("__e1000_read_phy_reg_igp");
+
+       if (!locked) {
+               /* Without an acquire op this is a successful no-op */
+               if (!(hw->phy.ops.acquire))
+                       goto out;
+
+               ret_val = hw->phy.ops.acquire(hw);
+               if (ret_val)
+                       goto out;
+       }
+
+       /*
+        * Offsets beyond the multi-page boundary need the full offset
+        * written to the page-select register first; the MDIC read below
+        * then uses only the in-page address (masked offset).
+        */
+       if (offset > MAX_PHY_MULTI_PAGE_REG) {
+               ret_val = e1000_write_phy_reg_mdic(hw,
+                                                  IGP01E1000_PHY_PAGE_SELECT,
+                                                  (u16)offset);
+               if (ret_val)
+                       goto release;
+       }
+
+       ret_val = e1000_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
+                                         data);
+
+release:
+       /* Only release what we acquired ourselves */
+       if (!locked)
+               hw->phy.ops.release(hw);
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_read_phy_reg_igp - Read igp PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Unlocked entry point around __e1000_read_phy_reg_igp(): the common
+ *  implementation acquires and releases the PHY semaphore itself before
+ *  returning the retrieved value in @data.
+ **/
+s32 e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+       /* FALSE: semaphore not yet held, let the helper take it */
+       return __e1000_read_phy_reg_igp(hw, offset, data, FALSE);
+}
+
+/**
+ *  e1000_read_phy_reg_igp_locked - Read igp PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Locked entry point around __e1000_read_phy_reg_igp() for callers that
+ *  already hold the PHY semaphore; no acquire/release is performed.
+ **/
+s32 e1000_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+       /* TRUE: caller owns the semaphore for the whole transaction */
+       return __e1000_read_phy_reg_igp(hw, offset, data, TRUE);
+}
+
+/**
+ *  __e1000_write_phy_reg_igp - Write igp PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *  @locked: semaphore has already been acquired or not
+ *
+ *  Acquires semaphore, if necessary, then writes the data to PHY register
+ *  at the offset.  Release any acquired semaphores before exiting.
+ **/
+static s32 __e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data,
+                                     bool locked)
+{
+       s32 ret_val = E1000_SUCCESS;
+
+       /* Trace the real (double-underscore) name, matching the read twin */
+       DEBUGFUNC("__e1000_write_phy_reg_igp");
+
+       if (!locked) {
+               /* Without an acquire op this is a successful no-op */
+               if (!(hw->phy.ops.acquire))
+                       goto out;
+
+               ret_val = hw->phy.ops.acquire(hw);
+               if (ret_val)
+                       goto out;
+       }
+
+       /*
+        * Offsets beyond the multi-page boundary need the full offset
+        * written to the page-select register first; the MDIC write below
+        * then uses only the in-page address (masked offset).
+        */
+       if (offset > MAX_PHY_MULTI_PAGE_REG) {
+               ret_val = e1000_write_phy_reg_mdic(hw,
+                                                  IGP01E1000_PHY_PAGE_SELECT,
+                                                  (u16)offset);
+               if (ret_val)
+                       goto release;
+       }
+
+       ret_val = e1000_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
+                                          data);
+
+release:
+       /* Only release what we acquired ourselves */
+       if (!locked)
+               hw->phy.ops.release(hw);
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_write_phy_reg_igp - Write igp PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *
+ *  Unlocked entry point around __e1000_write_phy_reg_igp(): the common
+ *  implementation acquires and releases the PHY semaphore itself.
+ **/
+s32 e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data)
+{
+       /* FALSE: semaphore not yet held, let the helper take it */
+       return __e1000_write_phy_reg_igp(hw, offset, data, FALSE);
+}
+
+/**
+ *  e1000_write_phy_reg_igp_locked - Write igp PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *
+ *  Locked entry point around __e1000_write_phy_reg_igp() for callers that
+ *  already hold the PHY semaphore; no acquire/release is performed.
+ **/
+s32 e1000_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data)
+{
+       /* TRUE: caller owns the semaphore for the whole transaction */
+       return __e1000_write_phy_reg_igp(hw, offset, data, TRUE);
+}
+
+/**
+ *  __e1000_read_kmrn_reg - Read kumeran register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *  @locked: semaphore has already been acquired or not
+ *
+ *  Acquires semaphore, if necessary.  Then reads the PHY register at offset
+ *  using the kumeran interface.  The information retrieved is stored in data.
+ *  Release any acquired semaphores before exiting.
+ **/
+static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data,
+                                 bool locked)
+{
+       u32 kmrnctrlsta;
+       s32 ret_val = E1000_SUCCESS;
+
+       DEBUGFUNC("__e1000_read_kmrn_reg");
+
+       if (!locked) {
+               /* Without an acquire op this is a successful no-op */
+               if (!(hw->phy.ops.acquire))
+                       goto out;
+
+               ret_val = hw->phy.ops.acquire(hw);
+               if (ret_val)
+                       goto out;
+       }
+
+       /*
+        * Compose the command: register offset in the OFFSET field plus the
+        * REN bit (presumably "read enable" — confirm against datasheet).
+        */
+       kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
+                      E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN;
+       E1000_WRITE_REG(hw, E1000_KMRNCTRLSTA, kmrnctrlsta);
+
+       /* Fixed 2us wait before reading back; no ready-bit is polled here */
+       usec_delay(2);
+
+       /* Result is returned in the low 16 bits of the same register */
+       kmrnctrlsta = E1000_READ_REG(hw, E1000_KMRNCTRLSTA);
+       *data = (u16)kmrnctrlsta;
+
+       if (!locked)
+               hw->phy.ops.release(hw);
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_read_kmrn_reg_generic -  Read kumeran register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Unlocked entry point around __e1000_read_kmrn_reg(): the common
+ *  implementation acquires and releases the PHY semaphore around the
+ *  kumeran access and returns the value in @data.
+ **/
+s32 e1000_read_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+       /* FALSE: semaphore not yet held, let the helper take it */
+       return __e1000_read_kmrn_reg(hw, offset, data, FALSE);
+}
+
+/**
+ *  e1000_read_kmrn_reg_locked -  Read kumeran register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Locked entry point around __e1000_read_kmrn_reg() for callers that
+ *  already hold the PHY semaphore; no acquire/release is performed.
+ **/
+s32 e1000_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+       /* TRUE: caller owns the semaphore for the whole transaction */
+       return __e1000_read_kmrn_reg(hw, offset, data, TRUE);
+}
+
+/**
+ *  __e1000_write_kmrn_reg - Write kumeran register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *  @locked: semaphore has already been acquired or not
+ *
+ *  Acquires semaphore, if necessary.  Then write the data to PHY register
+ *  at the offset using the kumeran interface.  Release any acquired semaphores
+ *  before exiting.
+ **/
+static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data,
+                                  bool locked)
+{
+       u32 kmrnctrlsta;
+       s32 ret_val = E1000_SUCCESS;
+
+       /* Trace the real (double-underscore) name, matching the read twin */
+       DEBUGFUNC("__e1000_write_kmrn_reg");
+
+       if (!locked) {
+               /* Without an acquire op this is a successful no-op */
+               if (!(hw->phy.ops.acquire))
+                       goto out;
+
+               ret_val = hw->phy.ops.acquire(hw);
+               if (ret_val)
+                       goto out;
+       }
+
+       /*
+        * Compose the command: register offset in the OFFSET field, write
+        * payload in the low 16 bits (no REN bit means a write operation).
+        */
+       kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
+                      E1000_KMRNCTRLSTA_OFFSET) | data;
+       E1000_WRITE_REG(hw, E1000_KMRNCTRLSTA, kmrnctrlsta);
+
+       /* Fixed 2us wait for the write to take effect; nothing is polled */
+       usec_delay(2);
+
+       if (!locked)
+               hw->phy.ops.release(hw);
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_write_kmrn_reg_generic -  Write kumeran register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *
+ *  Unlocked entry point around __e1000_write_kmrn_reg(): the common
+ *  implementation acquires and releases the PHY semaphore around the
+ *  kumeran access.
+ **/
+s32 e1000_write_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 data)
+{
+       /* FALSE: semaphore not yet held, let the helper take it */
+       return __e1000_write_kmrn_reg(hw, offset, data, FALSE);
+}
+
+/**
+ *  e1000_write_kmrn_reg_locked -  Write kumeran register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *
+ *  Locked entry point around __e1000_write_kmrn_reg() for callers that
+ *  already hold the PHY semaphore; no acquire/release is performed.
+ **/
+s32 e1000_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data)
+{
+       /* TRUE: caller owns the semaphore for the whole transaction */
+       return __e1000_write_kmrn_reg(hw, offset, data, TRUE);
+}
+
+/**
+ *  e1000_copper_link_setup_82577 - Setup 82577 PHY for copper link
+ *  @hw: pointer to the HW structure
+ *
+ *  Sets up Carrier-sense on Transmit and downshift values.
+ **/
+s32 e1000_copper_link_setup_82577(struct e1000_hw *hw)
+{
+       s32 ret_val;
+       u16 phy_data;
+
+       DEBUGFUNC("e1000_copper_link_setup_82577");
+
+       /* Nothing to do when PHY resets are administratively disabled */
+       if (hw->phy.reset_disable) {
+               ret_val = E1000_SUCCESS;
+               goto out;
+       }
+
+       /* 82580 parts are reset first; other types skip the reset here */
+       if (hw->phy.type == e1000_phy_82580) {
+               ret_val = hw->phy.ops.reset(hw);
+               if (ret_val) {
+                       DEBUGOUT("Error resetting the PHY.\n");
+                       goto out;
+               }
+       }
+
+       /* Enable CRS on Tx. This must be set for half-duplex operation. */
+       ret_val = hw->phy.ops.read_reg(hw, I82577_CFG_REG, &phy_data);
+       if (ret_val)
+               goto out;
+
+       phy_data |= I82577_CFG_ASSERT_CRS_ON_TX;
+
+       /* Enable downshift */
+       phy_data |= I82577_CFG_ENABLE_DOWNSHIFT;
+
+       /* Write both config bits back in a single register update */
+       ret_val = hw->phy.ops.write_reg(hw, I82577_CFG_REG, phy_data);
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_copper_link_setup_m88 - Setup m88 PHY's for copper link
+ *  @hw: pointer to the HW structure
+ *
+ *  Sets up MDI/MDI-X and polarity for m88 PHY's.  If necessary, transmit clock
+ *  and downshift values are set also.
+ **/
+s32 e1000_copper_link_setup_m88(struct e1000_hw *hw)
+{
+       struct e1000_phy_info *phy = &hw->phy;
+       s32 ret_val;
+       u16 phy_data;
+
+       DEBUGFUNC("e1000_copper_link_setup_m88");
+
+       /* Nothing to do when PHY resets are administratively disabled */
+       if (phy->reset_disable) {
+               ret_val = E1000_SUCCESS;
+               goto out;
+       }
+
+       /* Enable CRS on Tx. This must be set for half-duplex operation. */
+       ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+       if (ret_val)
+               goto out;
+
+       phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
+
+       /*
+        * Options:
+        *   MDI/MDI-X = 0 (default)
+        *   0 - Auto for all speeds
+        *   1 - MDI mode
+        *   2 - MDI-X mode
+        *   3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
+        */
+       phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
+
+       switch (phy->mdix) {
+       case 1:
+               phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE;
+               break;
+       case 2:
+               phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE;
+               break;
+       case 3:
+               phy_data |= M88E1000_PSCR_AUTO_X_1000T;
+               break;
+       case 0:
+       default:
+               phy_data |= M88E1000_PSCR_AUTO_X_MODE;
+               break;
+       }
+
+       /*
+        * Options:
+        *   disable_polarity_correction = 0 (default)
+        *       Automatic Correction for Reversed Cable Polarity
+        *   0 - Disabled
+        *   1 - Enabled
+        */
+       phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL;
+       if (phy->disable_polarity_correction == 1)
+               phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
+
+       ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+       if (ret_val)
+               goto out;
+
+       /* Older (pre-rev-4) silicon needs extra TX clock/downshift tuning */
+       if (phy->revision < E1000_REVISION_4) {
+               /*
+                * Force TX_CLK in the Extended PHY Specific Control Register
+                * to 25MHz clock.
+                */
+               ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
+                                            &phy_data);
+               if (ret_val)
+                       goto out;
+
+               phy_data |= M88E1000_EPSCR_TX_CLK_25;
+
+               if ((phy->revision == E1000_REVISION_2) &&
+                   (phy->id == M88E1111_I_PHY_ID)) {
+                       /* 82573L PHY - set the downshift counter to 5x. */
+                       phy_data &= ~M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK;
+                       phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X;
+               } else {
+                       /* Configure Master and Slave downshift values */
+                       phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK |
+                                    M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK);
+                       phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X |
+                                    M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X);
+               }
+               ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
+                                            phy_data);
+               if (ret_val)
+                       goto out;
+       }
+
+       /*
+        * Commit the changes.  NOTE(review): ops.commit presumably issues a
+        * PHY soft reset so the new settings take effect — confirm against
+        * the bound commit implementation.
+        */
+       ret_val = phy->ops.commit(hw);
+       if (ret_val) {
+               DEBUGOUT("Error committing the PHY changes\n");
+               goto out;
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_copper_link_setup_m88_gen2 - Setup m88 PHY's for copper link
+ *  @hw: pointer to the HW structure
+ *
+ *  Sets up MDI/MDI-X and polarity for i347-AT4, m88e1322 and m88e1112 PHY's.
+ *  Also enables and sets the downshift parameters.
+ **/
+s32 e1000_copper_link_setup_m88_gen2(struct e1000_hw *hw)
+{
+       struct e1000_phy_info *phy = &hw->phy;
+       s32 ret_val;
+       u16 phy_data;
+
+       DEBUGFUNC("e1000_copper_link_setup_m88_gen2");
+
+       /* Nothing to do when PHY resets are administratively disabled */
+       if (phy->reset_disable) {
+               ret_val = E1000_SUCCESS;
+               goto out;
+       }
+
+       /* Enable CRS on Tx. This must be set for half-duplex operation. */
+       ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+       if (ret_val)
+               goto out;
+
+       /*
+        * Options:
+        *   MDI/MDI-X = 0 (default)
+        *   0 - Auto for all speeds
+        *   1 - MDI mode
+        *   2 - MDI-X mode
+        *   3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
+        */
+       phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
+
+       switch (phy->mdix) {
+       case 1:
+               phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE;
+               break;
+       case 2:
+               phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE;
+               break;
+       case 3:
+               /* M88E1112 does not support this mode */
+               if (phy->id != M88E1112_E_PHY_ID) {
+                       phy_data |= M88E1000_PSCR_AUTO_X_1000T;
+                       break;
+               }
+               /* fall through - M88E1112 gets the auto-for-all default */
+       case 0:
+       default:
+               phy_data |= M88E1000_PSCR_AUTO_X_MODE;
+               break;
+       }
+
+       /*
+        * Options:
+        *   disable_polarity_correction = 0 (default)
+        *       Automatic Correction for Reversed Cable Polarity
+        *   0 - Disabled
+        *   1 - Enabled
+        */
+       phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL;
+       if (phy->disable_polarity_correction == 1)
+               phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
+
+       /* Enable downshift and setting it to X6 */
+       phy_data &= ~I347AT4_PSCR_DOWNSHIFT_MASK;
+       phy_data |= I347AT4_PSCR_DOWNSHIFT_6X;
+       phy_data |= I347AT4_PSCR_DOWNSHIFT_ENABLE;
+
+       ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+       if (ret_val)
+               goto out;
+
+       /*
+        * Commit the changes.  NOTE(review): ops.commit presumably issues a
+        * PHY soft reset so the new settings take effect — confirm against
+        * the bound commit implementation.
+        */
+       ret_val = phy->ops.commit(hw);
+       if (ret_val) {
+               DEBUGOUT("Error committing the PHY changes\n");
+               goto out;
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_copper_link_setup_igp - Setup igp PHY's for copper link
+ *  @hw: pointer to the HW structure
+ *
+ *  Sets up LPLU, MDI/MDI-X, polarity, Smartspeed and Master/Slave config for
+ *  igp PHY's.
+ **/
+s32 e1000_copper_link_setup_igp(struct e1000_hw *hw)
+{
+       struct e1000_phy_info *phy = &hw->phy;
+       s32 ret_val;
+       u16 data;
+
+       DEBUGFUNC("e1000_copper_link_setup_igp");
+
+       /* Nothing to do when PHY resets are administratively disabled */
+       if (phy->reset_disable) {
+               ret_val = E1000_SUCCESS;
+               goto out;
+       }
+
+       ret_val = hw->phy.ops.reset(hw);
+       if (ret_val) {
+               DEBUGOUT("Error resetting the PHY.\n");
+               goto out;
+       }
+
+       /*
+        * Wait 100ms for MAC to configure PHY from NVM settings, to avoid
+        * timeout issues when LFS is enabled.
+        */
+       msec_delay(100);
+
+       /* disable lplu d0 during driver init (op is optional) */
+       if (hw->phy.ops.set_d0_lplu_state) {
+               ret_val = hw->phy.ops.set_d0_lplu_state(hw, FALSE);
+               if (ret_val) {
+                       DEBUGOUT("Error Disabling LPLU D0\n");
+                       goto out;
+               }
+       }
+       /* Configure mdi-mdix settings */
+       ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &data);
+       if (ret_val)
+               goto out;
+
+       data &= ~IGP01E1000_PSCR_AUTO_MDIX;
+
+       /* 1 = forced MDI, 2 = forced MDI-X, 0/other = automatic crossover */
+       switch (phy->mdix) {
+       case 1:
+               data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
+               break;
+       case 2:
+               data |= IGP01E1000_PSCR_FORCE_MDI_MDIX;
+               break;
+       case 0:
+       default:
+               data |= IGP01E1000_PSCR_AUTO_MDIX;
+               break;
+       }
+       ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CTRL, data);
+       if (ret_val)
+               goto out;
+
+       /* set auto-master slave resolution settings */
+       if (hw->mac.autoneg) {
+               /*
+                * when autonegotiation advertisement is only 1000Mbps then we
+                * should disable SmartSpeed and enable Auto MasterSlave
+                * resolution as hardware default.
+                */
+               if (phy->autoneg_advertised == ADVERTISE_1000_FULL) {
+                       /* Disable SmartSpeed */
+                       ret_val = phy->ops.read_reg(hw,
+                                                    IGP01E1000_PHY_PORT_CONFIG,
+                                                    &data);
+                       if (ret_val)
+                               goto out;
+
+                       data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+                       ret_val = phy->ops.write_reg(hw,
+                                                    IGP01E1000_PHY_PORT_CONFIG,
+                                                    data);
+                       if (ret_val)
+                               goto out;
+
+                       /* Set auto Master/Slave resolution process */
+                       ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, &data);
+                       if (ret_val)
+                               goto out;
+
+                       data &= ~CR_1000T_MS_ENABLE;
+                       ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, data);
+                       if (ret_val)
+                               goto out;
+               }
+
+               ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, &data);
+               if (ret_val)
+                       goto out;
+
+               /* load defaults for future use */
+               phy->original_ms_type = (data & CR_1000T_MS_ENABLE) ?
+                       ((data & CR_1000T_MS_VALUE) ?
+                       e1000_ms_force_master :
+                       e1000_ms_force_slave) :
+                       e1000_ms_auto;
+
+               switch (phy->ms_type) {
+               case e1000_ms_force_master:
+                       data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE);
+                       break;
+               case e1000_ms_force_slave:
+                       data |= CR_1000T_MS_ENABLE;
+                       data &= ~(CR_1000T_MS_VALUE);
+                       break;
+               case e1000_ms_auto:
+                       data &= ~CR_1000T_MS_ENABLE;
+                       /* fall through - nothing further for auto mode */
+               default:
+                       break;
+               }
+               ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, data);
+               if (ret_val)
+                       goto out;
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_copper_link_autoneg - Setup/Enable autoneg for copper link
+ *  @hw: pointer to the HW structure
+ *
+ *  Performs initial bounds checking on autoneg advertisement parameter, then
+ *  configure to advertise the full capability.  Setup the PHY to autoneg
+ *  and restart the negotiation process between the link partner.  If
+ *  autoneg_wait_to_complete, then wait for autoneg to complete before exiting.
+ **/
+static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
+{
+       struct e1000_phy_info *phy = &hw->phy;
+       s32 ret_val;
+       u16 phy_ctrl;
+
+       DEBUGFUNC("e1000_copper_link_autoneg");
+
+       /*
+        * Perform some bounds checking on the autoneg advertisement
+        * parameter.  Anything outside the supported mask is dropped.
+        */
+       phy->autoneg_advertised &= phy->autoneg_mask;
+
+       /*
+        * If autoneg_advertised is zero, we assume it was not defaulted
+        * by the calling code so we set to advertise full capability.
+        */
+       if (phy->autoneg_advertised == 0)
+               phy->autoneg_advertised = phy->autoneg_mask;
+
+       DEBUGOUT("Reconfiguring auto-neg advertisement params\n");
+       ret_val = e1000_phy_setup_autoneg(hw);
+       if (ret_val) {
+               DEBUGOUT("Error Setting up Auto-Negotiation\n");
+               goto out;
+       }
+       DEBUGOUT("Restarting Auto-Neg\n");
+
+       /*
+        * Restart auto-negotiation by setting the Auto Neg Enable bit and
+        * the Auto Neg Restart bit in the PHY control register.
+        */
+       ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_ctrl);
+       if (ret_val)
+               goto out;
+
+       phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
+       ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_ctrl);
+       if (ret_val)
+               goto out;
+
+       /*
+        * Does the user want to wait for Auto-Neg to complete here, or
+        * check at a later time (for example, callback routine).
+        */
+       if (phy->autoneg_wait_to_complete) {
+               ret_val = hw->mac.ops.wait_autoneg(hw);
+               if (ret_val) {
+                       DEBUGOUT("Error while waiting for "
+                                "autoneg to complete\n");
+                       goto out;
+               }
+       }
+
+       /*
+        * NOTE(review): set even on the error paths above not reached here;
+        * presumably forces the next link-status query to re-read the PHY —
+        * confirm against the get_link_status consumers.
+        */
+       hw->mac.get_link_status = TRUE;
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_phy_setup_autoneg - Configure PHY for auto-negotiation
+ *  @hw: pointer to the HW structure
+ *
+ *  Reads the MII auto-neg advertisement register and/or the 1000T control
+ *  register and if the PHY is already setup for auto-negotiation, then
+ *  return successful.  Otherwise, setup advertisement and flow control to
+ *  the appropriate values for the wanted auto-negotiation.
+ **/
+static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
+{
+       struct e1000_phy_info *phy = &hw->phy;
+       s32 ret_val;
+       u16 mii_autoneg_adv_reg;
+       u16 mii_1000t_ctrl_reg = 0;
+
+       DEBUGFUNC("e1000_phy_setup_autoneg");
+
+       phy->autoneg_advertised &= phy->autoneg_mask;
+
+       /* Read the MII Auto-Neg Advertisement Register (Address 4). */
+       ret_val = phy->ops.read_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg);
+       if (ret_val)
+               goto out;
+
+       if (phy->autoneg_mask & ADVERTISE_1000_FULL) {
+               /* Read the MII 1000Base-T Control Register (Address 9). */
+               ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL,
+                                           &mii_1000t_ctrl_reg);
+               if (ret_val)
+                       goto out;
+       }
+
+       /*
+        * Need to parse both autoneg_advertised and fc and set up
+        * the appropriate PHY registers.  First we will parse for
+        * autoneg_advertised software override.  Since we can advertise
+        * a plethora of combinations, we need to check each bit
+        * individually.
+        */
+
+       /*
+        * First we clear all the 10/100 mb speed bits in the Auto-Neg
+        * Advertisement Register (Address 4) and the 1000 mb speed bits in
+        * the  1000Base-T Control Register (Address 9).
+        */
+       mii_autoneg_adv_reg &= ~(NWAY_AR_100TX_FD_CAPS |
+                                NWAY_AR_100TX_HD_CAPS |
+                                NWAY_AR_10T_FD_CAPS   |
+                                NWAY_AR_10T_HD_CAPS);
+       mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS);
+
+       DEBUGOUT1("autoneg_advertised %x\n", phy->autoneg_advertised);
+
+       /* Do we want to advertise 10 Mb Half Duplex? */
+       if (phy->autoneg_advertised & ADVERTISE_10_HALF) {
+               DEBUGOUT("Advertise 10mb Half duplex\n");
+               mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS;
+       }
+
+       /* Do we want to advertise 10 Mb Full Duplex? */
+       if (phy->autoneg_advertised & ADVERTISE_10_FULL) {
+               DEBUGOUT("Advertise 10mb Full duplex\n");
+               mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS;
+       }
+
+       /* Do we want to advertise 100 Mb Half Duplex? */
+       if (phy->autoneg_advertised & ADVERTISE_100_HALF) {
+               DEBUGOUT("Advertise 100mb Half duplex\n");
+               mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS;
+       }
+
+       /* Do we want to advertise 100 Mb Full Duplex? */
+       if (phy->autoneg_advertised & ADVERTISE_100_FULL) {
+               DEBUGOUT("Advertise 100mb Full duplex\n");
+               mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS;
+       }
+
+       /* We do not allow the Phy to advertise 1000 Mb Half Duplex */
+       if (phy->autoneg_advertised & ADVERTISE_1000_HALF)
+               DEBUGOUT("Advertise 1000mb Half duplex request denied!\n");
+
+       /* Do we want to advertise 1000 Mb Full Duplex? */
+       if (phy->autoneg_advertised & ADVERTISE_1000_FULL) {
+               DEBUGOUT("Advertise 1000mb Full duplex\n");
+               mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
+       }
+
+       /*
+        * Check for a software override of the flow control settings, and
+        * setup the PHY advertisement registers accordingly.  If
+        * auto-negotiation is enabled, then software will have to set the
+        * "PAUSE" bits to the correct value in the Auto-Negotiation
+        * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto-
+        * negotiation.
+        *
+        * The possible values of the "fc" parameter are:
+        *      0:  Flow control is completely disabled
+        *      1:  Rx flow control is enabled (we can receive pause frames
+        *          but not send pause frames).
+        *      2:  Tx flow control is enabled (we can send pause frames
+        *          but we do not support receiving pause frames).
+        *      3:  Both Rx and Tx flow control (symmetric) are enabled.
+        *  other:  No software override.  The flow control configuration
+        *          in the EEPROM is used.
+        */
+       switch (hw->fc.current_mode) {
+       case e1000_fc_none:
+               /*
+                * Flow control (Rx & Tx) is completely disabled by a
+                * software over-ride.
+                */
+               mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+               break;
+       case e1000_fc_rx_pause:
+               /*
+                * Rx Flow control is enabled, and Tx Flow control is
+                * disabled, by a software over-ride.
+                *
+                * Since there really isn't a way to advertise that we are
+                * capable of Rx Pause ONLY, we will advertise that we
+                * support both symmetric and asymmetric Rx PAUSE.  Later
+                * (in e1000_config_fc_after_link_up) we will disable the
+                * hw's ability to send PAUSE frames.
+                */
+               mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+               break;
+       case e1000_fc_tx_pause:
+               /*
+                * Tx Flow control is enabled, and Rx Flow control is
+                * disabled, by a software over-ride.
+                */
+               mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR;
+               mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE;
+               break;
+       case e1000_fc_full:
+               /*
+                * Flow control (both Rx and Tx) is enabled by a software
+                * over-ride.
+                */
+               mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+               break;
+       default:
+               DEBUGOUT("Flow control param set incorrectly\n");
+               ret_val = -E1000_ERR_CONFIG;
+               goto out;
+       }
+
+       ret_val = phy->ops.write_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg);
+       if (ret_val)
+               goto out;
+
+       DEBUGOUT1("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
+
+       if (phy->autoneg_mask & ADVERTISE_1000_FULL) {
+               ret_val = phy->ops.write_reg(hw,
+                                             PHY_1000T_CTRL,
+                                             mii_1000t_ctrl_reg);
+               if (ret_val)
+                       goto out;
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_setup_copper_link_generic - Configure copper link settings
+ *  @hw: pointer to the HW structure
+ *
+ *  Configures the link either via auto-negotiation or by forcing the
+ *  user-requested speed/duplex, then polls for link.  Once link comes up,
+ *  the collision distance and flow control are configured.  If link is
+ *  not established, we return -E1000_ERR_PHY (-2).
+ **/
+s32 e1000_setup_copper_link_generic(struct e1000_hw *hw)
+{
+       s32 ret_val;
+       bool link_up;
+
+       DEBUGFUNC("e1000_setup_copper_link_generic");
+
+       if (!hw->mac.autoneg) {
+               /*
+                * PHY will be set to 10H, 10F, 100H or 100F
+                * depending on user settings.
+                */
+               DEBUGOUT("Forcing Speed and Duplex\n");
+               ret_val = hw->phy.ops.force_speed_duplex(hw);
+               if (ret_val) {
+                       DEBUGOUT("Error Forcing Speed and Duplex\n");
+                       return ret_val;
+               }
+       } else {
+               /*
+                * Setup autoneg and flow control advertisement and perform
+                * autonegotiation.
+                */
+               ret_val = e1000_copper_link_autoneg(hw);
+               if (ret_val)
+                       return ret_val;
+       }
+
+       /*
+        * Check link status. Wait up to 100 microseconds for link to become
+        * valid.
+        */
+       ret_val = e1000_phy_has_link_generic(hw, COPPER_LINK_UP_LIMIT, 10,
+                                            &link_up);
+       if (ret_val)
+               return ret_val;
+
+       if (!link_up) {
+               DEBUGOUT("Unable to establish link!!!\n");
+               return ret_val;
+       }
+
+       DEBUGOUT("Valid link established!!!\n");
+       e1000_config_collision_dist_generic(hw);
+       return e1000_config_fc_after_link_up_generic(hw);
+}
+
+/**
+ *  e1000_phy_force_speed_duplex_igp - Force speed/duplex for igp PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  Forces speed and duplex via the PHY setup helper, clears the
+ *  auto-crossover bits to force MDI manually, then (optionally) waits
+ *  for link.  Returns 0 on success, else a negative error code.
+ **/
+s32 e1000_phy_force_speed_duplex_igp(struct e1000_hw *hw)
+{
+       struct e1000_phy_info *phy = &hw->phy;
+       s32 ret_val;
+       u16 reg;
+       bool link_up;
+
+       DEBUGFUNC("e1000_phy_force_speed_duplex_igp");
+
+       ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &reg);
+       if (ret_val)
+               return ret_val;
+
+       e1000_phy_force_speed_duplex_setup(hw, &reg);
+
+       ret_val = phy->ops.write_reg(hw, PHY_CONTROL, reg);
+       if (ret_val)
+               return ret_val;
+
+       /*
+        * Clear Auto-Crossover to force MDI manually.  IGP requires MDI
+        * forced whenever speed and duplex are forced.
+        */
+       ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &reg);
+       if (ret_val)
+               return ret_val;
+
+       reg &= ~(IGP01E1000_PSCR_AUTO_MDIX | IGP01E1000_PSCR_FORCE_MDI_MDIX);
+
+       ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CTRL, reg);
+       if (ret_val)
+               return ret_val;
+
+       DEBUGOUT1("IGP PSCR: %X\n", reg);
+
+       usec_delay(1);
+
+       if (!phy->autoneg_wait_to_complete)
+               return ret_val;
+
+       DEBUGOUT("Waiting for forced speed/duplex link on IGP phy.\n");
+
+       ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, 100000,
+                                            &link_up);
+       if (ret_val)
+               return ret_val;
+
+       if (!link_up)
+               DEBUGOUT("Link taking longer than expected.\n");
+
+       /* Try once more */
+       return e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, 100000,
+                                         &link_up);
+}
+
+/**
+ *  e1000_phy_force_speed_duplex_m88 - Force speed/duplex for m88 PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  Calls the PHY setup function to force speed and duplex.  Clears the
+ *  auto-crossover to force MDI manually.  Resets the PHY to commit the
+ *  changes.  If time expires while waiting for link up, we reset the DSP.
+ *  After reset, TX_CLK and CRS on Tx must be set.  Return successful upon
+ *  successful completion, else return corresponding error code.
+ **/
+s32 e1000_phy_force_speed_duplex_m88(struct e1000_hw *hw)
+{
+       struct e1000_phy_info *phy = &hw->phy;
+       s32 ret_val;
+       u16 phy_data;
+       bool link;
+
+       DEBUGFUNC("e1000_phy_force_speed_duplex_m88");
+
+       /*
+        * Clear Auto-Crossover to force MDI manually.  M88E1000 requires MDI
+        * forced whenever speed and duplex are forced.
+        */
+       ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+       if (ret_val)
+               goto out;
+
+       phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
+       ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+       if (ret_val)
+               goto out;
+
+       DEBUGOUT1("M88E1000 PSCR: %X\n", phy_data);
+
+       /* Apply the forced speed/duplex to PHY_CONTROL (the setup helper
+        * also forces the MAC's CTRL register). */
+       ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
+       if (ret_val)
+               goto out;
+
+       e1000_phy_force_speed_duplex_setup(hw, &phy_data);
+
+       ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data);
+       if (ret_val)
+               goto out;
+
+       /* Reset the phy to commit changes. */
+       ret_val = hw->phy.ops.commit(hw);
+       if (ret_val)
+               goto out;
+
+       if (phy->autoneg_wait_to_complete) {
+               DEBUGOUT("Waiting for forced speed/duplex link on M88 phy.\n");
+
+               ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+                                                    100000, &link);
+               if (ret_val)
+                       goto out;
+
+               if (!link) {
+                       /*
+                        * The DSP-reset workaround below is only applied to
+                        * the original m88 PHY type; the I347AT4, M88E1340M
+                        * and M88E1112 variants simply log and retry.
+                        */
+                       if (hw->phy.type != e1000_phy_m88 ||
+                           hw->phy.id == I347AT4_E_PHY_ID ||
+                           hw->phy.id == M88E1340M_E_PHY_ID ||
+                           hw->phy.id == M88E1112_E_PHY_ID) {
+                               DEBUGOUT("Link taking longer than expected.\n");
+                       } else {
+                               /*
+                                * We didn't get link.
+                                * Reset the DSP and cross our fingers.
+                                */
+                               ret_val = phy->ops.write_reg(hw,
+                                               M88E1000_PHY_PAGE_SELECT,
+                                               0x001d);
+                               if (ret_val)
+                                       goto out;
+                               ret_val = e1000_phy_reset_dsp_generic(hw);
+                               if (ret_val)
+                                       goto out;
+                       }
+               }
+
+               /* Try once more */
+               ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+                                                    100000, &link);
+               if (ret_val)
+                       goto out;
+       }
+
+       /*
+        * The TX_CLK / CRS-on-Tx fixups below only apply to the original
+        * m88 PHY type; skip them for the newer variants.
+        */
+       if (hw->phy.type != e1000_phy_m88 ||
+           hw->phy.id == I347AT4_E_PHY_ID ||
+           hw->phy.id == M88E1340M_E_PHY_ID ||
+           hw->phy.id == M88E1112_E_PHY_ID)
+               goto out;
+
+       ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
+       if (ret_val)
+               goto out;
+
+       /*
+        * Resetting the phy means we need to re-force TX_CLK in the
+        * Extended PHY Specific Control Register to 25MHz clock from
+        * the reset value of 2.5MHz.
+        */
+       phy_data |= M88E1000_EPSCR_TX_CLK_25;
+       ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
+       if (ret_val)
+               goto out;
+
+       /*
+        * In addition, we must re-enable CRS on Tx for both half and full
+        * duplex.
+        */
+       ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+       if (ret_val)
+               goto out;
+
+       phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
+       ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_phy_force_speed_duplex_ife - Force PHY speed & duplex
+ *  @hw: pointer to the HW structure
+ *
+ *  Forces the speed and duplex settings of the PHY and disables MDI-X,
+ *  then (optionally) waits for link.  This is a function pointer entry
+ *  point only called by PHY setup routines.
+ **/
+s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw)
+{
+       struct e1000_phy_info *phy = &hw->phy;
+       s32 ret_val;
+       u16 reg;
+       bool link_up;
+
+       DEBUGFUNC("e1000_phy_force_speed_duplex_ife");
+
+       ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &reg);
+       if (ret_val)
+               return ret_val;
+
+       e1000_phy_force_speed_duplex_setup(hw, &reg);
+
+       ret_val = phy->ops.write_reg(hw, PHY_CONTROL, reg);
+       if (ret_val)
+               return ret_val;
+
+       /* Disable MDI-X support for 10/100 */
+       ret_val = phy->ops.read_reg(hw, IFE_PHY_MDIX_CONTROL, &reg);
+       if (ret_val)
+               return ret_val;
+
+       reg &= ~(IFE_PMC_AUTO_MDIX | IFE_PMC_FORCE_MDIX);
+
+       ret_val = phy->ops.write_reg(hw, IFE_PHY_MDIX_CONTROL, reg);
+       if (ret_val)
+               return ret_val;
+
+       DEBUGOUT1("IFE PMC: %X\n", reg);
+
+       usec_delay(1);
+
+       if (!phy->autoneg_wait_to_complete)
+               return ret_val;
+
+       DEBUGOUT("Waiting for forced speed/duplex link on IFE phy.\n");
+
+       ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, 100000,
+                                            &link_up);
+       if (ret_val)
+               return ret_val;
+
+       if (!link_up)
+               DEBUGOUT("Link taking longer than expected.\n");
+
+       /* Try once more */
+       return e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT, 100000,
+                                         &link_up);
+}
+
+/**
+ *  e1000_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex
+ *  @hw: pointer to the HW structure
+ *  @phy_ctrl: pointer to current value of PHY_CONTROL
+ *
+ *  Disables flow control, forces speed/duplex on the MAC, disables auto
+ *  speed detection and PHY auto-negotiation, then configures duplex and
+ *  speed, sets the collision distance and writes the MAC CTRL register.
+ *  The caller must write *phy_ctrl back to the PHY_CONTROL register for
+ *  the PHY-side settings to take effect.
+ **/
+void e1000_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl)
+{
+       struct e1000_mac_info *mac = &hw->mac;
+       u32 reg;
+
+       DEBUGFUNC("e1000_phy_force_speed_duplex_setup");
+
+       /* Flow control is always disabled when speed/duplex are forced. */
+       hw->fc.current_mode = e1000_fc_none;
+
+       /* Force speed/duplex on the mac */
+       reg = E1000_READ_REG(hw, E1000_CTRL);
+       reg |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+       reg &= ~E1000_CTRL_SPD_SEL;
+
+       /* Disable Auto Speed Detection */
+       reg &= ~E1000_CTRL_ASDE;
+
+       /* Disable autoneg on the phy */
+       *phy_ctrl &= ~MII_CR_AUTO_NEG_EN;
+
+       /* Duplex: force half or full on both the MAC and the PHY. */
+       if (mac->forced_speed_duplex & E1000_ALL_HALF_DUPLEX) {
+               reg &= ~E1000_CTRL_FD;
+               *phy_ctrl &= ~MII_CR_FULL_DUPLEX;
+               DEBUGOUT("Half Duplex\n");
+       } else {
+               reg |= E1000_CTRL_FD;
+               *phy_ctrl |= MII_CR_FULL_DUPLEX;
+               DEBUGOUT("Full Duplex\n");
+       }
+
+       /* Speed: force 100Mbps, otherwise default to 10Mbps. */
+       if (mac->forced_speed_duplex & E1000_ALL_100_SPEED) {
+               reg |= E1000_CTRL_SPD_100;
+               *phy_ctrl |= MII_CR_SPEED_100;
+               *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10);
+               DEBUGOUT("Forcing 100mb\n");
+       } else {
+               reg &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
+               *phy_ctrl |= MII_CR_SPEED_10;
+               *phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100);
+               DEBUGOUT("Forcing 10mb\n");
+       }
+
+       e1000_config_collision_dist_generic(hw);
+
+       E1000_WRITE_REG(hw, E1000_CTRL, reg);
+}
+
+/**
+ *  e1000_set_d3_lplu_state_generic - Sets low power link up state for D3
+ *  @hw: pointer to the HW structure
+ *  @active: boolean used to enable/disable lplu
+ *
+ *  Success returns 0, Failure returns 1
+ *
+ *  The low power link up (lplu) state is set to the power management level D3
+ *  and SmartSpeed is disabled when active is TRUE, else clear lplu for D3
+ *  and enable Smartspeed.  LPLU and Smartspeed are mutually exclusive.  LPLU
+ *  is used during Dx states where the power conservation is most important.
+ *  During driver activity, SmartSpeed should be enabled so performance is
+ *  maintained.
+ **/
+s32 e1000_set_d3_lplu_state_generic(struct e1000_hw *hw, bool active)
+{
+       struct e1000_phy_info *phy = &hw->phy;
+       s32 ret_val = E1000_SUCCESS;
+       u16 data;
+
+       DEBUGFUNC("e1000_set_d3_lplu_state_generic");
+
+       /* No PHY register access available; nothing to configure. */
+       if (!(hw->phy.ops.read_reg))
+               goto out;
+
+       /* 'data' caches the power-management register for both branches. */
+       ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data);
+       if (ret_val)
+               goto out;
+
+       if (!active) {
+               /* Disable D3 LPLU, then re-enable SmartSpeed per config. */
+               data &= ~IGP02E1000_PM_D3_LPLU;
+               ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
+                                            data);
+               if (ret_val)
+                       goto out;
+               /*
+                * LPLU and SmartSpeed are mutually exclusive.  LPLU is used
+                * during Dx states where the power conservation is most
+                * important.  During driver activity we should enable
+                * SmartSpeed, so performance is maintained.
+                */
+               if (phy->smart_speed == e1000_smart_speed_on) {
+                       ret_val = phy->ops.read_reg(hw,
+                                                   IGP01E1000_PHY_PORT_CONFIG,
+                                                   &data);
+                       if (ret_val)
+                               goto out;
+
+                       data |= IGP01E1000_PSCFR_SMART_SPEED;
+                       ret_val = phy->ops.write_reg(hw,
+                                                    IGP01E1000_PHY_PORT_CONFIG,
+                                                    data);
+                       if (ret_val)
+                               goto out;
+               } else if (phy->smart_speed == e1000_smart_speed_off) {
+                       ret_val = phy->ops.read_reg(hw,
+                                                    IGP01E1000_PHY_PORT_CONFIG,
+                                                    &data);
+                       if (ret_val)
+                               goto out;
+
+                       data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+                       ret_val = phy->ops.write_reg(hw,
+                                                    IGP01E1000_PHY_PORT_CONFIG,
+                                                    data);
+                       if (ret_val)
+                               goto out;
+               }
+               /* Any other smart_speed value leaves PORT_CONFIG untouched. */
+       } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
+                  (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
+                  (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
+               /*
+                * Enable D3 LPLU only for these advertisement masks; any
+                * other mask leaves the power-management register unchanged.
+                */
+               data |= IGP02E1000_PM_D3_LPLU;
+               ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
+                                             data);
+               if (ret_val)
+                       goto out;
+
+               /* When LPLU is enabled, we should disable SmartSpeed */
+               ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+                                            &data);
+               if (ret_val)
+                       goto out;
+
+               data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+               ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+                                             data);
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_check_downshift_generic - Checks whether a downshift in speed occurred
+ *  @hw: pointer to the HW structure
+ *
+ *  Success returns 0, Failure returns 1
+ *
+ *  A downshift is detected by querying the PHY link health register and
+ *  recorded in phy->speed_downgraded.
+ **/
+s32 e1000_check_downshift_generic(struct e1000_hw *hw)
+{
+       struct e1000_phy_info *phy = &hw->phy;
+       s32 ret_val;
+       u16 status, reg_offset, reg_mask;
+
+       DEBUGFUNC("e1000_check_downshift_generic");
+
+       switch (phy->type) {
+       case e1000_phy_m88:
+       case e1000_phy_gg82563:
+               reg_offset = M88E1000_PHY_SPEC_STATUS;
+               reg_mask = M88E1000_PSSR_DOWNSHIFT;
+               break;
+       case e1000_phy_igp_2:
+       case e1000_phy_igp_3:
+               reg_offset = IGP01E1000_PHY_LINK_HEALTH;
+               reg_mask = IGP01E1000_PLHR_SS_DOWNGRADE;
+               break;
+       default:
+               /* speed downshift not supported */
+               phy->speed_downgraded = FALSE;
+               return E1000_SUCCESS;
+       }
+
+       ret_val = phy->ops.read_reg(hw, reg_offset, &status);
+       if (!ret_val)
+               phy->speed_downgraded = (status & reg_mask) ? TRUE : FALSE;
+
+       return ret_val;
+}
+
+/**
+ *  e1000_check_polarity_m88 - Checks the polarity.
+ *  @hw: pointer to the HW structure
+ *
+ *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
+ *
+ *  Polarity is determined based on the PHY specific status register and
+ *  recorded in phy->cable_polarity.
+ **/
+s32 e1000_check_polarity_m88(struct e1000_hw *hw)
+{
+       struct e1000_phy_info *phy = &hw->phy;
+       s32 ret_val;
+       u16 status;
+
+       DEBUGFUNC("e1000_check_polarity_m88");
+
+       ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &status);
+       if (ret_val)
+               return ret_val;
+
+       if (status & M88E1000_PSSR_REV_POLARITY)
+               phy->cable_polarity = e1000_rev_polarity_reversed;
+       else
+               phy->cable_polarity = e1000_rev_polarity_normal;
+
+       return ret_val;
+}
+
+/**
+ *  e1000_check_polarity_igp - Checks the polarity.
+ *  @hw: pointer to the HW structure
+ *
+ *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
+ *
+ *  Polarity is determined based on the PHY port status register, and the
+ *  current speed (since there is no polarity at 100Mbps).
+ **/
+s32 e1000_check_polarity_igp(struct e1000_hw *hw)
+{
+       struct e1000_phy_info *phy = &hw->phy;
+       s32 ret_val;
+       u16 status, reg_offset, reg_mask;
+
+       DEBUGFUNC("e1000_check_polarity_igp");
+
+       /*
+        * Polarity is determined based on the speed of
+        * our connection.
+        */
+       ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &status);
+       if (ret_val)
+               return ret_val;
+
+       if ((status & IGP01E1000_PSSR_SPEED_MASK) ==
+           IGP01E1000_PSSR_SPEED_1000MBPS) {
+               reg_offset = IGP01E1000_PHY_PCS_INIT_REG;
+               reg_mask = IGP01E1000_PHY_POLARITY_MASK;
+       } else {
+               /*
+                * This really only applies to 10Mbps since
+                * there is no polarity for 100Mbps (always 0).
+                */
+               reg_offset = IGP01E1000_PHY_PORT_STATUS;
+               reg_mask = IGP01E1000_PSSR_POLARITY_REVERSED;
+       }
+
+       ret_val = phy->ops.read_reg(hw, reg_offset, &status);
+       if (!ret_val) {
+               if (status & reg_mask)
+                       phy->cable_polarity = e1000_rev_polarity_reversed;
+               else
+                       phy->cable_polarity = e1000_rev_polarity_normal;
+       }
+
+       return ret_val;
+}
+
+/**
+ *  e1000_check_polarity_ife - Check cable polarity for IFE PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  Polarity is determined based on whether the polarity reversal feature
+ *  is enabled, and recorded in phy->cable_polarity.
+ **/
+s32 e1000_check_polarity_ife(struct e1000_hw *hw)
+{
+       struct e1000_phy_info *phy = &hw->phy;
+       s32 ret_val;
+       u16 status, reg_offset, reg_mask;
+
+       DEBUGFUNC("e1000_check_polarity_ife");
+
+       /* Pick the register/bit to test from the correction setting. */
+       if (phy->polarity_correction) {
+               reg_offset = IFE_PHY_EXTENDED_STATUS_CONTROL;
+               reg_mask = IFE_PESC_POLARITY_REVERSED;
+       } else {
+               reg_offset = IFE_PHY_SPECIAL_CONTROL;
+               reg_mask = IFE_PSC_FORCE_POLARITY;
+       }
+
+       ret_val = phy->ops.read_reg(hw, reg_offset, &status);
+       if (!ret_val) {
+               if (status & reg_mask)
+                       phy->cable_polarity = e1000_rev_polarity_reversed;
+               else
+                       phy->cable_polarity = e1000_rev_polarity_normal;
+       }
+
+       return ret_val;
+}
+
+/**
+ *  e1000_wait_autoneg_generic - Wait for auto-neg completion
+ *  @hw: pointer to the HW structure
+ *
+ *  Waits for auto-negotiation to complete or for the auto-negotiation time
+ *  limit to expire, which ever happens first.
+ **/
+s32 e1000_wait_autoneg_generic(struct e1000_hw *hw)
+{
+       s32 ret_val = E1000_SUCCESS;
+       u16 i, phy_status;
+
+       DEBUGFUNC("e1000_wait_autoneg_generic");
+
+       /* Without PHY register access there is nothing to poll. */
+       if (!(hw->phy.ops.read_reg))
+               return E1000_SUCCESS;
+
+       /* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */
+       for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) {
+               /*
+                * PHY_STATUS is read twice on purpose; only the second
+                * read's value is tested (same double-read pattern as in
+                * e1000_phy_has_link_generic, where the status bit is
+                * described as sticky).
+                */
+               ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
+               if (ret_val)
+                       break;
+               ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
+               if (ret_val)
+                       break;
+               if (phy_status & MII_SR_AUTONEG_COMPLETE)
+                       break;
+               msec_delay(100);
+       }
+
+       /*
+        * PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation
+        * has completed.
+        */
+       return ret_val;
+}
+
+/**
+ *  e1000_phy_has_link_generic - Polls PHY for link
+ *  @hw: pointer to the HW structure
+ *  @iterations: number of times to poll for link
+ *  @usec_interval: delay between polling attempts
+ *  @success: pointer to whether polling was successful or not
+ *
+ *  Polls the PHY status register for link, 'iterations' number of times.
+ **/
+s32 e1000_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
+                               u32 usec_interval, bool *success)
+{
+       s32 ret_val = E1000_SUCCESS;
+       u16 i, phy_status;
+
+       DEBUGFUNC("e1000_phy_has_link_generic");
+
+       /* Without PHY register access we cannot poll; report success. */
+       if (!(hw->phy.ops.read_reg))
+               return E1000_SUCCESS;
+
+       for (i = 0; i < iterations; i++) {
+               /*
+                * Some PHYs require the PHY_STATUS register to be read
+                * twice due to the link bit being sticky.  No harm doing
+                * it across the board.
+                */
+               ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
+               if (ret_val)
+                       /*
+                        * If the first read fails, another entity may have
+                        * ownership of the resources, wait and try again to
+                        * see if they have relinquished the resources yet.
+                        */
+                       usec_delay(usec_interval);
+               /* Second read; overwrites the first read's status/ret_val. */
+               ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
+               if (ret_val)
+                       break;
+               if (phy_status & MII_SR_LINK_STATUS)
+                       break;
+               /* Use the msec-granularity delay for long intervals. */
+               if (usec_interval >= 1000)
+                       msec_delay_irq(usec_interval/1000);
+               else
+                       usec_delay(usec_interval);
+       }
+
+       /* TRUE iff the loop exited early (link seen or read error). */
+       *success = (i < iterations) ? TRUE : FALSE;
+
+       return ret_val;
+}
+
+/**
+ *  e1000_get_cable_length_m88 - Determine cable length for m88 PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  Reads the PHY specific status register to retrieve the cable length
+ *  information.  The cable length is determined by averaging the minimum and
+ *  maximum values to get the "average" cable length.  The m88 PHY has four
+ *  possible cable length values, which are:
+ *     Register Value          Cable Length
+ *     0                       < 50 meters
+ *     1                       50 - 80 meters
+ *     2                       80 - 110 meters
+ *     3                       110 - 140 meters
+ *     4                       > 140 meters
+ **/
+s32 e1000_get_cable_length_m88(struct e1000_hw *hw)
+{
+       struct e1000_phy_info *phy = &hw->phy;
+       s32 ret_val;
+       u16 pssr, idx;
+
+       DEBUGFUNC("e1000_get_cable_length_m88");
+
+       ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &pssr);
+       if (ret_val)
+               return ret_val;
+
+       idx = (pssr & M88E1000_PSSR_CABLE_LENGTH) >>
+             M88E1000_PSSR_CABLE_LENGTH_SHIFT;
+       if (idx >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1)
+               return -E1000_ERR_PHY;
+
+       /* The table entry and its successor bound the length range. */
+       phy->min_cable_length = e1000_m88_cable_length_table[idx];
+       phy->max_cable_length = e1000_m88_cable_length_table[idx + 1];
+       phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_get_cable_length_m88_gen2 - Determine cable length for newer m88 PHYs
+ *  @hw: pointer to the HW structure
+ *
+ *  Determines the cable length for the M88E1340M / I347AT4 PHYs from the
+ *  cable diagnostics registers (page 7), and for the M88E1112 from the VCT
+ *  DSP distance register (page 5) via the standard m88 lookup table.  The
+ *  original page select is saved and restored around the reads.  Returns 0
+ *  on success, -E1000_ERR_PHY for an unsupported PHY id or bad table index.
+ **/
+s32 e1000_get_cable_length_m88_gen2(struct e1000_hw *hw)
+{
+       struct e1000_phy_info *phy = &hw->phy;
+       s32 ret_val;
+       u16 phy_data, phy_data2, index, default_page, is_cm;
+
+       DEBUGFUNC("e1000_get_cable_length_m88_gen2");
+
+       switch (hw->phy.id) {
+       case M88E1340M_E_PHY_ID:
+       case I347AT4_E_PHY_ID:
+               /* Remember the original page select and set it to 7 */
+               ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT,
+                                           &default_page);
+               if (ret_val)
+                       goto out;
+
+               ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0x07);
+               if (ret_val)
+                       goto out;
+
+               /* Get cable length from PHY Cable Diagnostics Control Reg */
+               ret_val = phy->ops.read_reg(hw, (I347AT4_PCDL + phy->addr),
+                                           &phy_data);
+               if (ret_val)
+                       goto out;
+
+               /* Check if the unit of cable length is meters or cm */
+               ret_val = phy->ops.read_reg(hw, I347AT4_PCDC, &phy_data2);
+               if (ret_val)
+                       goto out;
+
+               /*
+                * The length-unit flag lives in the PCDC register just read
+                * into phy_data2; testing phy_data (the raw length value)
+                * here was a bug.
+                */
+               is_cm = !(phy_data2 & I347AT4_PCDC_CABLE_LENGTH_UNIT);
+
+               /* Populate the phy structure with cable length in meters */
+               phy->min_cable_length = phy_data / (is_cm ? 100 : 1);
+               phy->max_cable_length = phy_data / (is_cm ? 100 : 1);
+               phy->cable_length = phy_data / (is_cm ? 100 : 1);
+
+               /* Reset the page select to its original value */
+               ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT,
+                                            default_page);
+               if (ret_val)
+                       goto out;
+               break;
+       case M88E1112_E_PHY_ID:
+               /* Remember the original page select and set it to 5 */
+               ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT,
+                                           &default_page);
+               if (ret_val)
+                       goto out;
+
+               ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0x05);
+               if (ret_val)
+                       goto out;
+
+               ret_val = phy->ops.read_reg(hw, M88E1112_VCT_DSP_DISTANCE,
+                                           &phy_data);
+               if (ret_val)
+                       goto out;
+
+               index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
+                       M88E1000_PSSR_CABLE_LENGTH_SHIFT;
+               if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) {
+                       ret_val = -E1000_ERR_PHY;
+                       goto out;
+               }
+
+               phy->min_cable_length = e1000_m88_cable_length_table[index];
+               phy->max_cable_length = e1000_m88_cable_length_table[index + 1];
+
+               phy->cable_length = (phy->min_cable_length +
+                                    phy->max_cable_length) / 2;
+
+               /* Reset the page select to its original value */
+               ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT,
+                                            default_page);
+               if (ret_val)
+                       goto out;
+
+               break;
+       default:
+               ret_val = -E1000_ERR_PHY;
+               goto out;
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_get_cable_length_igp_2 - Determine cable length for igp2 PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  The automatic gain control (agc) normalizes the amplitude of the
+ *  received signal, adjusting for the attenuation produced by the
+ *  cable.  By reading the AGC registers, which represent the
+ *  combination of coarse and fine gain value, the value can be put
+ *  into a lookup table to obtain the approximate cable length
+ *  for each channel.
+ *
+ *  Returns E1000_SUCCESS, or -E1000_ERR_PHY if a register read fails
+ *  or an AGC reading falls outside the lookup-table bounds.
+ **/
+s32 e1000_get_cable_length_igp_2(struct e1000_hw *hw)
+{
+       struct e1000_phy_info *phy = &hw->phy;
+       s32 ret_val = E1000_SUCCESS;
+       u16 phy_data, i, agc_value = 0;
+       u16 cur_agc_index, max_agc_index = 0;
+       u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1;
+       static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = {
+              IGP02E1000_PHY_AGC_A,
+              IGP02E1000_PHY_AGC_B,
+              IGP02E1000_PHY_AGC_C,
+              IGP02E1000_PHY_AGC_D
+       };
+
+       DEBUGFUNC("e1000_get_cable_length_igp_2");
+
+       /* Read the AGC registers for all channels */
+       for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) {
+               ret_val = phy->ops.read_reg(hw, agc_reg_array[i], &phy_data);
+               if (ret_val)
+                       goto out;
+
+               /*
+                * Getting bits 15:9, which represent the combination of
+                * coarse and fine gain values.  The result is a number
+                * that can be put into the lookup table to obtain the
+                * approximate cable length.
+                */
+               cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) &
+                               IGP02E1000_AGC_LENGTH_MASK;
+
+               /* Array index bound check. */
+               if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) ||
+                   (cur_agc_index == 0)) {
+                       ret_val = -E1000_ERR_PHY;
+                       goto out;
+               }
+
+               /* Remove min & max AGC values from calculation. */
+               /*
+                * Track the indices of the smallest and largest table
+                * entries seen so the two extremes can be subtracted
+                * from the sum below.
+                */
+               if (e1000_igp_2_cable_length_table[min_agc_index] >
+                   e1000_igp_2_cable_length_table[cur_agc_index])
+                       min_agc_index = cur_agc_index;
+               if (e1000_igp_2_cable_length_table[max_agc_index] <
+                   e1000_igp_2_cable_length_table[cur_agc_index])
+                       max_agc_index = cur_agc_index;
+
+               agc_value += e1000_igp_2_cable_length_table[cur_agc_index];
+       }
+
+       /* Average over the remaining channels, extremes excluded. */
+       agc_value -= (e1000_igp_2_cable_length_table[min_agc_index] +
+                     e1000_igp_2_cable_length_table[max_agc_index]);
+       agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2);
+
+       /* Calculate cable length with the error range of +/- 10 meters. */
+       phy->min_cable_length = ((agc_value - IGP02E1000_AGC_RANGE) > 0) ?
+                                (agc_value - IGP02E1000_AGC_RANGE) : 0;
+       phy->max_cable_length = agc_value + IGP02E1000_AGC_RANGE;
+
+       phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_get_phy_info_m88 - Retrieve PHY information
+ *  @hw: pointer to the HW structure
+ *
+ *  Valid for only copper links.  Read the PHY status register (sticky read)
+ *  to verify that link is up.  Read the PHY special control register to
+ *  determine the polarity and 10base-T extended distance.  Read the PHY
+ *  special status register to determine MDI/MDIx and current speed.  If
+ *  speed is 1000, then determine cable length, local and remote receiver.
+ *
+ *  Returns E1000_SUCCESS, -E1000_ERR_CONFIG if media is not copper or
+ *  link is down, or the error from a failed register access.
+ **/
+s32 e1000_get_phy_info_m88(struct e1000_hw *hw)
+{
+       struct e1000_phy_info *phy = &hw->phy;
+       s32  ret_val;
+       u16 phy_data;
+       bool link;
+
+       DEBUGFUNC("e1000_get_phy_info_m88");
+
+       if (phy->media_type != e1000_media_type_copper) {
+               DEBUGOUT("Phy info is only valid for copper media\n");
+               ret_val = -E1000_ERR_CONFIG;
+               goto out;
+       }
+
+       ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
+       if (ret_val)
+               goto out;
+
+       if (!link) {
+               DEBUGOUT("Phy info is only valid if link is up\n");
+               ret_val = -E1000_ERR_CONFIG;
+               goto out;
+       }
+
+       ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+       if (ret_val)
+               goto out;
+
+       phy->polarity_correction = (phy_data & M88E1000_PSCR_POLARITY_REVERSAL)
+                                  ? TRUE : FALSE;
+
+       ret_val = e1000_check_polarity_m88(hw);
+       if (ret_val)
+               goto out;
+
+       ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
+       if (ret_val)
+               goto out;
+
+       phy->is_mdix = (phy_data & M88E1000_PSSR_MDIX) ? TRUE : FALSE;
+
+       /* Cable length and receiver status are only defined at 1000 Mb/s. */
+       if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) {
+               ret_val = hw->phy.ops.get_cable_length(hw);
+               if (ret_val)
+                       goto out;
+
+               ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &phy_data);
+               if (ret_val)
+                       goto out;
+
+               phy->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS)
+                               ? e1000_1000t_rx_status_ok
+                               : e1000_1000t_rx_status_not_ok;
+
+               phy->remote_rx = (phy_data & SR_1000T_REMOTE_RX_STATUS)
+                                ? e1000_1000t_rx_status_ok
+                                : e1000_1000t_rx_status_not_ok;
+       } else {
+               /* Set values to "undefined" */
+               phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
+               phy->local_rx = e1000_1000t_rx_status_undefined;
+               phy->remote_rx = e1000_1000t_rx_status_undefined;
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_get_phy_info_igp - Retrieve igp PHY information
+ *  @hw: pointer to the HW structure
+ *
+ *  Read PHY status to determine if link is up.  If link is up, then
+ *  set/determine 10base-T extended distance and polarity correction.  Read
+ *  PHY port status to determine MDI/MDIx and speed.  Based on the speed,
+ *  determine on the cable length, local and remote receiver.
+ *
+ *  Returns E1000_SUCCESS, -E1000_ERR_CONFIG if link is down, or the
+ *  error from a failed register access.
+ **/
+s32 e1000_get_phy_info_igp(struct e1000_hw *hw)
+{
+       struct e1000_phy_info *phy = &hw->phy;
+       s32 ret_val;
+       u16 data;
+       bool link;
+
+       DEBUGFUNC("e1000_get_phy_info_igp");
+
+       ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
+       if (ret_val)
+               goto out;
+
+       if (!link) {
+               DEBUGOUT("Phy info is only valid if link is up\n");
+               ret_val = -E1000_ERR_CONFIG;
+               goto out;
+       }
+
+       /* IGP PHYs always perform automatic polarity correction. */
+       phy->polarity_correction = TRUE;
+
+       ret_val = e1000_check_polarity_igp(hw);
+       if (ret_val)
+               goto out;
+
+       ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data);
+       if (ret_val)
+               goto out;
+
+       phy->is_mdix = (data & IGP01E1000_PSSR_MDIX) ? TRUE : FALSE;
+
+       /* Cable length and receiver status are only defined at 1000 Mb/s. */
+       if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
+           IGP01E1000_PSSR_SPEED_1000MBPS) {
+               ret_val = phy->ops.get_cable_length(hw);
+               if (ret_val)
+                       goto out;
+
+               ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data);
+               if (ret_val)
+                       goto out;
+
+               phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS)
+                               ? e1000_1000t_rx_status_ok
+                               : e1000_1000t_rx_status_not_ok;
+
+               phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS)
+                                ? e1000_1000t_rx_status_ok
+                                : e1000_1000t_rx_status_not_ok;
+       } else {
+               phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
+               phy->local_rx = e1000_1000t_rx_status_undefined;
+               phy->remote_rx = e1000_1000t_rx_status_undefined;
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_get_phy_info_ife - Retrieves various IFE PHY states
+ *  @hw: pointer to the HW structure
+ *
+ *  Populates "phy" structure with various feature states.
+ *  The IFE PHY is 10/100 only, so cable length and 1000T receiver
+ *  status are always set to "undefined".
+ *
+ *  Returns E1000_SUCCESS, -E1000_ERR_CONFIG if link is down, or the
+ *  error from a failed register access.
+ **/
+s32 e1000_get_phy_info_ife(struct e1000_hw *hw)
+{
+       struct e1000_phy_info *phy = &hw->phy;
+       s32 ret_val;
+       u16 data;
+       bool link;
+
+       DEBUGFUNC("e1000_get_phy_info_ife");
+
+       ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
+       if (ret_val)
+               goto out;
+
+       if (!link) {
+               DEBUGOUT("Phy info is only valid if link is up\n");
+               ret_val = -E1000_ERR_CONFIG;
+               goto out;
+       }
+
+       ret_val = phy->ops.read_reg(hw, IFE_PHY_SPECIAL_CONTROL, &data);
+       if (ret_val)
+               goto out;
+       /* Auto-polarity is enabled unless the disable bit is set. */
+       phy->polarity_correction = (data & IFE_PSC_AUTO_POLARITY_DISABLE)
+                                  ? FALSE : TRUE;
+
+       if (phy->polarity_correction) {
+               ret_val = e1000_check_polarity_ife(hw);
+               if (ret_val)
+                       goto out;
+       } else {
+               /* Polarity is forced */
+               phy->cable_polarity = (data & IFE_PSC_FORCE_POLARITY)
+                                     ? e1000_rev_polarity_reversed
+                                     : e1000_rev_polarity_normal;
+       }
+
+       ret_val = phy->ops.read_reg(hw, IFE_PHY_MDIX_CONTROL, &data);
+       if (ret_val)
+               goto out;
+
+       phy->is_mdix = (data & IFE_PMC_MDIX_STATUS) ? TRUE : FALSE;
+
+       /* The following parameters are undefined for 10/100 operation. */
+       phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
+       phy->local_rx = e1000_1000t_rx_status_undefined;
+       phy->remote_rx = e1000_1000t_rx_status_undefined;
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_phy_sw_reset_generic - PHY software reset
+ *  @hw: pointer to the HW structure
+ *
+ *  Does a software reset of the PHY by reading the PHY control register and
+ *  setting/write the control register reset bit to the PHY.
+ *
+ *  Returns E1000_SUCCESS (also when no read_reg op is installed, in which
+ *  case the reset is silently skipped), or the register access error.
+ **/
+s32 e1000_phy_sw_reset_generic(struct e1000_hw *hw)
+{
+       s32 ret_val = E1000_SUCCESS;
+       u16 phy_ctrl;
+
+       DEBUGFUNC("e1000_phy_sw_reset_generic");
+
+       /* No PHY register access available: nothing to reset. */
+       if (!(hw->phy.ops.read_reg))
+               goto out;
+
+       ret_val = hw->phy.ops.read_reg(hw, PHY_CONTROL, &phy_ctrl);
+       if (ret_val)
+               goto out;
+
+       /* MII_CR_RESET is self-clearing once the PHY completes the reset. */
+       phy_ctrl |= MII_CR_RESET;
+       ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL, phy_ctrl);
+       if (ret_val)
+               goto out;
+
+       usec_delay(1);
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_phy_hw_reset_generic - PHY hardware reset
+ *  @hw: pointer to the HW structure
+ *
+ *  Verify the reset block is not blocking us from resetting.  Acquire
+ *  semaphore (if necessary) and read/set/write the device control reset
+ *  bit in the PHY.  Wait the appropriate delay time for the device to
+ *  reset and release the semaphore (if necessary).
+ *
+ *  Returns E1000_SUCCESS (including when the reset is blocked — see
+ *  below), or an acquire/get_cfg_done error.
+ **/
+s32 e1000_phy_hw_reset_generic(struct e1000_hw *hw)
+{
+       struct e1000_phy_info *phy = &hw->phy;
+       s32 ret_val = E1000_SUCCESS;
+       u32 ctrl;
+
+       DEBUGFUNC("e1000_phy_hw_reset_generic");
+
+       /*
+        * If a manageability/firmware reset block is active, skip the
+        * reset entirely and report success rather than an error.
+        */
+       ret_val = phy->ops.check_reset_block(hw);
+       if (ret_val) {
+               ret_val = E1000_SUCCESS;
+               goto out;
+       }
+
+       ret_val = phy->ops.acquire(hw);
+       if (ret_val)
+               goto out;
+
+       /* Pulse the PHY_RST bit in device control, then restore it. */
+       ctrl = E1000_READ_REG(hw, E1000_CTRL);
+       E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_PHY_RST);
+       E1000_WRITE_FLUSH(hw);
+
+       usec_delay(phy->reset_delay_us);
+
+       E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+       E1000_WRITE_FLUSH(hw);
+
+       usec_delay(150);
+
+       phy->ops.release(hw);
+
+       ret_val = phy->ops.get_cfg_done(hw);
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_get_cfg_done_generic - Generic configuration done
+ *  @hw: pointer to the HW structure
+ *
+ *  Generic function to wait 10 milli-seconds for configuration to complete
+ *  and return success.  No hardware status is polled; the fixed delay is
+ *  assumed sufficient for parts using this generic implementation.
+ **/
+s32 e1000_get_cfg_done_generic(struct e1000_hw *hw)
+{
+       DEBUGFUNC("e1000_get_cfg_done_generic");
+
+       msec_delay_irq(10);
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_phy_init_script_igp3 - Inits the IGP3 PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  Initializes a Intel Gigabit PHY3 when an EEPROM is not present.
+ *
+ *  NOTE(review): every write_reg return value is intentionally ignored
+ *  here and E1000_SUCCESS is always returned — presumably a best-effort
+ *  init sequence from Intel shared code; confirm before adding checks.
+ **/
+s32 e1000_phy_init_script_igp3(struct e1000_hw *hw)
+{
+       DEBUGOUT("Running IGP 3 PHY init script\n");
+
+       /* PHY init IGP 3 */
+       /* Enable rise/fall, 10-mode work in class-A */
+       hw->phy.ops.write_reg(hw, 0x2F5B, 0x9018);
+       /* Remove all caps from Replica path filter */
+       hw->phy.ops.write_reg(hw, 0x2F52, 0x0000);
+       /* Bias trimming for ADC, AFE and Driver (Default) */
+       hw->phy.ops.write_reg(hw, 0x2FB1, 0x8B24);
+       /* Increase Hybrid poly bias */
+       hw->phy.ops.write_reg(hw, 0x2FB2, 0xF8F0);
+       /* Add 4% to Tx amplitude in Gig mode */
+       hw->phy.ops.write_reg(hw, 0x2010, 0x10B0);
+       /* Disable trimming (TTT) */
+       hw->phy.ops.write_reg(hw, 0x2011, 0x0000);
+       /* Poly DC correction to 94.6% + 2% for all channels */
+       hw->phy.ops.write_reg(hw, 0x20DD, 0x249A);
+       /* ABS DC correction to 95.9% */
+       hw->phy.ops.write_reg(hw, 0x20DE, 0x00D3);
+       /* BG temp curve trim */
+       hw->phy.ops.write_reg(hw, 0x28B4, 0x04CE);
+       /* Increasing ADC OPAMP stage 1 currents to max */
+       hw->phy.ops.write_reg(hw, 0x2F70, 0x29E4);
+       /* Force 1000 ( required for enabling PHY regs configuration) */
+       hw->phy.ops.write_reg(hw, 0x0000, 0x0140);
+       /* Set upd_freq to 6 */
+       hw->phy.ops.write_reg(hw, 0x1F30, 0x1606);
+       /* Disable NPDFE */
+       hw->phy.ops.write_reg(hw, 0x1F31, 0xB814);
+       /* Disable adaptive fixed FFE (Default) */
+       hw->phy.ops.write_reg(hw, 0x1F35, 0x002A);
+       /* Enable FFE hysteresis */
+       hw->phy.ops.write_reg(hw, 0x1F3E, 0x0067);
+       /* Fixed FFE for short cable lengths */
+       hw->phy.ops.write_reg(hw, 0x1F54, 0x0065);
+       /* Fixed FFE for medium cable lengths */
+       hw->phy.ops.write_reg(hw, 0x1F55, 0x002A);
+       /* Fixed FFE for long cable lengths */
+       hw->phy.ops.write_reg(hw, 0x1F56, 0x002A);
+       /* Enable Adaptive Clip Threshold */
+       hw->phy.ops.write_reg(hw, 0x1F72, 0x3FB0);
+       /* AHT reset limit to 1 */
+       hw->phy.ops.write_reg(hw, 0x1F76, 0xC0FF);
+       /* Set AHT master delay to 127 msec */
+       hw->phy.ops.write_reg(hw, 0x1F77, 0x1DEC);
+       /* Set scan bits for AHT */
+       hw->phy.ops.write_reg(hw, 0x1F78, 0xF9EF);
+       /* Set AHT Preset bits */
+       hw->phy.ops.write_reg(hw, 0x1F79, 0x0210);
+       /* Change integ_factor of channel A to 3 */
+       hw->phy.ops.write_reg(hw, 0x1895, 0x0003);
+       /* Change prop_factor of channels BCD to 8 */
+       hw->phy.ops.write_reg(hw, 0x1796, 0x0008);
+       /* Change cg_icount + enable integbp for channels BCD */
+       hw->phy.ops.write_reg(hw, 0x1798, 0xD008);
+       /*
+        * Change cg_icount + enable integbp + change prop_factor_master
+        * to 8 for channel A
+        */
+       hw->phy.ops.write_reg(hw, 0x1898, 0xD918);
+       /* Disable AHT in Slave mode on channel A */
+       hw->phy.ops.write_reg(hw, 0x187A, 0x0800);
+       /*
+        * Enable LPLU and disable AN to 1000 in non-D0a states,
+        * Enable SPD+B2B
+        */
+       hw->phy.ops.write_reg(hw, 0x0019, 0x008D);
+       /* Enable restart AN on an1000_dis change */
+       hw->phy.ops.write_reg(hw, 0x001B, 0x2080);
+       /* Enable wh_fifo read clock in 10/100 modes */
+       hw->phy.ops.write_reg(hw, 0x0014, 0x0045);
+       /* Restart AN, Speed selection is 1000 */
+       hw->phy.ops.write_reg(hw, 0x0000, 0x1340);
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_get_phy_type_from_id - Get PHY type from id
+ *  @phy_id: phy_id read from the phy
+ *
+ *  Returns the phy type from the id.  Unrecognized ids map to
+ *  e1000_phy_unknown.
+ **/
+enum e1000_phy_type e1000_get_phy_type_from_id(u32 phy_id)
+{
+       enum e1000_phy_type phy_type = e1000_phy_unknown;
+
+       switch (phy_id) {
+       case M88E1000_I_PHY_ID:
+       case M88E1000_E_PHY_ID:
+       case M88E1111_I_PHY_ID:
+       case M88E1011_I_PHY_ID:
+       case I347AT4_E_PHY_ID:
+       case M88E1112_E_PHY_ID:
+       case M88E1340M_E_PHY_ID:
+               phy_type = e1000_phy_m88;
+               break;
+       case IGP01E1000_I_PHY_ID: /* IGP 1 & 2 share this */
+               phy_type = e1000_phy_igp_2;
+               break;
+       case GG82563_E_PHY_ID:
+               phy_type = e1000_phy_gg82563;
+               break;
+       case IGP03E1000_E_PHY_ID:
+               phy_type = e1000_phy_igp_3;
+               break;
+       case IFE_E_PHY_ID:
+       case IFE_PLUS_E_PHY_ID:
+       case IFE_C_E_PHY_ID:
+               phy_type = e1000_phy_ife;
+               break;
+       case I82580_I_PHY_ID:
+               phy_type = e1000_phy_82580;
+               break;
+       default:
+               phy_type = e1000_phy_unknown;
+               break;
+       }
+       return phy_type;
+}
+
+/**
+ *  e1000_determine_phy_address - Determines PHY address.
+ *  @hw: pointer to the HW structure
+ *
+ *  This uses a trial and error method to loop through possible PHY
+ *  addresses. It tests each by reading the PHY ID registers and
+ *  checking for a match.  Each candidate address is retried up to 10
+ *  times with a 1 ms delay between attempts.
+ *
+ *  On success hw->phy.addr holds the working address and hw->phy.id the
+ *  detected id; returns -E1000_ERR_PHY_TYPE if no address responds with
+ *  a recognized id.
+ **/
+s32 e1000_determine_phy_address(struct e1000_hw *hw)
+{
+       s32 ret_val = -E1000_ERR_PHY_TYPE;
+       u32 phy_addr = 0;
+       u32 i;
+       enum e1000_phy_type phy_type = e1000_phy_unknown;
+
+       /* Clear any stale id before probing. */
+       hw->phy.id = phy_type;
+
+       for (phy_addr = 0; phy_addr < E1000_MAX_PHY_ADDR; phy_addr++) {
+               hw->phy.addr = phy_addr;
+               i = 0;
+
+               do {
+                       e1000_get_phy_id(hw);
+                       phy_type = e1000_get_phy_type_from_id(hw->phy.id);
+
+                       /*
+                        * If phy_type is valid, break - we found our
+                        * PHY address
+                        */
+                       if (phy_type != e1000_phy_unknown) {
+                               ret_val = E1000_SUCCESS;
+                               goto out;
+                       }
+                       msec_delay(1);
+                       i++;
+               } while (i < 10);
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ * e1000_power_up_phy_copper - Restore copper link in case of PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * In the case of a PHY power down to save power, or to turn off link during a
+ * driver unload, or wake on lan is not enabled, restore the link to previous
+ * settings.  Clears MII_CR_POWER_DOWN in the PHY control register.
+ **/
+void e1000_power_up_phy_copper(struct e1000_hw *hw)
+{
+       u16 mii_reg = 0;
+
+       /* The PHY will retain its settings across a power down/up cycle */
+       hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
+       mii_reg &= ~MII_CR_POWER_DOWN;
+       hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
+}
+
+/**
+ * e1000_power_down_phy_copper - Power down copper PHY
+ * @hw: pointer to the HW structure
+ *
+ * Power down the PHY to save power, to turn off link during a driver unload,
+ * or when wake on lan is not enabled.  Sets MII_CR_POWER_DOWN in the PHY
+ * control register.  (The original kernel-doc here was a copy-paste of the
+ * power-up description and wrongly claimed this restores the link.)
+ **/
+void e1000_power_down_phy_copper(struct e1000_hw *hw)
+{
+       u16 mii_reg = 0;
+
+       /* The PHY will retain its settings across a power down/up cycle */
+       hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
+       mii_reg |= MII_CR_POWER_DOWN;
+       hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
+       msec_delay(1);
+}
+
+/**
+ *  e1000_check_polarity_82577 - Checks the polarity.
+ *  @hw: pointer to the HW structure
+ *
+ *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
+ *
+ *  Polarity is determined based on the PHY specific status register.
+ *  On success, phy->cable_polarity is updated; on a read failure it is
+ *  left untouched.
+ **/
+s32 e1000_check_polarity_82577(struct e1000_hw *hw)
+{
+       struct e1000_phy_info *phy = &hw->phy;
+       s32 ret_val;
+       u16 data;
+
+       DEBUGFUNC("e1000_check_polarity_82577");
+
+       ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data);
+
+       if (!ret_val)
+               phy->cable_polarity = (data & I82577_PHY_STATUS2_REV_POLARITY)
+                                     ? e1000_rev_polarity_reversed
+                                     : e1000_rev_polarity_normal;
+
+       return ret_val;
+}
+
+/**
+ *  e1000_phy_force_speed_duplex_82577 - Force speed/duplex for I82577 PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  Calls the PHY setup function to force speed and duplex.  If
+ *  autoneg_wait_to_complete is set, waits (twice, up to PHY_FORCE_LIMIT
+ *  polls of 100 ms each) for forced link to come up; a missing link is
+ *  only logged, not treated as an error.
+ **/
+s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw)
+{
+       struct e1000_phy_info *phy = &hw->phy;
+       s32 ret_val;
+       u16 phy_data;
+       bool link;
+
+       DEBUGFUNC("e1000_phy_force_speed_duplex_82577");
+
+       ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
+       if (ret_val)
+               goto out;
+
+       /* Modifies phy_data in place to the forced-speed/duplex settings. */
+       e1000_phy_force_speed_duplex_setup(hw, &phy_data);
+
+       ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data);
+       if (ret_val)
+               goto out;
+
+       usec_delay(1);
+
+       if (phy->autoneg_wait_to_complete) {
+               DEBUGOUT("Waiting for forced speed/duplex link on 82577 phy\n");
+
+               ret_val = e1000_phy_has_link_generic(hw,
+                                                    PHY_FORCE_LIMIT,
+                                                    100000,
+                                                    &link);
+               if (ret_val)
+                       goto out;
+
+               if (!link)
+                       DEBUGOUT("Link taking longer than expected.\n");
+
+               /* Try once more */
+               ret_val = e1000_phy_has_link_generic(hw,
+                                                    PHY_FORCE_LIMIT,
+                                                    100000,
+                                                    &link);
+               if (ret_val)
+                       goto out;
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_get_phy_info_82577 - Retrieve I82577 PHY information
+ *  @hw: pointer to the HW structure
+ *
+ *  Read PHY status to determine if link is up.  If link is up, then
+ *  set/determine 10base-T extended distance and polarity correction.  Read
+ *  PHY port status to determine MDI/MDIx and speed.  Based on the speed,
+ *  determine on the cable length, local and remote receiver.
+ *
+ *  Returns E1000_SUCCESS, -E1000_ERR_CONFIG if link is down, or the
+ *  error from a failed register access.
+ **/
+s32 e1000_get_phy_info_82577(struct e1000_hw *hw)
+{
+       struct e1000_phy_info *phy = &hw->phy;
+       s32 ret_val;
+       u16 data;
+       bool link;
+
+       DEBUGFUNC("e1000_get_phy_info_82577");
+
+       ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
+       if (ret_val)
+               goto out;
+
+       if (!link) {
+               DEBUGOUT("Phy info is only valid if link is up\n");
+               ret_val = -E1000_ERR_CONFIG;
+               goto out;
+       }
+
+       /* 82577 always performs automatic polarity correction. */
+       phy->polarity_correction = TRUE;
+
+       ret_val = e1000_check_polarity_82577(hw);
+       if (ret_val)
+               goto out;
+
+       ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data);
+       if (ret_val)
+               goto out;
+
+       phy->is_mdix = (data & I82577_PHY_STATUS2_MDIX) ? TRUE : FALSE;
+
+       /* Cable length and receiver status are only defined at 1000 Mb/s. */
+       if ((data & I82577_PHY_STATUS2_SPEED_MASK) ==
+           I82577_PHY_STATUS2_SPEED_1000MBPS) {
+               ret_val = hw->phy.ops.get_cable_length(hw);
+               if (ret_val)
+                       goto out;
+
+               ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data);
+               if (ret_val)
+                       goto out;
+
+               phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS)
+                               ? e1000_1000t_rx_status_ok
+                               : e1000_1000t_rx_status_not_ok;
+
+               phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS)
+                                ? e1000_1000t_rx_status_ok
+                                : e1000_1000t_rx_status_not_ok;
+       } else {
+               phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
+               phy->local_rx = e1000_1000t_rx_status_undefined;
+               phy->remote_rx = e1000_1000t_rx_status_undefined;
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  e1000_get_cable_length_82577 - Determine cable length for 82577 PHY
+ *  @hw: pointer to the HW structure
+ *
+ * Reads the diagnostic status register and verifies result is valid before
+ * placing it in the phy_cable_length field.
+ *
+ * Returns E1000_SUCCESS, or -E1000_ERR_PHY if the read fails or the
+ * reading equals the "undefined" sentinel.  Note the sentinel value is
+ * still stored in phy->cable_length in the latter case.
+ **/
+s32 e1000_get_cable_length_82577(struct e1000_hw *hw)
+{
+       struct e1000_phy_info *phy = &hw->phy;
+       s32 ret_val;
+       u16 phy_data, length;
+
+       DEBUGFUNC("e1000_get_cable_length_82577");
+
+       ret_val = phy->ops.read_reg(hw, I82577_PHY_DIAG_STATUS, &phy_data);
+       if (ret_val)
+               goto out;
+
+       length = (phy_data & I82577_DSTATUS_CABLE_LENGTH) >>
+                I82577_DSTATUS_CABLE_LENGTH_SHIFT;
+
+       if (length == E1000_CABLE_LENGTH_UNDEFINED)
+               ret_val = -E1000_ERR_PHY;
+
+       phy->cable_length = length;
+
+out:
+       return ret_val;
+}
diff --git a/lib/librte_pmd_igb/igb/e1000_phy.h b/lib/librte_pmd_igb/igb/e1000_phy.h
new file mode 100644 (file)
index 0000000..1b21430
--- /dev/null
@@ -0,0 +1,217 @@
+/******************************************************************************
+
+  Copyright (c) 2001-2011, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#ifndef _E1000_PHY_H_
+#define _E1000_PHY_H_
+
+void e1000_init_phy_ops_generic(struct e1000_hw *hw);
+s32  e1000_null_read_reg(struct e1000_hw *hw, u32 offset, u16 *data);
+void e1000_null_phy_generic(struct e1000_hw *hw);
+s32  e1000_null_lplu_state(struct e1000_hw *hw, bool active);
+s32  e1000_null_write_reg(struct e1000_hw *hw, u32 offset, u16 data);
+s32  e1000_check_downshift_generic(struct e1000_hw *hw);
+s32  e1000_check_polarity_m88(struct e1000_hw *hw);
+s32  e1000_check_polarity_igp(struct e1000_hw *hw);
+s32  e1000_check_polarity_ife(struct e1000_hw *hw);
+s32  e1000_check_reset_block_generic(struct e1000_hw *hw);
+s32  e1000_copper_link_setup_igp(struct e1000_hw *hw);
+s32  e1000_copper_link_setup_m88(struct e1000_hw *hw);
+s32  e1000_copper_link_setup_m88_gen2(struct e1000_hw *hw);
+s32  e1000_phy_force_speed_duplex_igp(struct e1000_hw *hw);
+s32  e1000_phy_force_speed_duplex_m88(struct e1000_hw *hw);
+s32  e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw);
+s32  e1000_get_cable_length_m88(struct e1000_hw *hw);
+s32  e1000_get_cable_length_m88_gen2(struct e1000_hw *hw);
+s32  e1000_get_cable_length_igp_2(struct e1000_hw *hw);
+s32  e1000_get_cfg_done_generic(struct e1000_hw *hw);
+s32  e1000_get_phy_id(struct e1000_hw *hw);
+s32  e1000_get_phy_info_igp(struct e1000_hw *hw);
+s32  e1000_get_phy_info_m88(struct e1000_hw *hw);
+s32  e1000_get_phy_info_ife(struct e1000_hw *hw);
+s32  e1000_phy_sw_reset_generic(struct e1000_hw *hw);
+void e1000_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl);
+s32  e1000_phy_hw_reset_generic(struct e1000_hw *hw);
+s32  e1000_phy_reset_dsp_generic(struct e1000_hw *hw);
+s32  e1000_read_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 *data);
+s32  e1000_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data);
+s32  e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data);
+s32  e1000_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data);
+s32  e1000_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data);
+s32  e1000_set_d3_lplu_state_generic(struct e1000_hw *hw, bool active);
+s32  e1000_setup_copper_link_generic(struct e1000_hw *hw);
+s32  e1000_wait_autoneg_generic(struct e1000_hw *hw);
+s32  e1000_write_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 data);
+s32  e1000_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data);
+s32  e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data);
+s32  e1000_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data);
+s32  e1000_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data);
+s32  e1000_phy_reset_dsp(struct e1000_hw *hw);
+s32  e1000_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
+                                u32 usec_interval, bool *success);
+s32  e1000_phy_init_script_igp3(struct e1000_hw *hw);
+enum e1000_phy_type e1000_get_phy_type_from_id(u32 phy_id);
+s32  e1000_determine_phy_address(struct e1000_hw *hw);
+void e1000_power_up_phy_copper(struct e1000_hw *hw);
+void e1000_power_down_phy_copper(struct e1000_hw *hw);
+s32  e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
+s32  e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
+s32  e1000_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data);
+s32  e1000_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data);
+s32  e1000_copper_link_setup_82577(struct e1000_hw *hw);
+s32  e1000_check_polarity_82577(struct e1000_hw *hw);
+s32  e1000_get_phy_info_82577(struct e1000_hw *hw);
+s32  e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw);
+s32  e1000_get_cable_length_82577(struct e1000_hw *hw);
+
+#define E1000_MAX_PHY_ADDR                4
+
+/* IGP01E1000 Specific Registers */
+#define IGP01E1000_PHY_PORT_CONFIG        0x10 /* Port Config */
+#define IGP01E1000_PHY_PORT_STATUS        0x11 /* Status */
+#define IGP01E1000_PHY_PORT_CTRL          0x12 /* Control */
+#define IGP01E1000_PHY_LINK_HEALTH        0x13 /* PHY Link Health */
+#define IGP01E1000_GMII_FIFO              0x14 /* GMII FIFO */
+#define IGP01E1000_PHY_CHANNEL_QUALITY    0x15 /* PHY Channel Quality */
+#define IGP02E1000_PHY_POWER_MGMT         0x19 /* Power Management */
+#define IGP01E1000_PHY_PAGE_SELECT        0x1F /* Page Select */
+#define BM_PHY_PAGE_SELECT                22   /* Page Select for BM */
+#define IGP_PAGE_SHIFT                    5
+#define PHY_REG_MASK                      0x1F
+
+#define HV_INTC_FC_PAGE_START             768
+#define I82578_ADDR_REG                   29
+#define I82577_ADDR_REG                   16
+#define I82577_CFG_REG                    22
+#define I82577_CFG_ASSERT_CRS_ON_TX       (1 << 15)
+#define I82577_CFG_ENABLE_DOWNSHIFT       (3 << 10) /* auto downshift 100/10 */
+#define I82577_CTRL_REG                   23
+
+/* 82577 specific PHY registers */
+#define I82577_PHY_CTRL_2            18
+#define I82577_PHY_LBK_CTRL          19
+#define I82577_PHY_STATUS_2          26
+#define I82577_PHY_DIAG_STATUS       31
+
+/* I82577 PHY Status 2 */
+#define I82577_PHY_STATUS2_REV_POLARITY   0x0400
+#define I82577_PHY_STATUS2_MDIX           0x0800
+#define I82577_PHY_STATUS2_SPEED_MASK     0x0300
+#define I82577_PHY_STATUS2_SPEED_1000MBPS 0x0200
+#define I82577_PHY_STATUS2_SPEED_100MBPS  0x0100
+
+/* I82577 PHY Control 2 */
+#define I82577_PHY_CTRL2_AUTO_MDIX        0x0400
+#define I82577_PHY_CTRL2_FORCE_MDI_MDIX   0x0200
+
+/* I82577 PHY Diagnostics Status */
+#define I82577_DSTATUS_CABLE_LENGTH       0x03FC
+#define I82577_DSTATUS_CABLE_LENGTH_SHIFT 2
+
+/* 82580 PHY Power Management */
+#define E1000_82580_PHY_POWER_MGMT        0xE14
+#define E1000_82580_PM_SPD                0x0001 /* Smart Power Down */
+#define E1000_82580_PM_D0_LPLU            0x0002 /* For D0a states */
+#define E1000_82580_PM_D3_LPLU            0x0004 /* For all other states */
+
+#define IGP01E1000_PHY_PCS_INIT_REG       0x00B4
+#define IGP01E1000_PHY_POLARITY_MASK      0x0078
+
+#define IGP01E1000_PSCR_AUTO_MDIX         0x1000
+#define IGP01E1000_PSCR_FORCE_MDI_MDIX    0x2000 /* 0=MDI, 1=MDIX */
+
+#define IGP01E1000_PSCFR_SMART_SPEED      0x0080
+
+/* Enable flexible speed on link-up */
+#define IGP01E1000_GMII_FLEX_SPD          0x0010
+#define IGP01E1000_GMII_SPD               0x0020 /* Enable SPD */
+
+#define IGP02E1000_PM_SPD                 0x0001 /* Smart Power Down */
+#define IGP02E1000_PM_D0_LPLU             0x0002 /* For D0a states */
+#define IGP02E1000_PM_D3_LPLU             0x0004 /* For all other states */
+
+#define IGP01E1000_PLHR_SS_DOWNGRADE      0x8000
+
+#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002
+#define IGP01E1000_PSSR_MDIX              0x0800
+#define IGP01E1000_PSSR_SPEED_MASK        0xC000
+#define IGP01E1000_PSSR_SPEED_1000MBPS    0xC000
+
+#define IGP02E1000_PHY_CHANNEL_NUM        4
+#define IGP02E1000_PHY_AGC_A              0x11B1
+#define IGP02E1000_PHY_AGC_B              0x12B1
+#define IGP02E1000_PHY_AGC_C              0x14B1
+#define IGP02E1000_PHY_AGC_D              0x18B1
+
+#define IGP02E1000_AGC_LENGTH_SHIFT       9   /* Coarse - 15:13, Fine - 12:9 */
+#define IGP02E1000_AGC_LENGTH_MASK        0x7F
+#define IGP02E1000_AGC_RANGE              15
+
+#define IGP03E1000_PHY_MISC_CTRL          0x1B
+#define IGP03E1000_PHY_MISC_DUPLEX_MANUAL_SET  0x1000 /* Manually Set Duplex */
+
+#define E1000_CABLE_LENGTH_UNDEFINED      0xFF
+
+#define E1000_KMRNCTRLSTA_OFFSET          0x001F0000
+#define E1000_KMRNCTRLSTA_OFFSET_SHIFT    16
+#define E1000_KMRNCTRLSTA_REN             0x00200000
+#define E1000_KMRNCTRLSTA_DIAG_OFFSET     0x3    /* Kumeran Diagnostic */
+#define E1000_KMRNCTRLSTA_TIMEOUTS        0x4    /* Kumeran Timeouts */
+#define E1000_KMRNCTRLSTA_INBAND_PARAM    0x9    /* Kumeran InBand Parameters */
+#define E1000_KMRNCTRLSTA_IBIST_DISABLE   0x0200 /* Kumeran IBIST Disable */
+#define E1000_KMRNCTRLSTA_DIAG_NELPBK     0x1000 /* Nearend Loopback mode */
+
+#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10
+#define IFE_PHY_SPECIAL_CONTROL     0x11 /* 100BaseTx PHY Special Control */
+#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY Special and LED Control */
+#define IFE_PHY_MDIX_CONTROL        0x1C /* MDI/MDI-X Control */
+
+/* IFE PHY Extended Status Control */
+#define IFE_PESC_POLARITY_REVERSED    0x0100
+
+/* IFE PHY Special Control */
+#define IFE_PSC_AUTO_POLARITY_DISABLE      0x0010
+#define IFE_PSC_FORCE_POLARITY             0x0020
+#define IFE_PSC_DISABLE_DYNAMIC_POWER_DOWN 0x0100
+
+/* IFE PHY Special Control and LED Control */
+#define IFE_PSCL_PROBE_MODE            0x0020
+#define IFE_PSCL_PROBE_LEDS_OFF        0x0006 /* Force LEDs 0 and 2 off */
+#define IFE_PSCL_PROBE_LEDS_ON         0x0007 /* Force LEDs 0 and 2 on */
+
+/* IFE PHY MDIX Control */
+#define IFE_PMC_MDIX_STATUS      0x0020 /* 1=MDI-X, 0=MDI */
+#define IFE_PMC_FORCE_MDIX       0x0040 /* 1=force MDI-X, 0=force MDI */
+#define IFE_PMC_AUTO_MDIX        0x0080 /* 1=enable auto MDI/MDI-X, 0=disable */
+
+#endif
diff --git a/lib/librte_pmd_igb/igb/e1000_regs.h b/lib/librte_pmd_igb/igb/e1000_regs.h
new file mode 100644 (file)
index 0000000..6b902ea
--- /dev/null
@@ -0,0 +1,574 @@
+/******************************************************************************
+
+  Copyright (c) 2001-2011, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#ifndef _E1000_REGS_H_
+#define _E1000_REGS_H_
+
+#define E1000_CTRL     0x00000  /* Device Control - RW */
+#define E1000_CTRL_DUP 0x00004  /* Device Control Duplicate (Shadow) - RW */
+#define E1000_STATUS   0x00008  /* Device Status - RO */
+#define E1000_EECD     0x00010  /* EEPROM/Flash Control - RW */
+#define E1000_EERD     0x00014  /* EEPROM Read - RW */
+#define E1000_CTRL_EXT 0x00018  /* Extended Device Control - RW */
+#define E1000_FLA      0x0001C  /* Flash Access - RW */
+#define E1000_MDIC     0x00020  /* MDI Control - RW */
+#define E1000_MDICNFG  0x00E04  /* MDI Config - RW */
+#define E1000_REGISTER_SET_SIZE        0x20000 /* CSR Size */
+#define E1000_EEPROM_INIT_CTRL_WORD_2  0x0F /* EEPROM Init Ctrl Word 2 */
+#define E1000_BARCTRL                  0x5BBC /* BAR ctrl reg */
+#define E1000_BARCTRL_FLSIZE           0x0700 /* BAR ctrl Flsize */
+#define E1000_BARCTRL_CSRSIZE          0x2000 /* BAR ctrl CSR size */
+#define E1000_SCTL     0x00024  /* SerDes Control - RW */
+#define E1000_FCAL     0x00028  /* Flow Control Address Low - RW */
+#define E1000_FCAH     0x0002C  /* Flow Control Address High -RW */
+#define E1000_FEXT     0x0002C  /* Future Extended - RW */
+#define E1000_FEXTNVM4 0x00024  /* Future Extended NVM 4 - RW */
+#define E1000_FEXTNVM  0x00028  /* Future Extended NVM - RW */
+#define E1000_FCT      0x00030  /* Flow Control Type - RW */
+#define E1000_CONNSW   0x00034  /* Copper/Fiber switch control - RW */
+#define E1000_VET      0x00038  /* VLAN Ether Type - RW */
+#define E1000_ICR      0x000C0  /* Interrupt Cause Read - R/clr */
+#define E1000_ITR      0x000C4  /* Interrupt Throttling Rate - RW */
+#define E1000_ICS      0x000C8  /* Interrupt Cause Set - WO */
+#define E1000_IMS      0x000D0  /* Interrupt Mask Set - RW */
+#define E1000_IMC      0x000D8  /* Interrupt Mask Clear - WO */
+#define E1000_IAM      0x000E0  /* Interrupt Acknowledge Auto Mask */
+#define E1000_RCTL     0x00100  /* Rx Control - RW */
+#define E1000_FCTTV    0x00170  /* Flow Control Transmit Timer Value - RW */
+#define E1000_TXCW     0x00178  /* Tx Configuration Word - RW */
+#define E1000_RXCW     0x00180  /* Rx Configuration Word - RO */
+#define E1000_EICR     0x01580  /* Ext. Interrupt Cause Read - R/clr */
+#define E1000_EITR(_n) (0x01680 + (0x4 * (_n)))
+#define E1000_EICS     0x01520  /* Ext. Interrupt Cause Set - WO */
+#define E1000_EIMS     0x01524  /* Ext. Interrupt Mask Set/Read - RW */
+#define E1000_EIMC     0x01528  /* Ext. Interrupt Mask Clear - WO */
+#define E1000_EIAC     0x0152C  /* Ext. Interrupt Auto Clear - RW */
+#define E1000_EIAM     0x01530  /* Ext. Interrupt Ack Auto Clear Mask - RW */
+#define E1000_GPIE     0x01514  /* General Purpose Interrupt Enable - RW */
+#define E1000_IVAR0    0x01700  /* Interrupt Vector Allocation (array) - RW */
+#define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */
+#define E1000_TCTL     0x00400  /* Tx Control - RW */
+#define E1000_TCTL_EXT 0x00404  /* Extended Tx Control - RW */
+#define E1000_TIPG     0x00410  /* Tx Inter-packet gap -RW */
+#define E1000_TBT      0x00448  /* Tx Burst Timer - RW */
+#define E1000_AIT      0x00458  /* Adaptive Interframe Spacing Throttle - RW */
+#define E1000_LEDCTL   0x00E00  /* LED Control - RW */
+#define E1000_EXTCNF_CTRL  0x00F00  /* Extended Configuration Control */
+#define E1000_EXTCNF_SIZE  0x00F08  /* Extended Configuration Size */
+#define E1000_PHY_CTRL     0x00F10  /* PHY Control Register in CSR */
+#define E1000_PBA      0x01000  /* Packet Buffer Allocation - RW */
+#define E1000_PBS      0x01008  /* Packet Buffer Size */
+#define E1000_EEMNGCTL 0x01010  /* MNG EEprom Control */
+#define E1000_EEARBC   0x01024  /* EEPROM Auto Read Bus Control */
+#define E1000_FLASHT   0x01028  /* FLASH Timer Register */
+#define E1000_EEWR     0x0102C  /* EEPROM Write Register - RW */
+#define E1000_FLSWCTL  0x01030  /* FLASH control register */
+#define E1000_FLSWDATA 0x01034  /* FLASH data register */
+#define E1000_FLSWCNT  0x01038  /* FLASH Access Counter */
+#define E1000_FLOP     0x0103C  /* FLASH Opcode Register */
+#define E1000_I2CCMD   0x01028  /* SFPI2C Command Register - RW */
+#define E1000_I2CPARAMS 0x0102C /* SFPI2C Parameters Register - RW */
+#define E1000_WDSTP    0x01040  /* Watchdog Setup - RW */
+#define E1000_SWDSTS   0x01044  /* SW Device Status - RW */
+#define E1000_FRTIMER  0x01048  /* Free Running Timer - RW */
+#define E1000_TCPTIMER 0x0104C  /* TCP Timer - RW */
+#define E1000_VPDDIAG  0x01060  /* VPD Diagnostic - RO */
+#define E1000_ICR_V2   0x01500  /* Interrupt Cause - new location - RC */
+#define E1000_ICS_V2   0x01504  /* Interrupt Cause Set - new location - WO */
+#define E1000_IMS_V2   0x01508  /* Interrupt Mask Set/Read - new location - RW */
+#define E1000_IMC_V2   0x0150C  /* Interrupt Mask Clear - new location - WO */
+#define E1000_IAM_V2   0x01510  /* Interrupt Ack Auto Mask - new location - RW */
+#define E1000_ERT      0x02008  /* Early Rx Threshold - RW */
+#define E1000_FCRTL    0x02160  /* Flow Control Receive Threshold Low - RW */
+#define E1000_FCRTH    0x02168  /* Flow Control Receive Threshold High - RW */
+#define E1000_PSRCTL   0x02170  /* Packet Split Receive Control - RW */
+#define E1000_RDFPCQ(_n)  (0x02430 + (0x4 * (_n)))
+#define E1000_PBRTH    0x02458  /* PB Rx Arbitration Threshold - RW */
+#define E1000_FCRTV    0x02460  /* Flow Control Refresh Timer Value - RW */
+/* Split and Replication Rx Control - RW */
+#define E1000_RDPUMB   0x025CC  /* DMA Rx Descriptor uC Mailbox - RW */
+#define E1000_RDPUAD   0x025D0  /* DMA Rx Descriptor uC Addr Command - RW */
+#define E1000_RDPUWD   0x025D4  /* DMA Rx Descriptor uC Data Write - RW */
+#define E1000_RDPURD   0x025D8  /* DMA Rx Descriptor uC Data Read - RW */
+#define E1000_RDPUCTL  0x025DC  /* DMA Rx Descriptor uC Control - RW */
+#define E1000_PBDIAG   0x02458  /* Packet Buffer Diagnostic - RW */
+#define E1000_RXPBS    0x02404  /* Rx Packet Buffer Size - RW */
+#define E1000_IRPBS 0x02404 /* Same as RXPBS, renamed for newer adapters - RW */
+#define E1000_PBRWAC   0x024E8 /* Rx packet buffer wrap around counter - RO */
+#define E1000_RDTR     0x02820  /* Rx Delay Timer - RW */
+#define E1000_RADV     0x0282C  /* Rx Interrupt Absolute Delay Timer - RW */
+/*
+ * Convenience macros
+ *
+ * Note: "_n" is the queue number of the register to be written to.
+ *
+ * Example usage:
+ * E1000_RDBAL_REG(current_rx_queue)
+ */
+#define E1000_RDBAL(_n)      ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) : \
+                                         (0x0C000 + ((_n) * 0x40)))
+#define E1000_RDBAH(_n)      ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) : \
+                                         (0x0C004 + ((_n) * 0x40)))
+#define E1000_RDLEN(_n)      ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) : \
+                                         (0x0C008 + ((_n) * 0x40)))
+#define E1000_SRRCTL(_n)     ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) : \
+                                         (0x0C00C + ((_n) * 0x40)))
+#define E1000_RDH(_n)        ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) : \
+                                         (0x0C010 + ((_n) * 0x40)))
+#define E1000_RXCTL(_n)      ((_n) < 4 ? (0x02814 + ((_n) * 0x100)) : \
+                                         (0x0C014 + ((_n) * 0x40)))
+#define E1000_DCA_RXCTRL(_n) E1000_RXCTL(_n)
+#define E1000_RDT(_n)        ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) : \
+                                         (0x0C018 + ((_n) * 0x40)))
+#define E1000_RXDCTL(_n)     ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) : \
+                                         (0x0C028 + ((_n) * 0x40)))
+#define E1000_RQDPC(_n)      ((_n) < 4 ? (0x02830 + ((_n) * 0x100)) : \
+                                         (0x0C030 + ((_n) * 0x40)))
+#define E1000_TDBAL(_n)      ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) : \
+                                         (0x0E000 + ((_n) * 0x40)))
+#define E1000_TDBAH(_n)      ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) : \
+                                         (0x0E004 + ((_n) * 0x40)))
+#define E1000_TDLEN(_n)      ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) : \
+                                         (0x0E008 + ((_n) * 0x40)))
+#define E1000_TDH(_n)        ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) : \
+                                         (0x0E010 + ((_n) * 0x40)))
+#define E1000_TXCTL(_n)      ((_n) < 4 ? (0x03814 + ((_n) * 0x100)) : \
+                                         (0x0E014 + ((_n) * 0x40)))
+#define E1000_DCA_TXCTRL(_n) E1000_TXCTL(_n)
+#define E1000_TDT(_n)        ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) : \
+                                         (0x0E018 + ((_n) * 0x40)))
+#define E1000_TXDCTL(_n)     ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) : \
+                                         (0x0E028 + ((_n) * 0x40)))
+#define E1000_TDWBAL(_n)     ((_n) < 4 ? (0x03838 + ((_n) * 0x100)) : \
+                                         (0x0E038 + ((_n) * 0x40)))
+#define E1000_TDWBAH(_n)     ((_n) < 4 ? (0x0383C + ((_n) * 0x100)) : \
+                                         (0x0E03C + ((_n) * 0x40)))
+#define E1000_TARC(_n)                   (0x03840 + ((_n) * 0x100))
+#define E1000_RSRPD    0x02C00  /* Rx Small Packet Detect - RW */
+#define E1000_RAID     0x02C08  /* Receive Ack Interrupt Delay - RW */
+#define E1000_TXDMAC   0x03000  /* Tx DMA Control - RW */
+#define E1000_KABGTXD  0x03004  /* AFE Band Gap Transmit Ref Data */
+#define E1000_PSRTYPE(_i)       (0x05480 + ((_i) * 4))
+#define E1000_RAL(_i)  (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
+                                       (0x054E0 + ((_i - 16) * 8)))
+#define E1000_RAH(_i)  (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
+                                       (0x054E4 + ((_i - 16) * 8)))
+#define E1000_SHRAL(_i)         (0x05438 + ((_i) * 8))
+#define E1000_SHRAH(_i)         (0x0543C + ((_i) * 8))
+#define E1000_IP4AT_REG(_i)     (0x05840 + ((_i) * 8))
+#define E1000_IP6AT_REG(_i)     (0x05880 + ((_i) * 4))
+#define E1000_WUPM_REG(_i)      (0x05A00 + ((_i) * 4))
+#define E1000_FFMT_REG(_i)      (0x09000 + ((_i) * 8))
+#define E1000_FFVT_REG(_i)      (0x09800 + ((_i) * 8))
+#define E1000_FFLT_REG(_i)      (0x05F00 + ((_i) * 8))
+#define E1000_PBSLAC   0x03100  /* Packet Buffer Slave Access Control */
+#define E1000_PBSLAD(_n)  (0x03110 + (0x4 * (_n)))  /* Packet Buffer DWORD (_n) */
+#define E1000_TXPBS    0x03404  /* Tx Packet Buffer Size - RW */
+#define E1000_ITPBS   0x03404   /* Same as TXPBS, renamed for newer adapters - RW */
+#define E1000_TDFH     0x03410  /* Tx Data FIFO Head - RW */
+#define E1000_TDFT     0x03418  /* Tx Data FIFO Tail - RW */
+#define E1000_TDFHS    0x03420  /* Tx Data FIFO Head Saved - RW */
+#define E1000_TDFTS    0x03428  /* Tx Data FIFO Tail Saved - RW */
+#define E1000_TDFPC    0x03430  /* Tx Data FIFO Packet Count - RW */
+#define E1000_TDPUMB   0x0357C  /* DMA Tx Descriptor uC Mail Box - RW */
+#define E1000_TDPUAD   0x03580  /* DMA Tx Descriptor uC Addr Command - RW */
+#define E1000_TDPUWD   0x03584  /* DMA Tx Descriptor uC Data Write - RW */
+#define E1000_TDPURD   0x03588  /* DMA Tx Descriptor uC Data  Read  - RW */
+#define E1000_TDPUCTL  0x0358C  /* DMA Tx Descriptor uC Control - RW */
+#define E1000_DTXCTL   0x03590  /* DMA Tx Control - RW */
+#define E1000_DTXTCPFLGL 0x0359C /* DMA Tx Control flag low - RW */
+#define E1000_DTXTCPFLGH 0x035A0 /* DMA Tx Control flag high - RW */
+#define E1000_DTXMXSZRQ  0x03540 /* DMA Tx Max Total Allow Size Requests - RW */
+#define E1000_TIDV     0x03820  /* Tx Interrupt Delay Value - RW */
+#define E1000_TADV     0x0382C  /* Tx Interrupt Absolute Delay Val - RW */
+#define E1000_TSPMT    0x03830  /* TCP Segmentation PAD & Min Threshold - RW */
+#define E1000_CRCERRS  0x04000  /* CRC Error Count - R/clr */
+#define E1000_ALGNERRC 0x04004  /* Alignment Error Count - R/clr */
+#define E1000_SYMERRS  0x04008  /* Symbol Error Count - R/clr */
+#define E1000_RXERRC   0x0400C  /* Receive Error Count - R/clr */
+#define E1000_MPC      0x04010  /* Missed Packet Count - R/clr */
+#define E1000_SCC      0x04014  /* Single Collision Count - R/clr */
+#define E1000_ECOL     0x04018  /* Excessive Collision Count - R/clr */
+#define E1000_MCC      0x0401C  /* Multiple Collision Count - R/clr */
+#define E1000_LATECOL  0x04020  /* Late Collision Count - R/clr */
+#define E1000_COLC     0x04028  /* Collision Count - R/clr */
+#define E1000_DC       0x04030  /* Defer Count - R/clr */
+#define E1000_TNCRS    0x04034  /* Tx-No CRS - R/clr */
+#define E1000_SEC      0x04038  /* Sequence Error Count - R/clr */
+#define E1000_CEXTERR  0x0403C  /* Carrier Extension Error Count - R/clr */
+#define E1000_RLEC     0x04040  /* Receive Length Error Count - R/clr */
+#define E1000_XONRXC   0x04048  /* XON Rx Count - R/clr */
+#define E1000_XONTXC   0x0404C  /* XON Tx Count - R/clr */
+#define E1000_XOFFRXC  0x04050  /* XOFF Rx Count - R/clr */
+#define E1000_XOFFTXC  0x04054  /* XOFF Tx Count - R/clr */
+#define E1000_FCRUC    0x04058  /* Flow Control Rx Unsupported Count- R/clr */
+#define E1000_PRC64    0x0405C  /* Packets Rx (64 bytes) - R/clr */
+#define E1000_PRC127   0x04060  /* Packets Rx (65-127 bytes) - R/clr */
+#define E1000_PRC255   0x04064  /* Packets Rx (128-255 bytes) - R/clr */
+#define E1000_PRC511   0x04068  /* Packets Rx (255-511 bytes) - R/clr */
+#define E1000_PRC1023  0x0406C  /* Packets Rx (512-1023 bytes) - R/clr */
+#define E1000_PRC1522  0x04070  /* Packets Rx (1024-1522 bytes) - R/clr */
+#define E1000_GPRC     0x04074  /* Good Packets Rx Count - R/clr */
+#define E1000_BPRC     0x04078  /* Broadcast Packets Rx Count - R/clr */
+#define E1000_MPRC     0x0407C  /* Multicast Packets Rx Count - R/clr */
+#define E1000_GPTC     0x04080  /* Good Packets Tx Count - R/clr */
+#define E1000_GORCL    0x04088  /* Good Octets Rx Count Low - R/clr */
+#define E1000_GORCH    0x0408C  /* Good Octets Rx Count High - R/clr */
+#define E1000_GOTCL    0x04090  /* Good Octets Tx Count Low - R/clr */
+#define E1000_GOTCH    0x04094  /* Good Octets Tx Count High - R/clr */
+#define E1000_RNBC     0x040A0  /* Rx No Buffers Count - R/clr */
+#define E1000_RUC      0x040A4  /* Rx Undersize Count - R/clr */
+#define E1000_RFC      0x040A8  /* Rx Fragment Count - R/clr */
+#define E1000_ROC      0x040AC  /* Rx Oversize Count - R/clr */
+#define E1000_RJC      0x040B0  /* Rx Jabber Count - R/clr */
+#define E1000_MGTPRC   0x040B4  /* Management Packets Rx Count - R/clr */
+#define E1000_MGTPDC   0x040B8  /* Management Packets Dropped Count - R/clr */
+#define E1000_MGTPTC   0x040BC  /* Management Packets Tx Count - R/clr */
+#define E1000_TORL     0x040C0  /* Total Octets Rx Low - R/clr */
+#define E1000_TORH     0x040C4  /* Total Octets Rx High - R/clr */
+#define E1000_TOTL     0x040C8  /* Total Octets Tx Low - R/clr */
+#define E1000_TOTH     0x040CC  /* Total Octets Tx High - R/clr */
+#define E1000_TPR      0x040D0  /* Total Packets Rx - R/clr */
+#define E1000_TPT      0x040D4  /* Total Packets Tx - R/clr */
+#define E1000_PTC64    0x040D8  /* Packets Tx (64 bytes) - R/clr */
+#define E1000_PTC127   0x040DC  /* Packets Tx (65-127 bytes) - R/clr */
+#define E1000_PTC255   0x040E0  /* Packets Tx (128-255 bytes) - R/clr */
+#define E1000_PTC511   0x040E4  /* Packets Tx (256-511 bytes) - R/clr */
+#define E1000_PTC1023  0x040E8  /* Packets Tx (512-1023 bytes) - R/clr */
+#define E1000_PTC1522  0x040EC  /* Packets Tx (1024-1522 Bytes) - R/clr */
+#define E1000_MPTC     0x040F0  /* Multicast Packets Tx Count - R/clr */
+#define E1000_BPTC     0x040F4  /* Broadcast Packets Tx Count - R/clr */
+#define E1000_TSCTC    0x040F8  /* TCP Segmentation Context Tx - R/clr */
+#define E1000_TSCTFC   0x040FC  /* TCP Segmentation Context Tx Fail - R/clr */
+#define E1000_IAC      0x04100  /* Interrupt Assertion Count */
+#define E1000_ICRXPTC  0x04104  /* Interrupt Cause Rx Pkt Timer Expire Count */
+#define E1000_ICRXATC  0x04108  /* Interrupt Cause Rx Abs Timer Expire Count */
+#define E1000_ICTXPTC  0x0410C  /* Interrupt Cause Tx Pkt Timer Expire Count */
+#define E1000_ICTXATC  0x04110  /* Interrupt Cause Tx Abs Timer Expire Count */
+#define E1000_ICTXQEC  0x04118  /* Interrupt Cause Tx Queue Empty Count */
+#define E1000_ICTXQMTC 0x0411C  /* Interrupt Cause Tx Queue Min Thresh Count */
+#define E1000_ICRXDMTC 0x04120  /* Interrupt Cause Rx Desc Min Thresh Count */
+#define E1000_ICRXOC   0x04124  /* Interrupt Cause Receiver Overrun Count */
+
+#define E1000_VFGPRC   0x00F10
+#define E1000_VFGORC   0x00F18
+#define E1000_VFMPRC   0x00F3C
+#define E1000_VFGPTC   0x00F14
+#define E1000_VFGOTC   0x00F34
+#define E1000_VFGOTLBC 0x00F50
+#define E1000_VFGPTLBC 0x00F44
+#define E1000_VFGORLBC 0x00F48
+#define E1000_VFGPRLBC 0x00F40
+/* Virtualization statistical counters */
+#define E1000_PFVFGPRC(_n)   (0x010010 + (0x100 * (_n)))
+#define E1000_PFVFGPTC(_n)   (0x010014 + (0x100 * (_n)))
+#define E1000_PFVFGORC(_n)   (0x010018 + (0x100 * (_n)))
+#define E1000_PFVFGOTC(_n)   (0x010034 + (0x100 * (_n)))
+#define E1000_PFVFMPRC(_n)   (0x010038 + (0x100 * (_n)))
+#define E1000_PFVFGPRLBC(_n) (0x010040 + (0x100 * (_n)))
+#define E1000_PFVFGPTLBC(_n) (0x010044 + (0x100 * (_n)))
+#define E1000_PFVFGORLBC(_n) (0x010048 + (0x100 * (_n)))
+#define E1000_PFVFGOTLBC(_n) (0x010050 + (0x100 * (_n)))
+
+#define E1000_LSECTXUT        0x04300  /* LinkSec Tx Untagged Packet Count - OutPktsUntagged */
+#define E1000_LSECTXPKTE      0x04304  /* LinkSec Encrypted Tx Packets Count - OutPktsEncrypted */
+#define E1000_LSECTXPKTP      0x04308  /* LinkSec Protected Tx Packet Count - OutPktsProtected */
+#define E1000_LSECTXOCTE      0x0430C  /* LinkSec Encrypted Tx Octets Count - OutOctetsEncrypted */
+#define E1000_LSECTXOCTP      0x04310  /* LinkSec Protected Tx Octets Count - OutOctetsProtected */
+#define E1000_LSECRXUT        0x04314  /* LinkSec Untagged non-Strict Rx Packet Count - InPktsUntagged/InPktsNoTag */
+#define E1000_LSECRXOCTD      0x0431C  /* LinkSec Rx Octets Decrypted Count - InOctetsDecrypted */
+#define E1000_LSECRXOCTV      0x04320  /* LinkSec Rx Octets Validated - InOctetsValidated */
+#define E1000_LSECRXBAD       0x04324  /* LinkSec Rx Bad Tag - InPktsBadTag */
+#define E1000_LSECRXNOSCI     0x04328  /* LinkSec Rx Packet No SCI Count - InPktsNoSci */
+#define E1000_LSECRXUNSCI     0x0432C  /* LinkSec Rx Packet Unknown SCI Count - InPktsUnknownSci */
+#define E1000_LSECRXUNCH      0x04330  /* LinkSec Rx Unchecked Packets Count - InPktsUnchecked */
+#define E1000_LSECRXDELAY     0x04340  /* LinkSec Rx Delayed Packet Count - InPktsDelayed */
+#define E1000_LSECRXLATE      0x04350  /* LinkSec Rx Late Packets Count - InPktsLate */
+#define E1000_LSECRXOK(_n)    (0x04360 + (0x04 * (_n))) /* LinkSec Rx Packet OK Count - InPktsOk */
+#define E1000_LSECRXINV(_n)   (0x04380 + (0x04 * (_n))) /* LinkSec Rx Invalid Count - InPktsInvalid */
+#define E1000_LSECRXNV(_n)    (0x043A0 + (0x04 * (_n))) /* LinkSec Rx Not Valid Count - InPktsNotValid */
+#define E1000_LSECRXUNSA      0x043C0  /* LinkSec Rx Unused SA Count - InPktsUnusedSa */
+#define E1000_LSECRXNUSA      0x043D0  /* LinkSec Rx Not Using SA Count - InPktsNotUsingSa */
+#define E1000_LSECTXCAP       0x0B000  /* LinkSec Tx Capabilities Register - RO */
+#define E1000_LSECRXCAP       0x0B300  /* LinkSec Rx Capabilities Register - RO */
+#define E1000_LSECTXCTRL      0x0B004  /* LinkSec Tx Control - RW */
+#define E1000_LSECRXCTRL      0x0B304  /* LinkSec Rx Control - RW */
+#define E1000_LSECTXSCL       0x0B008  /* LinkSec Tx SCI Low - RW */
+#define E1000_LSECTXSCH       0x0B00C  /* LinkSec Tx SCI High - RW */
+#define E1000_LSECTXSA        0x0B010  /* LinkSec Tx SA0 - RW */
+#define E1000_LSECTXPN0       0x0B018  /* LinkSec Tx SA PN 0 - RW */
+#define E1000_LSECTXPN1       0x0B01C  /* LinkSec Tx SA PN 1 - RW */
+#define E1000_LSECRXSCL       0x0B3D0  /* LinkSec Rx SCI Low - RW */
+#define E1000_LSECRXSCH       0x0B3E0  /* LinkSec Rx SCI High - RW */
+#define E1000_LSECTXKEY0(_n)  (0x0B020 + (0x04 * (_n))) /* LinkSec Tx 128-bit Key 0 - WO */
+#define E1000_LSECTXKEY1(_n)  (0x0B030 + (0x04 * (_n))) /* LinkSec Tx 128-bit Key 1 - WO */
+#define E1000_LSECRXSA(_n)    (0x0B310 + (0x04 * (_n))) /* LinkSec Rx SAs - RW */
+#define E1000_LSECRXPN(_n)    (0x0B330 + (0x04 * (_n))) /* LinkSec Rx SAs - RW */
+/*
+ * LinkSec Rx Keys  - where _n is the SA no. and _m the 4 dwords of the 128 bit
+ * key - RW.
+ */
+#define E1000_LSECRXKEY(_n, _m) (0x0B350 + (0x10 * (_n)) + (0x04 * (_m)))
+
+#define E1000_SSVPC             0x041A0  /* Switch Security Violation Packet Count */
+#define E1000_IPSCTRL           0xB430   /* IpSec Control Register */
+#define E1000_IPSRXCMD          0x0B408  /* IPSec Rx Command Register - RW */
+#define E1000_IPSRXIDX          0x0B400  /* IPSec Rx Index - RW */
+#define E1000_IPSRXIPADDR(_n)   (0x0B420+ (0x04 * (_n)))  /* IPSec Rx IPv4/v6 Address - RW */
+#define E1000_IPSRXKEY(_n)      (0x0B410 + (0x04 * (_n))) /* IPSec Rx 128-bit Key - RW */
+#define E1000_IPSRXSALT         0x0B404  /* IPSec Rx Salt - RW */
+#define E1000_IPSRXSPI          0x0B40C  /* IPSec Rx SPI - RW */
+#define E1000_IPSTXKEY(_n)      (0x0B460 + (0x04 * (_n))) /* IPSec Tx 128-bit Key - RW */
+#define E1000_IPSTXSALT         0x0B454  /* IPSec Tx Salt - RW */
+#define E1000_IPSTXIDX          0x0B450  /* IPSec Tx SA IDX - RW */
+#define E1000_PCS_CFG0    0x04200  /* PCS Configuration 0 - RW */
+#define E1000_PCS_LCTL    0x04208  /* PCS Link Control - RW */
+#define E1000_PCS_LSTAT   0x0420C  /* PCS Link Status - RO */
+#define E1000_CBTMPC      0x0402C  /* Circuit Breaker Tx Packet Count */
+#define E1000_HTDPMC      0x0403C  /* Host Transmit Discarded Packets */
+#define E1000_CBRDPC      0x04044  /* Circuit Breaker Rx Dropped Count */
+#define E1000_CBRMPC      0x040FC  /* Circuit Breaker Rx Packet Count */
+#define E1000_RPTHC       0x04104  /* Rx Packets To Host */
+#define E1000_HGPTC       0x04118  /* Host Good Packets Tx Count */
+#define E1000_HTCBDPC     0x04124  /* Host Tx Circuit Breaker Dropped Count */
+#define E1000_HGORCL      0x04128  /* Host Good Octets Received Count Low */
+#define E1000_HGORCH      0x0412C  /* Host Good Octets Received Count High */
+#define E1000_HGOTCL      0x04130  /* Host Good Octets Transmit Count Low */
+#define E1000_HGOTCH      0x04134  /* Host Good Octets Transmit Count High */
+#define E1000_LENERRS     0x04138  /* Length Errors Count */
+#define E1000_SCVPC       0x04228  /* SerDes/SGMII Code Violation Pkt Count */
+#define E1000_HRMPC       0x0A018  /* Header Redirection Missed Packet Count */
+#define E1000_PCS_ANADV   0x04218  /* AN advertisement - RW */
+#define E1000_PCS_LPAB    0x0421C  /* Link Partner Ability - RW */
+#define E1000_PCS_NPTX    0x04220  /* AN Next Page Transmit - RW */
+#define E1000_PCS_LPABNP  0x04224  /* Link Partner Ability Next Page - RW */
+#define E1000_1GSTAT_RCV  0x04228  /* 1GSTAT Code Violation Packet Count - RW */
+#define E1000_RXCSUM   0x05000  /* Rx Checksum Control - RW */
+#define E1000_RLPML    0x05004  /* Rx Long Packet Max Length */
+#define E1000_RFCTL    0x05008  /* Receive Filter Control*/
+#define E1000_MTA      0x05200  /* Multicast Table Array - RW Array */
+#define E1000_RA       0x05400  /* Receive Address - RW Array */
+#define E1000_RA2      0x054E0  /* 2nd half of receive address array - RW Array */
+#define E1000_VFTA     0x05600  /* VLAN Filter Table Array - RW Array */
+#define E1000_VT_CTL   0x0581C  /* VMDq Control - RW */
+#define E1000_VFQA0    0x0B000  /* VLAN Filter Queue Array 0 - RW Array */
+#define E1000_VFQA1    0x0B200  /* VLAN Filter Queue Array 1 - RW Array */
+#define E1000_WUC      0x05800  /* Wakeup Control - RW */
+#define E1000_WUFC     0x05808  /* Wakeup Filter Control - RW */
+#define E1000_WUS      0x05810  /* Wakeup Status - RO */
+#define E1000_MANC     0x05820  /* Management Control - RW */
+#define E1000_IPAV     0x05838  /* IP Address Valid - RW */
+#define E1000_IP4AT    0x05840  /* IPv4 Address Table - RW Array */
+#define E1000_IP6AT    0x05880  /* IPv6 Address Table - RW Array */
+#define E1000_WUPL     0x05900  /* Wakeup Packet Length - RW */
+#define E1000_WUPM     0x05A00  /* Wakeup Packet Memory - RO A */
+#define E1000_PBACL    0x05B68  /* MSIx PBA Clear - Read/Write 1's to clear */
+#define E1000_FFLT     0x05F00  /* Flexible Filter Length Table - RW Array */
+#define E1000_HOST_IF  0x08800  /* Host Interface */
+#define E1000_FFMT     0x09000  /* Flexible Filter Mask Table - RW Array */
+#define E1000_FFVT     0x09800  /* Flexible Filter Value Table - RW Array */
+#define E1000_FHFT(_n)  (0x09000 + (_n * 0x100)) /* Flexible Host Filter Table */
+#define E1000_FHFT_EXT(_n) (0x09A00 + (_n * 0x100)) /* Ext Flexible Host Filter Table */
+
+
+#define E1000_KMRNCTRLSTA 0x00034 /* MAC-PHY interface - RW */
+#define E1000_MDPHYA      0x0003C /* PHY address - RW */
+#define E1000_MANC2H      0x05860 /* Management Control To Host - RW */
+#define E1000_MDEF(_n)    (0x05890 + (4 * (_n))) /* Mngmt Decision Filters */
+#define E1000_SW_FW_SYNC  0x05B5C /* Software-Firmware Synchronization - RW */
+#define E1000_CCMCTL      0x05B48 /* CCM Control Register */
+#define E1000_GIOCTL      0x05B44 /* GIO Analog Control Register */
+#define E1000_SCCTL       0x05B4C /* PCIc PLL Configuration Register */
+#define E1000_GCR         0x05B00 /* PCI-Ex Control */
+#define E1000_GCR2        0x05B64 /* PCI-Ex Control #2 */
+#define E1000_GSCL_1    0x05B10 /* PCI-Ex Statistic Control #1 */
+#define E1000_GSCL_2    0x05B14 /* PCI-Ex Statistic Control #2 */
+#define E1000_GSCL_3    0x05B18 /* PCI-Ex Statistic Control #3 */
+#define E1000_GSCL_4    0x05B1C /* PCI-Ex Statistic Control #4 */
+#define E1000_FACTPS    0x05B30 /* Function Active and Power State to MNG */
+#define E1000_SWSM      0x05B50 /* SW Semaphore */
+#define E1000_FWSM      0x05B54 /* FW Semaphore */
+#define E1000_SWSM2     0x05B58 /* Driver-only SW semaphore (not used by BOOT agents) */
+#define E1000_DCA_ID    0x05B70 /* DCA Requester ID Information - RO */
+#define E1000_DCA_CTRL  0x05B74 /* DCA Control - RW */
+#define E1000_UFUSE     0x05B78 /* UFUSE - RO */
+#define E1000_FFLT_DBG  0x05F04 /* Debug Register */
+#define E1000_HICR      0x08F00 /* Host Interface Control */
+#define E1000_FWSTS     0x08F0C /* FW Status */
+
+/* RSS registers */
+#define E1000_CPUVEC    0x02C10 /* CPU Vector Register - RW */
+#define E1000_MRQC      0x05818 /* Multiple Receive Control - RW */
+#define E1000_IMIR(_i)      (0x05A80 + ((_i) * 4))  /* Immediate Interrupt */
+#define E1000_IMIREXT(_i)   (0x05AA0 + ((_i) * 4))  /* Immediate Interrupt Ext*/
+#define E1000_IMIRVP    0x05AC0 /* Immediate Interrupt Rx VLAN Priority - RW */
+#define E1000_MSIXBM(_i)    (0x01600 + ((_i) * 4)) /* MSI-X Allocation Register
+                                                    * (_i) - RW */
+#define E1000_MSIXTADD(_i)  (0x0C000 + ((_i) * 0x10)) /* MSI-X Table entry addr
+                                                       * low reg - RW */
+#define E1000_MSIXTUADD(_i) (0x0C004 + ((_i) * 0x10)) /* MSI-X Table entry addr
+                                                       * upper reg - RW */
+#define E1000_MSIXTMSG(_i)  (0x0C008 + ((_i) * 0x10)) /* MSI-X Table entry
+                                                       * message reg - RW */
+#define E1000_MSIXVCTRL(_i) (0x0C00C + ((_i) * 0x10)) /* MSI-X Table entry
+                                                       * vector ctrl reg - RW */
+#define E1000_MSIXPBA    0x0E000 /* MSI-X Pending bit array */
+#define E1000_RETA(_i)  (0x05C00 + ((_i) * 4)) /* Redirection Table - RW */
+#define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* RSS Random Key - RW */
+#define E1000_RSSIM     0x05864 /* RSS Interrupt Mask */
+#define E1000_RSSIR     0x05868 /* RSS Interrupt Request */
+/* VT Registers */
+#define E1000_SWPBS     0x03004 /* Switch Packet Buffer Size - RW */
+#define E1000_MBVFICR   0x00C80 /* Mailbox VF Cause - RWC */
+#define E1000_MBVFIMR   0x00C84 /* Mailbox VF int Mask - RW */
+#define E1000_VFLRE     0x00C88 /* VF Register Events - RWC */
+#define E1000_VFRE      0x00C8C /* VF Receive Enables */
+#define E1000_VFTE      0x00C90 /* VF Transmit Enables */
+#define E1000_QDE       0x02408 /* Queue Drop Enable - RW */
+#define E1000_DTXSWC    0x03500 /* DMA Tx Switch Control - RW */
+#define E1000_WVBR      0x03554 /* VM Wrong Behavior - RWS */
+#define E1000_RPLOLR    0x05AF0 /* Replication Offload - RW */
+#define E1000_UTA       0x0A000 /* Unicast Table Array - RW */
+#define E1000_IOVTCL    0x05BBC /* IOV Control Register */
+#define E1000_VMRCTL    0X05D80 /* Virtual Mirror Rule Control */
+#define E1000_VMRVLAN   0x05D90 /* Virtual Mirror Rule VLAN */
+#define E1000_VMRVM     0x05DA0 /* Virtual Mirror Rule VM */
+#define E1000_MDFB      0x03558 /* Malicious Driver free block */
+#define E1000_LVMMC     0x03548 /* Last VM Misbehavior cause */
+#define E1000_TXSWC     0x05ACC /* Tx Switch Control */
+#define E1000_SCCRL     0x05DB0 /* Storm Control Control */
+#define E1000_BSCTRH    0x05DB8 /* Broadcast Storm Control Threshold */
+#define E1000_MSCTRH    0x05DBC /* Multicast Storm Control Threshold */
+/* These act per VF so an array friendly macro is used */
+#define E1000_V2PMAILBOX(_n)   (0x00C40 + (4 * (_n)))
+#define E1000_P2VMAILBOX(_n)   (0x00C00 + (4 * (_n)))
+#define E1000_VMBMEM(_n)       (0x00800 + (64 * (_n)))
+#define E1000_VFVMBMEM(_n)     (0x00800 + (_n))
+#define E1000_VMOLR(_n)        (0x05AD0 + (4 * (_n)))
+#define E1000_VLVF(_n)         (0x05D00 + (4 * (_n))) /* VLAN Virtual Machine
+                                                       * Filter - RW */
+#define E1000_VMVIR(_n)        (0x03700 + (4 * (_n)))
+#define E1000_DVMOLR(_n)       (0x0C038 + (0x40 * (_n))) /* DMA VM offload */
+/* Time Sync */
+#define E1000_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */
+#define E1000_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */
+#define E1000_TSYNCRXCFG 0x05F50 /* Time Sync Rx Configuration - RW */
+#define E1000_RXSTMPL    0x0B624 /* Rx timestamp Low - RO */
+#define E1000_RXSTMPH    0x0B628 /* Rx timestamp High - RO */
+#define E1000_RXSATRL    0x0B62C /* Rx timestamp attribute low - RO */
+#define E1000_RXSATRH    0x0B630 /* Rx timestamp attribute high - RO */
+#define E1000_TXSTMPL    0x0B618 /* Tx timestamp value Low - RO */
+#define E1000_TXSTMPH    0x0B61C /* Tx timestamp value High - RO */
+#define E1000_SYSTIML    0x0B600 /* System time register Low - RO */
+#define E1000_SYSTIMH    0x0B604 /* System time register High - RO */
+#define E1000_TIMINCA    0x0B608 /* Increment attributes register - RW */
+#define E1000_TSAUXC     0x0B640 /* Timesync Auxiliary Control register */
+#define E1000_SYSTIMR    0x0B6F8 /* System time register Residue */
+
+/* Filtering Registers */
+#define E1000_SAQF(_n)  (0x05980 + (4 * (_n))) /* Source Address Queue Fltr */
+#define E1000_DAQF(_n)  (0x059A0 + (4 * (_n))) /* Dest Address Queue Fltr */
+#define E1000_SPQF(_n)  (0x059C0 + (4 * (_n))) /* Source Port Queue Fltr */
+#define E1000_FTQF(_n)  (0x059E0 + (4 * (_n))) /* 5-tuple Queue Fltr */
+#define E1000_TTQF(_n)  (0x059E0 + (4 * (_n))) /* 2-tuple Queue Fltr */
+#define E1000_SYNQF(_n) (0x055FC + (4 * (_n))) /* SYN Packet Queue Fltr */
+#define E1000_ETQF(_n)  (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */
+
+#define E1000_RTTDCS            0x3600  /* Reedtown Tx Desc plane control and status */
+#define E1000_RTTPCS            0x3474  /* Reedtown Tx Packet Plane control and status */
+#define E1000_RTRPCS            0x2474  /* Rx packet plane control and status */
+#define E1000_RTRUP2TC          0x05AC4 /* Rx User Priority to Traffic Class */
+#define E1000_RTTUP2TC          0x0418  /* Transmit User Priority to Traffic Class */
+#define E1000_RTTDTCRC(_n)      (0x3610 + ((_n) * 4)) /* Tx Desc plane TC Rate-scheduler config */
+#define E1000_RTTPTCRC(_n)      (0x3480 + ((_n) * 4)) /* Tx Packet plane TC Rate-Scheduler Config */
+#define E1000_RTRPTCRC(_n)      (0x2480 + ((_n) * 4)) /* Rx Packet plane TC Rate-Scheduler Config */
+#define E1000_RTTDTCRS(_n)      (0x3630 + ((_n) * 4)) /* Tx Desc Plane TC Rate-Scheduler Status */
+#define E1000_RTTDTCRM(_n)      (0x3650 + ((_n) * 4)) /* Tx Desc Plane TC Rate-Scheduler MMW */
+#define E1000_RTTPTCRS(_n)      (0x34A0 + ((_n) * 4)) /* Tx Packet plane TC Rate-Scheduler Status */
+#define E1000_RTTPTCRM(_n)      (0x34C0 + ((_n) * 4)) /* Tx Packet plane TC Rate-scheduler MMW */
+#define E1000_RTRPTCRS(_n)      (0x24A0 + ((_n) * 4)) /* Rx Packet plane TC Rate-Scheduler Status */
+#define E1000_RTRPTCRM(_n)      (0x24C0 + ((_n) * 4)) /* Rx Packet plane TC Rate-Scheduler MMW */
+#define E1000_RTTDVMRM(_n)      (0x3670 + ((_n) * 4)) /* Tx Desc plane VM Rate-Scheduler MMW*/
+#define E1000_RTTBCNRM(_n)      (0x3690 + ((_n) * 4)) /* Tx BCN Rate-Scheduler MMW */
+#define E1000_RTTDQSEL          0x3604  /* Tx Desc Plane Queue Select */
+#define E1000_RTTDVMRC          0x3608  /* Tx Desc Plane VM Rate-Scheduler Config */
+#define E1000_RTTDVMRS          0x360C  /* Tx Desc Plane VM Rate-Scheduler Status */
+#define E1000_RTTBCNRC          0x36B0  /* Tx BCN Rate-Scheduler Config */
+#define E1000_RTTBCNRS          0x36B4  /* Tx BCN Rate-Scheduler Status */
+#define E1000_RTTBCNCR          0xB200  /* Tx BCN Control Register */
+#define E1000_RTTBCNTG          0x35A4  /* Tx BCN Tagging */
+#define E1000_RTTBCNCP          0xB208  /* Tx BCN Congestion point */
+#define E1000_RTRBCNCR          0xB20C  /* Rx BCN Control Register */
+#define E1000_RTTBCNRD          0x36B8  /* Tx BCN Rate Drift */
+#define E1000_PFCTOP            0x1080  /* Priority Flow Control Type and Opcode */
+#define E1000_RTTBCNIDX         0xB204  /* Tx BCN Congestion Point */
+#define E1000_RTTBCNACH         0x0B214 /* Tx BCN Control High */
+#define E1000_RTTBCNACL         0x0B210 /* Tx BCN Control Low */
+
+/* DMA Coalescing registers */
+#define E1000_DMACR             0x02508 /* Control Register */
+#define E1000_DMCTXTH           0x03550 /* Transmit Threshold */
+#define E1000_DMCTLX            0x02514 /* Time to Lx Request */
+#define E1000_DMCRTRH           0x05DD0 /* Receive Packet Rate Threshold */
+#define E1000_DMCCNT            0x05DD4 /* Current Rx Count */
+#define E1000_FCRTC             0x02170 /* Flow Control Rx high watermark */
+#define E1000_PCIEMISC          0x05BB8 /* PCIE misc config register */
+
+/* PCIe Parity Status Register */
+#define E1000_PCIEERRSTS        0x05BA8
+
+#define E1000_PROXYS            0x5F64 /* Proxying Status */
+#define E1000_PROXYFC           0x5F60 /* Proxying Filter Control */
+/* Thermal sensor configuration and status registers */
+#define E1000_THMJT             0x08100 /* Junction Temperature */
+#define E1000_THLOWTC           0x08104 /* Low Threshold Control */
+#define E1000_THMIDTC           0x08108 /* Mid Threshold Control */
+#define E1000_THHIGHTC          0x0810C /* High Threshold Control */
+#define E1000_THSTAT            0x08110 /* Thermal Sensor Status */
+
+/*Energy Efficient Ethernet "EEE" registers */
+#define E1000_IPCNFG            0x0E38 /* Internal PHY Configuration */
+#define E1000_LTRC              0x01A0 /* Latency Tolerance Reporting Control */
+#define E1000_EEER              0x0E30 /* Energy Efficient Ethernet "EEE"*/
+#define E1000_EEE_SU            0x0E34 /* EEE Setup */
+#define E1000_TLPIC             0x4148 /* EEE Tx LPI Count - TLPIC */
+#define E1000_RLPIC             0x414C /* EEE Rx LPI Count - RLPIC */
+
+/* OS2BMC Registers */
+#define E1000_B2OSPC            0x08FE0 /* BMC2OS packets sent by BMC */
+#define E1000_B2OGPRC           0x04158 /* BMC2OS packets received by host */
+#define E1000_O2BGPTC           0x08FE4 /* OS2BMC packets received by BMC */
+#define E1000_O2BSPC            0x0415C /* OS2BMC packets transmitted by host */
+
+#endif
diff --git a/lib/librte_pmd_igb/igb/e1000_vf.c b/lib/librte_pmd_igb/igb/e1000_vf.c
new file mode 100644 (file)
index 0000000..8b81e4b
--- /dev/null
@@ -0,0 +1,574 @@
+/******************************************************************************
+
+  Copyright (c) 2001-2011, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+
+#include "e1000_api.h"
+
+
+static s32       e1000_init_phy_params_vf(struct e1000_hw *hw);
+static s32       e1000_init_nvm_params_vf(struct e1000_hw *hw);
+static void      e1000_release_vf(struct e1000_hw *hw);
+static s32       e1000_acquire_vf(struct e1000_hw *hw);
+static s32       e1000_setup_link_vf(struct e1000_hw *hw);
+static s32       e1000_get_bus_info_pcie_vf(struct e1000_hw *hw);
+static s32       e1000_init_mac_params_vf(struct e1000_hw *hw);
+static s32       e1000_check_for_link_vf(struct e1000_hw *hw);
+static s32       e1000_get_link_up_info_vf(struct e1000_hw *hw, u16 *speed,
+                                              u16 *duplex);
+static s32       e1000_init_hw_vf(struct e1000_hw *hw);
+static s32       e1000_reset_hw_vf(struct e1000_hw *hw);
+static void      e1000_update_mc_addr_list_vf(struct e1000_hw *hw, u8 *, u32);
+static void      e1000_rar_set_vf(struct e1000_hw *, u8 *, u32);
+static s32       e1000_read_mac_addr_vf(struct e1000_hw *);
+
+/**
+ *  e1000_init_phy_params_vf - Inits PHY params
+ *  @hw: pointer to the HW structure
+ *
+ *  Doesn't do much - there's no PHY available to the VF.
+ **/
+static s32 e1000_init_phy_params_vf(struct e1000_hw *hw)
+{
+       DEBUGFUNC("e1000_init_phy_params_vf");
+       hw->phy.type = e1000_phy_vf;
+       hw->phy.ops.acquire = e1000_acquire_vf;
+       hw->phy.ops.release = e1000_release_vf;
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_init_nvm_params_vf - Inits NVM params
+ *  @hw: pointer to the HW structure
+ *
+ *  Doesn't do much - there's no NVM available to the VF.
+ **/
+static s32 e1000_init_nvm_params_vf(struct e1000_hw *hw)
+{
+       DEBUGFUNC("e1000_init_nvm_params_vf");
+       hw->nvm.type = e1000_nvm_none;
+       hw->nvm.ops.acquire = e1000_acquire_vf;
+       hw->nvm.ops.release = e1000_release_vf;
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_init_mac_params_vf - Inits MAC params
+ *  @hw: pointer to the HW structure
+ **/
+static s32 e1000_init_mac_params_vf(struct e1000_hw *hw)
+{
+       struct e1000_mac_info *mac = &hw->mac;
+
+       DEBUGFUNC("e1000_init_mac_params_vf");
+
+       /* Set media type */
+       /*
+        * Virtual functions don't care what they're media type is as they
+        * have no direct access to the PHY, or the media.  That is handled
+        * by the physical function driver.
+        */
+       hw->phy.media_type = e1000_media_type_unknown;
+
+       /* No ASF features for the VF driver */
+       mac->asf_firmware_present = FALSE;
+       /* ARC subsystem not supported */
+       mac->arc_subsystem_valid = FALSE;
+       /* Disable adaptive IFS mode so the generic funcs don't do anything */
+       mac->adaptive_ifs = FALSE;
+       /* VF's have no MTA Registers - PF feature only */
+       mac->mta_reg_count = 128;
+       /* VF's have no access to RAR entries  */
+       mac->rar_entry_count = 1;
+
+       /* Function pointers */
+       /* link setup */
+       mac->ops.setup_link = e1000_setup_link_vf;
+       /* bus type/speed/width */
+       mac->ops.get_bus_info = e1000_get_bus_info_pcie_vf;
+       /* reset */
+       mac->ops.reset_hw = e1000_reset_hw_vf;
+       /* hw initialization */
+       mac->ops.init_hw = e1000_init_hw_vf;
+       /* check for link */
+       mac->ops.check_for_link = e1000_check_for_link_vf;
+       /* link info */
+       mac->ops.get_link_up_info = e1000_get_link_up_info_vf;
+       /* multicast address update */
+       mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_vf;
+       /* set mac address */
+       mac->ops.rar_set = e1000_rar_set_vf;
+       /* read mac address */
+       mac->ops.read_mac_addr = e1000_read_mac_addr_vf;
+
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_init_function_pointers_vf - Inits function pointers
+ *  @hw: pointer to the HW structure
+ **/
+void e1000_init_function_pointers_vf(struct e1000_hw *hw)
+{
+       DEBUGFUNC("e1000_init_function_pointers_vf");
+
+       hw->mac.ops.init_params = e1000_init_mac_params_vf;
+       hw->nvm.ops.init_params = e1000_init_nvm_params_vf;
+       hw->phy.ops.init_params = e1000_init_phy_params_vf;
+       hw->mbx.ops.init_params = e1000_init_mbx_params_vf;
+}
+
+/**
+ *  e1000_acquire_vf - Acquire rights to access PHY or NVM.
+ *  @hw: pointer to the HW structure
+ *
+ *  There is no PHY or NVM so we want all attempts to acquire these to fail.
+ *  In addition, the MAC registers to access PHY/NVM don't exist so we don't
+ *  even want any SW to attempt to use them.
+ **/
+static s32 e1000_acquire_vf(struct e1000_hw *hw)
+{
+       return -E1000_ERR_PHY;
+}
+
/**
 *  e1000_release_vf - Release PHY or NVM
 *  @hw: pointer to the HW structure
 *
 *  There is no PHY or NVM so we want all attempts to acquire these to fail.
 *  In addition, the MAC registers to access PHY/NVM don't exist so we don't
 *  even want any SW to attempt to use them.
 **/
static void e1000_release_vf(struct e1000_hw *hw)
{
	/* nothing was acquired, so there is nothing to release */
}
+
+/**
+ *  e1000_setup_link_vf - Sets up link.
+ *  @hw: pointer to the HW structure
+ *
+ *  Virtual functions cannot change link.
+ **/
+static s32 e1000_setup_link_vf(struct e1000_hw *hw)
+{
+       DEBUGFUNC("e1000_setup_link_vf");
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_get_bus_info_pcie_vf - Gets the bus info.
+ *  @hw: pointer to the HW structure
+ *
+ *  Virtual functions are not really on their own bus.
+ **/
+static s32 e1000_get_bus_info_pcie_vf(struct e1000_hw *hw)
+{
+       struct e1000_bus_info *bus = &hw->bus;
+
+       DEBUGFUNC("e1000_get_bus_info_pcie_vf");
+
+       /* Do not set type PCI-E because we don't want disable master to run */
+       bus->type = e1000_bus_type_reserved;
+       bus->speed = e1000_bus_speed_2500;
+
+       return 0;
+}
+
+/**
+ *  e1000_get_link_up_info_vf - Gets link info.
+ *  @hw: pointer to the HW structure
+ *  @speed: pointer to 16 bit value to store link speed.
+ *  @duplex: pointer to 16 bit value to store duplex.
+ *
+ *  Since we cannot read the PHY and get accurate link info, we must rely upon
+ *  the status register's data which is often stale and inaccurate.
+ **/
+static s32 e1000_get_link_up_info_vf(struct e1000_hw *hw, u16 *speed,
+                                     u16 *duplex)
+{
+       s32 status;
+
+       DEBUGFUNC("e1000_get_link_up_info_vf");
+
+       status = E1000_READ_REG(hw, E1000_STATUS);
+       if (status & E1000_STATUS_SPEED_1000) {
+               *speed = SPEED_1000;
+               DEBUGOUT("1000 Mbs, ");
+       } else if (status & E1000_STATUS_SPEED_100) {
+               *speed = SPEED_100;
+               DEBUGOUT("100 Mbs, ");
+       } else {
+               *speed = SPEED_10;
+               DEBUGOUT("10 Mbs, ");
+       }
+
+       if (status & E1000_STATUS_FD) {
+               *duplex = FULL_DUPLEX;
+               DEBUGOUT("Full Duplex\n");
+       } else {
+               *duplex = HALF_DUPLEX;
+               DEBUGOUT("Half Duplex\n");
+       }
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_reset_hw_vf - Resets the HW
+ *  @hw: pointer to the HW structure
+ *
+ *  VF's provide a function level reset. This is done using bit 26 of ctrl_reg.
+ *  This is all the reset we can perform on a VF.
+ **/
+static s32 e1000_reset_hw_vf(struct e1000_hw *hw)
+{
+       struct e1000_mbx_info *mbx = &hw->mbx;
+       u32 timeout = E1000_VF_INIT_TIMEOUT;
+       s32 ret_val = -E1000_ERR_MAC_INIT;
+       u32 ctrl, msgbuf[3];
+       u8 *addr = (u8 *)(&msgbuf[1]);
+
+       DEBUGFUNC("e1000_reset_hw_vf");
+
+       DEBUGOUT("Issuing a function level reset to MAC\n");
+       ctrl = E1000_READ_REG(hw, E1000_CTRL);
+       E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST);
+
+       /* we cannot reset while the RSTI / RSTD bits are asserted */
+       while (!mbx->ops.check_for_rst(hw, 0) && timeout) {
+               timeout--;
+               usec_delay(5);
+       }
+
+       if (timeout) {
+               /* mailbox timeout can now become active */
+               mbx->timeout = E1000_VF_MBX_INIT_TIMEOUT;
+
+               msgbuf[0] = E1000_VF_RESET;
+               mbx->ops.write_posted(hw, msgbuf, 1, 0);
+
+               msec_delay(10);
+
+               /* set our "perm_addr" based on info provided by PF */
+               ret_val = mbx->ops.read_posted(hw, msgbuf, 3, 0);
+               if (!ret_val) {
+                       if (msgbuf[0] == (E1000_VF_RESET |
+                                               E1000_VT_MSGTYPE_ACK))
+                               memcpy(hw->mac.perm_addr, addr, 6);
+                       else
+                               ret_val = -E1000_ERR_MAC_INIT;
+               }
+       }
+
+       return ret_val;
+}
+
+/**
+ *  e1000_init_hw_vf - Inits the HW
+ *  @hw: pointer to the HW structure
+ *
+ *  Not much to do here except clear the PF Reset indication if there is one.
+ **/
+static s32 e1000_init_hw_vf(struct e1000_hw *hw)
+{
+       DEBUGFUNC("e1000_init_hw_vf");
+
+       /* attempt to set and restore our mac address */
+       e1000_rar_set_vf(hw, hw->mac.addr, 0);
+
+       return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_rar_set_vf - set device MAC address
+ *  @hw: pointer to the HW structure
+ *  @addr: pointer to the receive address
+ *  @index receive address array register
+ **/
+static void e1000_rar_set_vf(struct e1000_hw *hw, u8 * addr, u32 index)
+{
+       struct e1000_mbx_info *mbx = &hw->mbx;
+       u32 msgbuf[3];
+       u8 *msg_addr = (u8 *)(&msgbuf[1]);
+       s32 ret_val;
+
+       memset(msgbuf, 0, 12);
+       msgbuf[0] = E1000_VF_SET_MAC_ADDR;
+       memcpy(msg_addr, addr, 6);
+       ret_val = mbx->ops.write_posted(hw, msgbuf, 3, 0);
+
+       if (!ret_val)
+               ret_val = mbx->ops.read_posted(hw, msgbuf, 3, 0);
+
+       msgbuf[0] &= ~E1000_VT_MSGTYPE_CTS;
+
+       /* if nacked the address was rejected, use "perm_addr" */
+       if (!ret_val &&
+           (msgbuf[0] == (E1000_VF_SET_MAC_ADDR | E1000_VT_MSGTYPE_NACK)))
+               e1000_read_mac_addr_vf(hw);
+}
+
+/**
+ *  e1000_hash_mc_addr_vf - Generate a multicast hash value
+ *  @hw: pointer to the HW structure
+ *  @mc_addr: pointer to a multicast address
+ *
+ *  Generates a multicast address hash value which is used to determine
+ *  the multicast filter table array address and new table value.
+ **/
+static u32 e1000_hash_mc_addr_vf(struct e1000_hw *hw, u8 *mc_addr)
+{
+       u32 hash_value, hash_mask;
+       u8 bit_shift = 0;
+
+       DEBUGFUNC("e1000_hash_mc_addr_generic");
+
+       /* Register count multiplied by bits per register */
+       hash_mask = (hw->mac.mta_reg_count * 32) - 1;
+
+       /*
+        * The bit_shift is the number of left-shifts
+        * where 0xFF would still fall within the hash mask.
+        */
+       while (hash_mask >> bit_shift != 0xFF)
+               bit_shift++;
+
+       hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
+                                 (((u16) mc_addr[5]) << bit_shift)));
+
+       return hash_value;
+}
+
+/**
+ *  e1000_update_mc_addr_list_vf - Update Multicast addresses
+ *  @hw: pointer to the HW structure
+ *  @mc_addr_list: array of multicast addresses to program
+ *  @mc_addr_count: number of multicast addresses to program
+ *
+ *  Updates the Multicast Table Array.
+ *  The caller must have a packed mc_addr_list of multicast addresses.
+ **/
+void e1000_update_mc_addr_list_vf(struct e1000_hw *hw,
+                                  u8 *mc_addr_list, u32 mc_addr_count)
+{
+       struct e1000_mbx_info *mbx = &hw->mbx;
+       u32 msgbuf[E1000_VFMAILBOX_SIZE];
+       u16 *hash_list = (u16 *)&msgbuf[1];
+       u32 hash_value;
+       u32 i;
+
+       DEBUGFUNC("e1000_update_mc_addr_list_vf");
+
+       /* Each entry in the list uses 1 16 bit word.  We have 30
+        * 16 bit words available in our HW msg buffer (minus 1 for the
+        * msg type).  That's 30 hash values if we pack 'em right.  If
+        * there are more than 30 MC addresses to add then punt the
+        * extras for now and then add code to handle more than 30 later.
+        * It would be unusual for a server to request that many multi-cast
+        * addresses except for in large enterprise network environments.
+        */
+
+       DEBUGOUT1("MC Addr Count = %d\n", mc_addr_count);
+
+       if (mc_addr_count > 30) {
+               msgbuf[0] |= E1000_VF_SET_MULTICAST_OVERFLOW;
+               mc_addr_count = 30;
+       }
+
+       msgbuf[0] = E1000_VF_SET_MULTICAST;
+       msgbuf[0] |= mc_addr_count << E1000_VT_MSGINFO_SHIFT;
+
+       for (i = 0; i < mc_addr_count; i++) {
+               hash_value = e1000_hash_mc_addr_vf(hw, mc_addr_list);
+               DEBUGOUT1("Hash value = 0x%03X\n", hash_value);
+               hash_list[i] = hash_value & 0x0FFF;
+               mc_addr_list += ETH_ADDR_LEN;
+       }
+
+       mbx->ops.write_posted(hw, msgbuf, E1000_VFMAILBOX_SIZE, 0);
+}
+
+/**
+ *  e1000_vfta_set_vf - Set/Unset vlan filter table address
+ *  @hw: pointer to the HW structure
+ *  @vid: determines the vfta register and bit to set/unset
+ *  @set: if TRUE then set bit, else clear bit
+ *
+ *  Asks the PF to update the VLAN filter table on the VF's behalf via
+ *  a two-word mailbox message: word 0 is the command, word 1 the vid.
+ **/
+void e1000_vfta_set_vf(struct e1000_hw *hw, u16 vid, bool set)
+{
+	struct e1000_mbx_info *mbx = &hw->mbx;
+	u32 msgbuf[2];
+
+	/* The 8 bit MSG INFO field set to TRUE indicates "add" */
+	msgbuf[0] = set ? (E1000_VF_SET_VLAN | E1000_VF_SET_VLAN_ADD)
+			: E1000_VF_SET_VLAN;
+	msgbuf[1] = vid;
+
+	mbx->ops.write_posted(hw, msgbuf, 2, 0);
+}
+
+/**
+ *  e1000_rlpml_set_vf - Set the maximum receive packet length
+ *  @hw: pointer to the HW structure
+ *  @max_size: value to assign to max frame size
+ *
+ *  Posts a two-word LPE mailbox message so the PF programs the
+ *  receive long-packet-enable / max frame size for this VF.
+ **/
+void e1000_rlpml_set_vf(struct e1000_hw *hw, u16 max_size)
+{
+	struct e1000_mbx_info *mbx = &hw->mbx;
+	u32 msgbuf[2];
+
+	msgbuf[0] = E1000_VF_SET_LPE;
+	msgbuf[1] = max_size;
+
+	mbx->ops.write_posted(hw, msgbuf, 2, 0);
+}
+
+/**
+ *  e1000_promisc_set_vf - Set flags for Unicast or Multicast promisc
+ *  @hw: pointer to the HW structure
+ *  @type: requested promiscuous mode (see enum e1000_promisc_type)
+ *
+ *  Sends a SET_PROMISC mailbox message to the PF and waits for its
+ *  reply.  Returns E1000_SUCCESS when the PF ACKs the request,
+ *  -E1000_ERR_MAC_INIT on NACK, write/read failure, or unknown type.
+ **/
+s32 e1000_promisc_set_vf(struct e1000_hw *hw, enum e1000_promisc_type type)
+{
+	struct e1000_mbx_info *mbx = &hw->mbx;
+	u32 msgbuf = E1000_VF_SET_PROMISC;
+	s32 ret_val;
+
+	switch (type) {
+	case e1000_promisc_multicast:
+		msgbuf |= E1000_VF_SET_PROMISC_MULTICAST;
+		break;
+	case e1000_promisc_enabled:
+		msgbuf |= E1000_VF_SET_PROMISC_MULTICAST;
+		/* FALLTHROUGH - "enabled" means unicast AND multicast */
+	case e1000_promisc_unicast:
+		msgbuf |= E1000_VF_SET_PROMISC_UNICAST;
+		/* FALLTHROUGH - no extra flags for "disabled" */
+	case e1000_promisc_disabled:
+		break;
+	default:
+		return -E1000_ERR_MAC_INIT;
+	}
+
+        ret_val = mbx->ops.write_posted(hw, &msgbuf, 1, 0);
+
+	if (!ret_val)
+		ret_val = mbx->ops.read_posted(hw, &msgbuf, 1, 0);
+
+	/* PF replied but without the ACK bit: request was refused */
+	if (!ret_val && !(msgbuf & E1000_VT_MSGTYPE_ACK))
+		ret_val = -E1000_ERR_MAC_INIT;
+
+	return ret_val;
+}
+
+/**
+ *  e1000_read_mac_addr_vf - Read device MAC address
+ *  @hw: pointer to the HW structure
+ *
+ *  Copies the stored permanent station address into the active
+ *  MAC address field.  Always returns E1000_SUCCESS.
+ **/
+static s32 e1000_read_mac_addr_vf(struct e1000_hw *hw)
+{
+	int idx;
+
+	for (idx = ETH_ADDR_LEN - 1; idx >= 0; idx--)
+		hw->mac.addr[idx] = hw->mac.perm_addr[idx];
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_check_for_link_vf - Check for link for a virtual interface
+ *  @hw: pointer to the HW structure
+ *
+ *  Checks to see if the underlying PF is still talking to the VF and
+ *  if it is then it reports the link state to the hardware, otherwise
+ *  it reports link down and returns an error.
+ *
+ *  Returns E1000_SUCCESS (also on transient mailbox collisions) or
+ *  -E1000_ERR_MAC_INIT when CTS was lost or the mailbox timed out.
+ **/
+static s32 e1000_check_for_link_vf(struct e1000_hw *hw)
+{
+	struct e1000_mbx_info *mbx = &hw->mbx;
+	struct e1000_mac_info *mac = &hw->mac;
+	s32 ret_val = E1000_SUCCESS;
+	u32 in_msg = 0;
+
+	DEBUGFUNC("e1000_check_for_link_vf");
+
+	/*
+	 * We only want to run this if there has been a rst asserted.
+	 * in this case that could mean a link change, device reset,
+	 * or a virtual function reset
+	 */
+
+	/* If we were hit with a reset or timeout drop the link */
+	/* NOTE(review): this assumes ops.check_for_rst() returns 0
+	 * (success) when a reset WAS detected - confirm against the
+	 * mailbox implementation in e1000_mbx.c */
+	if (!mbx->ops.check_for_rst(hw, 0) || !mbx->timeout)
+		mac->get_link_status = TRUE;
+
+	if (!mac->get_link_status)
+		goto out;
+
+	/* if link status is down no point in checking to see if pf is up */
+	if (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU))
+		goto out;
+
+	/* if the read failed it could just be a mailbox collision, best wait
+	 * until we are called again and don't report an error */
+	if (mbx->ops.read(hw, &in_msg, 1, 0))
+		goto out;
+
+	/* if incoming message isn't clear to send we are waiting on response */
+	if (!(in_msg & E1000_VT_MSGTYPE_CTS)) {
+		/* message is not CTS and is NACK we have lost CTS status */
+		if (in_msg & E1000_VT_MSGTYPE_NACK)
+			ret_val = -E1000_ERR_MAC_INIT;
+		goto out;
+	}
+
+	/* at this point we know the PF is talking to us, check and see if
+	 * we are still accepting timeout or if we had a timeout failure.
+	 * if we failed then we will need to reinit */
+	if (!mbx->timeout) {
+		ret_val = -E1000_ERR_MAC_INIT;
+		goto out;
+	}
+
+	/* if we passed all the tests above then the link is up and we no
+	 * longer need to check for link */
+	mac->get_link_status = FALSE;
+
+out:
+	return ret_val;
+}
+
diff --git a/lib/librte_pmd_igb/igb/e1000_vf.h b/lib/librte_pmd_igb/igb/e1000_vf.h
new file mode 100644 (file)
index 0000000..b2fd8a1
--- /dev/null
@@ -0,0 +1,294 @@
+/******************************************************************************
+
+  Copyright (c) 2001-2011, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#ifndef _E1000_VF_H_
+#define _E1000_VF_H_
+
+#include "e1000_osdep.h"
+#include "e1000_regs.h"
+#include "e1000_defines.h"
+
+struct e1000_hw;
+
+#define E1000_DEV_ID_82576_VF                 0x10CA
+#define E1000_DEV_ID_I350_VF                  0x1520
+
+#define E1000_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */
+
+/* Additional Descriptor Control definitions */
+#define E1000_TXDCTL_QUEUE_ENABLE  0x02000000 /* Enable specific Tx Queue */
+#define E1000_RXDCTL_QUEUE_ENABLE  0x02000000 /* Enable specific Rx Queue */
+
+/* SRRCTL bit definitions */
+#define E1000_SRRCTL_BSIZEPKT_SHIFT                     10 /* Shift _right_ */
+#define E1000_SRRCTL_BSIZEHDRSIZE_MASK                  0x00000F00
+#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT                 2  /* Shift _left_ */
+#define E1000_SRRCTL_DESCTYPE_LEGACY                    0x00000000
+#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF                0x02000000
+#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT                 0x04000000
+#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS          0x0A000000
+#define E1000_SRRCTL_DESCTYPE_HDR_REPLICATION           0x06000000
+#define E1000_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000
+#define E1000_SRRCTL_DESCTYPE_MASK                      0x0E000000
+#define E1000_SRRCTL_DROP_EN                            0x80000000
+
+#define E1000_SRRCTL_BSIZEPKT_MASK      0x0000007F
+#define E1000_SRRCTL_BSIZEHDR_MASK      0x00003F00
+
+/* Interrupt Defines */
+#define E1000_EICR     0x01580  /* Ext. Interrupt Cause Read - R/clr */
+#define E1000_EITR(_n) (0x01680 + ((_n) << 2))
+#define E1000_EICS     0x01520  /* Ext. Interrupt Cause Set - W0 */
+#define E1000_EIMS     0x01524  /* Ext. Interrupt Mask Set/Read - RW */
+#define E1000_EIMC     0x01528  /* Ext. Interrupt Mask Clear - WO */
+#define E1000_EIAC     0x0152C  /* Ext. Interrupt Auto Clear - RW */
+#define E1000_EIAM     0x01530  /* Ext. Interrupt Ack Auto Clear Mask - RW */
+#define E1000_IVAR0    0x01700  /* Interrupt Vector Allocation (array) - RW */
+#define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */
+#define E1000_IVAR_VALID        0x80
+
+/* Receive Descriptor - Advanced
+ * Two overlaid layouts: 'read' is what software programs into the ring,
+ * 'wb' is what the hardware writes back on packet completion. */
+union e1000_adv_rx_desc {
+	struct {
+		u64 pkt_addr;             /* Packet buffer address */
+		u64 hdr_addr;             /* Header buffer address */
+	} read;
+	struct {
+		struct {
+			union {
+				u32 data;
+				struct {
+					/* RSS type, Packet type */
+					u16 pkt_info;
+					/* Split Header, header buffer len */
+					u16 hdr_info;
+				} hs_rss;
+			} lo_dword;
+			union {
+				u32 rss;          /* RSS Hash */
+				struct {
+					u16 ip_id;    /* IP id */
+					u16 csum;     /* Packet Checksum */
+				} csum_ip;
+			} hi_dword;
+		} lower;
+		struct {
+			u32 status_error;     /* ext status/error */
+			u16 length;           /* Packet length */
+			u16 vlan;             /* VLAN tag */
+		} upper;
+	} wb;  /* writeback */
+};
+
+#define E1000_RXDADV_HDRBUFLEN_MASK      0x7FE0
+#define E1000_RXDADV_HDRBUFLEN_SHIFT     5
+
+/* Transmit Descriptor - Advanced
+ * 'read' is programmed by software; 'wb' holds the hardware's
+ * writeback (completion status). */
+union e1000_adv_tx_desc {
+	struct {
+		u64 buffer_addr;    /* Address of descriptor's data buf */
+		u32 cmd_type_len;   /* DCMD/DTYP flags plus buffer length */
+		u32 olinfo_status;  /* offload info, incl. PAYLEN field */
+	} read;
+	struct {
+		u64 rsvd;       /* Reserved */
+		u32 nxtseq_seed;
+		u32 status;
+	} wb;
+};
+
+/* Adv Transmit Descriptor Config Masks */
+#define E1000_ADVTXD_DTYP_CTXT    0x00200000 /* Advanced Context Descriptor */
+#define E1000_ADVTXD_DTYP_DATA    0x00300000 /* Advanced Data Descriptor */
+#define E1000_ADVTXD_DCMD_EOP     0x01000000 /* End of Packet */
+#define E1000_ADVTXD_DCMD_IFCS    0x02000000 /* Insert FCS (Ethernet CRC) */
+#define E1000_ADVTXD_DCMD_RS      0x08000000 /* Report Status */
+#define E1000_ADVTXD_DCMD_DEXT    0x20000000 /* Descriptor extension (1=Adv) */
+#define E1000_ADVTXD_DCMD_VLE     0x40000000 /* VLAN pkt enable */
+#define E1000_ADVTXD_DCMD_TSE     0x80000000 /* TCP Seg enable */
+#define E1000_ADVTXD_PAYLEN_SHIFT    14 /* Adv desc PAYLEN shift */
+
+/* Context descriptors: carry per-packet offload parameters (VLAN,
+ * MAC/IP header lengths, L4 command, MSS) for the advanced Tx path;
+ * field layout uses the E1000_ADVTXD_* shifts defined below. */
+struct e1000_adv_tx_context_desc {
+	u32 vlan_macip_lens;
+	u32 seqnum_seed;
+	u32 type_tucmd_mlhl;
+	u32 mss_l4len_idx;
+};
+
+#define E1000_ADVTXD_MACLEN_SHIFT    9  /* Adv ctxt desc mac len shift */
+#define E1000_ADVTXD_TUCMD_IPV4    0x00000400  /* IP Packet Type: 1=IPv4 */
+#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800  /* L4 Packet TYPE of TCP */
+#define E1000_ADVTXD_L4LEN_SHIFT     8  /* Adv ctxt L4LEN shift */
+#define E1000_ADVTXD_MSS_SHIFT      16  /* Adv ctxt MSS shift */
+
+/* VF-flavour MAC identifiers (82576 VF and I350 VF). */
+enum e1000_mac_type {
+	e1000_undefined = 0,
+	e1000_vfadapt,
+	e1000_vfadapt_i350,
+	e1000_num_macs  /* List is 1-based, so subtract 1 for TRUE count. */
+};
+
+/* Software-maintained VF statistics.
+ * base_*: counter values captured at stats reset (subtracted out);
+ * last_*: previous raw 32-bit register reading, used to detect wrap;
+ * unprefixed: accumulated 64-bit totals presented to the stack.
+ * (Field meanings inferred from the standard Intel register names,
+ * e.g. gprc = good packets received - confirm against the datasheet.) */
+struct e1000_vf_stats {
+	u64 base_gprc;
+	u64 base_gptc;
+	u64 base_gorc;
+	u64 base_gotc;
+	u64 base_mprc;
+	u64 base_gotlbc;
+	u64 base_gptlbc;
+	u64 base_gorlbc;
+	u64 base_gprlbc;
+
+	u32 last_gprc;
+	u32 last_gptc;
+	u32 last_gorc;
+	u32 last_gotc;
+	u32 last_mprc;
+	u32 last_gotlbc;
+	u32 last_gptlbc;
+	u32 last_gorlbc;
+	u32 last_gprlbc;
+
+	u64 gprc;
+	u64 gptc;
+	u64 gorc;
+	u64 gotc;
+	u64 mprc;
+	u64 gotlbc;
+	u64 gptlbc;
+	u64 gorlbc;
+	u64 gprlbc;
+};
+
+#include "e1000_mbx.h"
+
+/* MAC operation dispatch table; populated by the VF init code.
+ * s32-returning ops yield E1000_SUCCESS (0) or a -E1000_ERR_* code. */
+struct e1000_mac_operations {
+	/* Function pointers for the MAC. */
+	s32  (*init_params)(struct e1000_hw *);
+	s32  (*check_for_link)(struct e1000_hw *);
+	void (*clear_vfta)(struct e1000_hw *);
+	s32  (*get_bus_info)(struct e1000_hw *);
+	s32  (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *);
+	void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32);
+	s32  (*reset_hw)(struct e1000_hw *);
+	s32  (*init_hw)(struct e1000_hw *);
+	s32  (*setup_link)(struct e1000_hw *);
+	void (*write_vfta)(struct e1000_hw *, u32, u32);
+	void (*rar_set)(struct e1000_hw *, u8*, u32);
+	s32  (*read_mac_addr)(struct e1000_hw *);
+};
+
+struct e1000_mac_info {
+	struct e1000_mac_operations ops;
+	u8 addr[6];       /* active station address */
+	u8 perm_addr[6];  /* permanent (factory) station address */
+
+	enum e1000_mac_type type;
+
+	u16 mta_reg_count;
+	u16 rar_entry_count;
+
+	/* TRUE => link state unknown; cleared by check_for_link */
+	bool get_link_status;
+};
+
+/* PF<->VF mailbox dispatch table.  For read/write the arguments are
+ * (hw, buffer, word count, mailbox id); *_posted variants additionally
+ * wait for the peer's ACK.  All return E1000_SUCCESS or an error. */
+struct e1000_mbx_operations {
+	s32 (*init_params)(struct e1000_hw *hw);
+	s32 (*read)(struct e1000_hw *, u32 *, u16,  u16);
+	s32 (*write)(struct e1000_hw *, u32 *, u16, u16);
+	s32 (*read_posted)(struct e1000_hw *, u32 *, u16,  u16);
+	s32 (*write_posted)(struct e1000_hw *, u32 *, u16, u16);
+	s32 (*check_for_msg)(struct e1000_hw *, u16);
+	s32 (*check_for_ack)(struct e1000_hw *, u16);
+	s32 (*check_for_rst)(struct e1000_hw *, u16);
+};
+
+/* Counters of mailbox traffic observed by this VF. */
+struct e1000_mbx_stats {
+	u32 msgs_tx;  /* messages sent */
+	u32 msgs_rx;  /* messages received */
+
+	u32 acks;     /* ACKs seen */
+	u32 reqs;     /* requests seen */
+	u32 rsts;     /* resets seen */
+};
+
+struct e1000_mbx_info {
+	struct e1000_mbx_operations ops;
+	struct e1000_mbx_stats stats;
+	/* 0 means the mailbox has timed out/is unusable - see
+	 * e1000_check_for_link_vf, which drops link on !timeout */
+	u32 timeout;
+	u32 usec_delay;  /* delay between polls, in microseconds */
+	u16 size;        /* mailbox buffer size in 32-bit words */
+};
+
+/* Device-specific state for the VF flavour of the hardware. */
+struct e1000_dev_spec_vf {
+	u32 vf_number;    /* index of this VF within the PF */
+	u32 v2p_mailbox;  /* cached VF-to-PF mailbox register state */
+};
+
+/* Top-level hardware handle shared by all VF code paths. */
+struct e1000_hw {
+	void *back;              /* opaque pointer back to the OS adapter */
+
+	u8 *hw_addr;             /* mapped device register base */
+	u8 *flash_address;
+	unsigned long io_base;
+
+	struct e1000_mac_info  mac;
+	struct e1000_mbx_info mbx;
+
+	union {
+		struct e1000_dev_spec_vf vf;
+	} dev_spec;
+
+	/* PCI identity, as read from config space */
+	u16 device_id;
+	u16 subsystem_vendor_id;
+	u16 subsystem_device_id;
+	u16 vendor_id;
+
+	u8  revision_id;
+};
+
+/* Promiscuous-mode request values for e1000_promisc_set_vf(). */
+enum e1000_promisc_type {
+	e1000_promisc_disabled = 0,   /* all promisc modes disabled */
+	e1000_promisc_unicast = 1,    /* unicast promiscuous enabled */
+	e1000_promisc_multicast = 2,  /* multicast promiscuous enabled */
+	e1000_promisc_enabled = 3,    /* both uni and multicast promisc */
+	e1000_num_promisc_types
+};
+
+/* These functions must be implemented by drivers */
+s32  e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
+void e1000_vfta_set_vf(struct e1000_hw *, u16, bool);
+void e1000_rlpml_set_vf(struct e1000_hw *, u16);
+s32 e1000_promisc_set_vf(struct e1000_hw *, enum e1000_promisc_type);
+#endif /* _E1000_VF_H_ */
diff --git a/lib/librte_pmd_igb/igb/if_igb.c b/lib/librte_pmd_igb/igb/if_igb.c
new file mode 100644 (file)
index 0000000..4aa08f6
--- /dev/null
@@ -0,0 +1,5567 @@
+/******************************************************************************
+
+  Copyright (c) 2001-2011, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+
+#ifdef HAVE_KERNEL_OPTION_HEADERS
+#include "opt_device_polling.h"
+#include "opt_inet.h"
+#include "opt_altq.h"
+#endif
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#if __FreeBSD_version >= 800000
+#include <sys/buf_ring.h>
+#endif
+#include <sys/bus.h>
+#include <sys/endian.h>
+#include <sys/kernel.h>
+#include <sys/kthread.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <sys/module.h>
+#include <sys/rman.h>
+#include <sys/socket.h>
+#include <sys/sockio.h>
+#include <sys/sysctl.h>
+#include <sys/taskqueue.h>
+#include <sys/eventhandler.h>
+#include <sys/pcpu.h>
+#include <sys/smp.h>
+#include <machine/smp.h>
+#include <machine/bus.h>
+#include <machine/resource.h>
+
+#include <net/bpf.h>
+#include <net/ethernet.h>
+#include <net/if.h>
+#include <net/if_arp.h>
+#include <net/if_dl.h>
+#include <net/if_media.h>
+
+#include <net/if_types.h>
+#include <net/if_vlan_var.h>
+
+#include <netinet/in_systm.h>
+#include <netinet/in.h>
+#include <netinet/if_ether.h>
+#include <netinet/ip.h>
+#include <netinet/ip6.h>
+#include <netinet/tcp.h>
+#include <netinet/tcp_lro.h>
+#include <netinet/udp.h>
+
+#include <machine/in_cksum.h>
+#include <dev/led/led.h>
+#include <dev/pci/pcivar.h>
+#include <dev/pci/pcireg.h>
+
+#include "e1000_api.h"
+#include "e1000_82575.h"
+#include "if_igb.h"
+
+/*********************************************************************
+ *  Set this to one to display debug statistics
+ *********************************************************************/
+int    igb_display_debug_stats = 0;
+
+/*********************************************************************
+ *  Driver version:
+ *********************************************************************/
+char igb_driver_version[] = "version - 2.2.3";
+
+
+/*********************************************************************
+ *  PCI Device ID Table
+ *
+ *  Used by probe to select devices to load on
+ *  Last field stores an index into e1000_strings
+ *  Last entry must be all 0s
+ *
+ *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
+ *********************************************************************/
+
+/* 0x8086 is the Intel PCI vendor id; PCI_ANY_ID wildcards the
+ * subvendor/subdevice fields.  The final index selects the branding
+ * string from igb_strings[]. */
+static igb_vendor_info_t igb_vendor_info_array[] =
+{
+	{ 0x8086, E1000_DEV_ID_82575EB_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
+	{ 0x8086, E1000_DEV_ID_82575EB_FIBER_SERDES,
+						PCI_ANY_ID, PCI_ANY_ID, 0},
+	{ 0x8086, E1000_DEV_ID_82575GB_QUAD_COPPER,
+						PCI_ANY_ID, PCI_ANY_ID, 0},
+	{ 0x8086, E1000_DEV_ID_82576,		PCI_ANY_ID, PCI_ANY_ID, 0},
+	{ 0x8086, E1000_DEV_ID_82576_NS,	PCI_ANY_ID, PCI_ANY_ID, 0},
+	{ 0x8086, E1000_DEV_ID_82576_NS_SERDES,	PCI_ANY_ID, PCI_ANY_ID, 0},
+	{ 0x8086, E1000_DEV_ID_82576_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
+	{ 0x8086, E1000_DEV_ID_82576_SERDES,	PCI_ANY_ID, PCI_ANY_ID, 0},
+	{ 0x8086, E1000_DEV_ID_82576_SERDES_QUAD,
+						PCI_ANY_ID, PCI_ANY_ID, 0},
+	{ 0x8086, E1000_DEV_ID_82576_QUAD_COPPER,
+						PCI_ANY_ID, PCI_ANY_ID, 0},
+	{ 0x8086, E1000_DEV_ID_82576_QUAD_COPPER_ET2,
+						PCI_ANY_ID, PCI_ANY_ID, 0},
+	{ 0x8086, E1000_DEV_ID_82576_VF,	PCI_ANY_ID, PCI_ANY_ID, 0},
+	{ 0x8086, E1000_DEV_ID_82580_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
+	{ 0x8086, E1000_DEV_ID_82580_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
+	{ 0x8086, E1000_DEV_ID_82580_SERDES,	PCI_ANY_ID, PCI_ANY_ID, 0},
+	{ 0x8086, E1000_DEV_ID_82580_SGMII,	PCI_ANY_ID, PCI_ANY_ID, 0},
+	{ 0x8086, E1000_DEV_ID_82580_COPPER_DUAL,
+						PCI_ANY_ID, PCI_ANY_ID, 0},
+	{ 0x8086, E1000_DEV_ID_82580_QUAD_FIBER,
+						PCI_ANY_ID, PCI_ANY_ID, 0},
+	{ 0x8086, E1000_DEV_ID_DH89XXCC_SERDES,	PCI_ANY_ID, PCI_ANY_ID, 0},
+	{ 0x8086, E1000_DEV_ID_DH89XXCC_SGMII,	PCI_ANY_ID, PCI_ANY_ID, 0},
+	{ 0x8086, E1000_DEV_ID_DH89XXCC_SFP,	PCI_ANY_ID, PCI_ANY_ID, 0},
+	{ 0x8086, E1000_DEV_ID_DH89XXCC_BACKPLANE,
+						PCI_ANY_ID, PCI_ANY_ID, 0},
+	{ 0x8086, E1000_DEV_ID_I350_COPPER,	PCI_ANY_ID, PCI_ANY_ID, 0},
+	{ 0x8086, E1000_DEV_ID_I350_FIBER,	PCI_ANY_ID, PCI_ANY_ID, 0},
+	{ 0x8086, E1000_DEV_ID_I350_SERDES,	PCI_ANY_ID, PCI_ANY_ID, 0},
+	{ 0x8086, E1000_DEV_ID_I350_SGMII,	PCI_ANY_ID, PCI_ANY_ID, 0},
+	{ 0x8086, E1000_DEV_ID_I350_VF,		PCI_ANY_ID, PCI_ANY_ID, 0},
+	/* required last entry */
+	{ 0, 0, 0, 0, 0}
+};
+
+/*********************************************************************
+ *  Table of branding strings for all supported NICs.
+ *********************************************************************/
+
+static char *igb_strings[] = {
+	/* index 0 - referenced by every igb_vendor_info_array entry */
+	"Intel(R) PRO/1000 Network Connection"
+};
+
+/*********************************************************************
+ *  Function prototypes
+ *********************************************************************/
+static int     igb_probe(device_t);
+static int     igb_attach(device_t);
+static int     igb_detach(device_t);
+static int     igb_shutdown(device_t);
+static int     igb_suspend(device_t);
+static int     igb_resume(device_t);
+static void    igb_start(struct ifnet *);
+static void    igb_start_locked(struct tx_ring *, struct ifnet *ifp);
+#if __FreeBSD_version >= 800000
+static int     igb_mq_start(struct ifnet *, struct mbuf *);
+static int     igb_mq_start_locked(struct ifnet *,
+                   struct tx_ring *, struct mbuf *);
+static void    igb_qflush(struct ifnet *);
+#endif
+static int     igb_ioctl(struct ifnet *, u_long, caddr_t);
+static void    igb_init(void *);
+static void    igb_init_locked(struct adapter *);
+static void    igb_stop(void *);
+static void    igb_media_status(struct ifnet *, struct ifmediareq *);
+static int     igb_media_change(struct ifnet *);
+static void    igb_identify_hardware(struct adapter *);
+static int     igb_allocate_pci_resources(struct adapter *);
+static int     igb_allocate_msix(struct adapter *);
+static int     igb_allocate_legacy(struct adapter *);
+static int     igb_setup_msix(struct adapter *);
+static void    igb_free_pci_resources(struct adapter *);
+static void    igb_local_timer(void *);
+static void    igb_reset(struct adapter *);
+static int     igb_setup_interface(device_t, struct adapter *);
+static int     igb_allocate_queues(struct adapter *);
+static void    igb_configure_queues(struct adapter *);
+
+static int     igb_allocate_transmit_buffers(struct tx_ring *);
+static void    igb_setup_transmit_structures(struct adapter *);
+static void    igb_setup_transmit_ring(struct tx_ring *);
+static void    igb_initialize_transmit_units(struct adapter *);
+static void    igb_free_transmit_structures(struct adapter *);
+static void    igb_free_transmit_buffers(struct tx_ring *);
+
+static int     igb_allocate_receive_buffers(struct rx_ring *);
+static int     igb_setup_receive_structures(struct adapter *);
+static int     igb_setup_receive_ring(struct rx_ring *);
+static void    igb_initialize_receive_units(struct adapter *);
+static void    igb_free_receive_structures(struct adapter *);
+static void    igb_free_receive_buffers(struct rx_ring *);
+static void    igb_free_receive_ring(struct rx_ring *);
+
+static void    igb_enable_intr(struct adapter *);
+static void    igb_disable_intr(struct adapter *);
+static void    igb_update_stats_counters(struct adapter *);
+static bool    igb_txeof(struct tx_ring *);
+
+static __inline        void igb_rx_discard(struct rx_ring *, int);
+static __inline void igb_rx_input(struct rx_ring *,
+                   struct ifnet *, struct mbuf *, u32);
+
+static bool    igb_rxeof(struct igb_queue *, int, int *);
+static void    igb_rx_checksum(u32, struct mbuf *, u32);
+static int     igb_tx_ctx_setup(struct tx_ring *, struct mbuf *);
+static bool    igb_tso_setup(struct tx_ring *, struct mbuf *, u32 *);
+static void    igb_set_promisc(struct adapter *);
+static void    igb_disable_promisc(struct adapter *);
+static void    igb_set_multi(struct adapter *);
+static void    igb_update_link_status(struct adapter *);
+static void    igb_refresh_mbufs(struct rx_ring *, int);
+
+static void    igb_register_vlan(void *, struct ifnet *, u16);
+static void    igb_unregister_vlan(void *, struct ifnet *, u16);
+static void    igb_setup_vlan_hw_support(struct adapter *);
+
+static int     igb_xmit(struct tx_ring *, struct mbuf **);
+static int     igb_dma_malloc(struct adapter *, bus_size_t,
+                   struct igb_dma_alloc *, int);
+static void    igb_dma_free(struct adapter *, struct igb_dma_alloc *);
+static int     igb_sysctl_nvm_info(SYSCTL_HANDLER_ARGS);
+static void    igb_print_nvm_info(struct adapter *);
+static int     igb_is_valid_ether_addr(u8 *);
+static void     igb_add_hw_stats(struct adapter *);
+
+static void    igb_vf_init_stats(struct adapter *);
+static void    igb_update_vf_stats_counters(struct adapter *);
+
+/* Management and WOL Support */
+static void    igb_init_manageability(struct adapter *);
+static void    igb_release_manageability(struct adapter *);
+static void     igb_get_hw_control(struct adapter *);
+static void     igb_release_hw_control(struct adapter *);
+static void     igb_enable_wakeup(device_t);
+static void     igb_led_func(void *, int);
+
+static int     igb_irq_fast(void *);
+static void    igb_msix_que(void *);
+static void    igb_msix_link(void *);
+static void    igb_handle_que(void *context, int pending);
+static void    igb_handle_link(void *context, int pending);
+
+static void    igb_set_sysctl_value(struct adapter *, const char *,
+                   const char *, int *, int);
+static int     igb_set_flowcntl(SYSCTL_HANDLER_ARGS);
+
+#ifdef DEVICE_POLLING
+static poll_handler_t igb_poll;
+#endif /* POLLING */
+
+/*********************************************************************
+ *  FreeBSD Device Interface Entry Points
+ *********************************************************************/
+
+/* newbus glue: method table, driver descriptor and module registration
+ * binding the "igb" driver to the pci bus. */
+static device_method_t igb_methods[] = {
+	/* Device interface */
+	DEVMETHOD(device_probe, igb_probe),
+	DEVMETHOD(device_attach, igb_attach),
+	DEVMETHOD(device_detach, igb_detach),
+	DEVMETHOD(device_shutdown, igb_shutdown),
+	DEVMETHOD(device_suspend, igb_suspend),
+	DEVMETHOD(device_resume, igb_resume),
+	{0, 0}  /* terminator */
+};
+
+static driver_t igb_driver = {
+	"igb", igb_methods, sizeof(struct adapter),
+};
+
+static devclass_t igb_devclass;
+DRIVER_MODULE(igb, pci, igb_driver, igb_devclass, 0, 0);
+MODULE_DEPEND(igb, pci, 1, 1, 1);
+MODULE_DEPEND(igb, ether, 1, 1, 1);
+
+/*********************************************************************
+ *  Tunable default values.
+ *********************************************************************/
+
+/* Descriptor defaults */
+static int igb_rxd = IGB_DEFAULT_RXD;
+static int igb_txd = IGB_DEFAULT_TXD;
+TUNABLE_INT("hw.igb.rxd", &igb_rxd);
+TUNABLE_INT("hw.igb.txd", &igb_txd);
+
+/*
+** AIM: Adaptive Interrupt Moderation
+** which means that the interrupt rate
+** is varied over time based on the
+** traffic for that interrupt vector
+*/
+static int igb_enable_aim = TRUE;
+TUNABLE_INT("hw.igb.enable_aim", &igb_enable_aim);
+
+/*
+ * MSIX should be the default for best performance,
+ * but this allows it to be forced off for testing.
+ */
+static int igb_enable_msix = 1;
+TUNABLE_INT("hw.igb.enable_msix", &igb_enable_msix);
+
+/*
+** Tunable Interrupt rate
+*/
+static int igb_max_interrupt_rate = 8000;
+TUNABLE_INT("hw.igb.max_interrupt_rate", &igb_max_interrupt_rate);
+
+/*
+** Header split causes the packet header to
+** be DMA'd to a separate mbuf from the payload.
+** this can have memory alignment benefits. But
+** another plus is that small packets often fit
+** into the header and thus use no cluster. It's
+** a very workload dependent type feature.
+*/
+static bool igb_header_split = FALSE;
+TUNABLE_INT("hw.igb.hdr_split", &igb_header_split);
+
+/*
+** This will autoconfigure based on
+** the number of CPUs if left at 0.
+*/
+static int igb_num_queues = 0;
+TUNABLE_INT("hw.igb.num_queues", &igb_num_queues);
+
+/* How many packets rxeof tries to clean at a time */
+static int igb_rx_process_limit = 100;
+TUNABLE_INT("hw.igb.rx_process_limit", &igb_rx_process_limit);
+
+/* Flow control setting - default to FULL */
+static int igb_fc_setting = e1000_fc_full;
+TUNABLE_INT("hw.igb.fc_setting", &igb_fc_setting);
+
+/* Energy Efficient Ethernet - default to off */
+static int igb_eee_disabled = TRUE;
+TUNABLE_INT("hw.igb.eee_disabled", &igb_eee_disabled);
+
+/*
+** DMA Coalescing, only for i350 - default to off,
+** this feature is for power savings
+*/
+static int igb_dma_coalesce = FALSE;
+TUNABLE_INT("hw.igb.dma_coalesce", &igb_dma_coalesce);
+
+/*********************************************************************
+ *  Device identification routine
+ *
+ *  igb_probe determines if the driver should be loaded on
+ *  adapter based on PCI vendor/device id of the adapter.
+ *
+ *  return BUS_PROBE_DEFAULT on success, positive on failure
+ *********************************************************************/
+
+static int
+igb_probe(device_t dev)
+{
+	char		adapter_name[60];
+	uint16_t	pci_vendor_id = 0;
+	uint16_t	pci_device_id = 0;
+	uint16_t	pci_subvendor_id = 0;
+	uint16_t	pci_subdevice_id = 0;
+	igb_vendor_info_t *ent;
+
+	INIT_DEBUGOUT("igb_probe: begin");
+
+	/* Fast reject: every supported part has the Intel vendor id. */
+	pci_vendor_id = pci_get_vendor(dev);
+	if (pci_vendor_id != IGB_VENDOR_ID)
+		return (ENXIO);
+
+	pci_device_id = pci_get_device(dev);
+	pci_subvendor_id = pci_get_subvendor(dev);
+	pci_subdevice_id = pci_get_subdevice(dev);
+
+	/* Walk the table; PCI_ANY_ID in an entry wildcards the subids. */
+	ent = igb_vendor_info_array;
+	while (ent->vendor_id != 0) {
+		if ((pci_vendor_id == ent->vendor_id) &&
+		    (pci_device_id == ent->device_id) &&
+
+		    ((pci_subvendor_id == ent->subvendor_id) ||
+		    (ent->subvendor_id == PCI_ANY_ID)) &&
+
+		    ((pci_subdevice_id == ent->subdevice_id) ||
+		    (ent->subdevice_id == PCI_ANY_ID))) {
+			/* snprintf rather than sprintf: guard the fixed
+			 * 60-byte name buffer against future growth of the
+			 * branding or version strings. */
+			snprintf(adapter_name, sizeof(adapter_name), "%s %s",
+				igb_strings[ent->index],
+				igb_driver_version);
+			device_set_desc_copy(dev, adapter_name);
+			return (BUS_PROBE_DEFAULT);
+		}
+		ent++;
+	}
+
+	return (ENXIO);
+}
+
+/*********************************************************************
+ *  Device initialization routine
+ *
+ *  The attach entry point is called when the driver is being loaded.
+ *  This routine identifies the type of hardware, allocates all resources
+ *  and initializes the hardware.
+ *
+ *  return 0 on success, positive on failure
+ *********************************************************************/
+
+static int
+igb_attach(device_t dev)
+{
+       struct adapter  *adapter;
+       int             error = 0;
+       u16             eeprom_data;
+
+       INIT_DEBUGOUT("igb_attach: begin");
+
+       adapter = device_get_softc(dev);
+       adapter->dev = adapter->osdep.dev = dev;
+       IGB_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
+
+       /* SYSCTL stuff: NVM dump, interrupt moderation, flow control */
+       SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+           SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+           OID_AUTO, "nvm", CTLTYPE_INT|CTLFLAG_RW, adapter, 0,
+           igb_sysctl_nvm_info, "I", "NVM Information");
+
+       SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
+           SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+           OID_AUTO, "enable_aim", CTLTYPE_INT|CTLFLAG_RW,
+           &igb_enable_aim, 1, "Interrupt Moderation");
+
+       SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+           SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+           OID_AUTO, "flow_control", CTLTYPE_INT|CTLFLAG_RW,
+           adapter, 0, igb_set_flowcntl, "I", "Flow Control");
+
+       /* Timer callout is protected by the core mutex */
+       callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
+
+       /* Determine hardware and mac info */
+       igb_identify_hardware(adapter);
+
+       /* Setup PCI resources */
+       if (igb_allocate_pci_resources(adapter)) {
+               device_printf(dev, "Allocation of PCI resources failed\n");
+               error = ENXIO;
+               goto err_pci;
+       }
+
+       /* Do Shared Code initialization */
+       if (e1000_setup_init_funcs(&adapter->hw, TRUE)) {
+               device_printf(dev, "Setup of Shared code failed\n");
+               error = ENXIO;
+               goto err_pci;
+       }
+
+       e1000_get_bus_info(&adapter->hw);
+
+       /* Sysctl for limiting the amount of work done in the taskqueue */
+       igb_set_sysctl_value(adapter, "rx_processing_limit",
+           "max number of rx packets to process", &adapter->rx_process_limit,
+           igb_rx_process_limit);
+
+       /*
+        * Validate number of transmit and receive descriptors. It
+        * must not exceed hardware maximum, and must be multiple
+        * of E1000_DBA_ALIGN.  Fall back to the defaults otherwise.
+        */
+       if (((igb_txd * sizeof(struct e1000_tx_desc)) % IGB_DBA_ALIGN) != 0 ||
+           (igb_txd > IGB_MAX_TXD) || (igb_txd < IGB_MIN_TXD)) {
+               device_printf(dev, "Using %d TX descriptors instead of %d!\n",
+                   IGB_DEFAULT_TXD, igb_txd);
+               adapter->num_tx_desc = IGB_DEFAULT_TXD;
+       } else
+               adapter->num_tx_desc = igb_txd;
+       if (((igb_rxd * sizeof(struct e1000_rx_desc)) % IGB_DBA_ALIGN) != 0 ||
+           (igb_rxd > IGB_MAX_RXD) || (igb_rxd < IGB_MIN_RXD)) {
+               device_printf(dev, "Using %d RX descriptors instead of %d!\n",
+                   IGB_DEFAULT_RXD, igb_rxd);
+               adapter->num_rx_desc = IGB_DEFAULT_RXD;
+       } else
+               adapter->num_rx_desc = igb_rxd;
+
+       adapter->hw.mac.autoneg = DO_AUTO_NEG;
+       adapter->hw.phy.autoneg_wait_to_complete = FALSE;
+       adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
+
+       /* Copper options */
+       if (adapter->hw.phy.media_type == e1000_media_type_copper) {
+               adapter->hw.phy.mdix = AUTO_ALL_MODES;
+               adapter->hw.phy.disable_polarity_correction = FALSE;
+               adapter->hw.phy.ms_type = IGB_MASTER_SLAVE;
+       }
+
+       /*
+        * Set the frame limits assuming
+        * standard ethernet sized frames.
+        */
+       adapter->max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHERNET_FCS_SIZE;
+       adapter->min_frame_size = ETH_ZLEN + ETHERNET_FCS_SIZE;
+
+       /*
+       ** Allocate and Setup Queues
+       */
+       if (igb_allocate_queues(adapter)) {
+               error = ENOMEM;
+               goto err_pci;
+       }
+
+       /* Allocate the appropriate stats memory (VF vs PF layouts differ) */
+       if (adapter->vf_ifp) {
+               adapter->stats =
+                   (struct e1000_vf_stats *)malloc(sizeof \
+                   (struct e1000_vf_stats), M_DEVBUF, M_NOWAIT | M_ZERO);
+               igb_vf_init_stats(adapter);
+       } else
+               adapter->stats =
+                   (struct e1000_hw_stats *)malloc(sizeof \
+                   (struct e1000_hw_stats), M_DEVBUF, M_NOWAIT | M_ZERO);
+       if (adapter->stats == NULL) {
+               device_printf(dev, "Can not allocate stats memory\n");
+               error = ENOMEM;
+               goto err_late;
+       }
+
+       /* Allocate multicast array memory. */
+       adapter->mta = malloc(sizeof(u8) * ETH_ADDR_LEN *
+           MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
+       if (adapter->mta == NULL) {
+               device_printf(dev, "Can not allocate multicast setup array\n");
+               error = ENOMEM;
+               goto err_late;
+       }
+
+       /* Some adapter-specific advanced features (i350 and later only) */
+       if (adapter->hw.mac.type >= e1000_i350) {
+               igb_set_sysctl_value(adapter, "dma_coalesce",
+                   "configure dma coalesce",
+                   &adapter->dma_coalesce, igb_dma_coalesce);
+               igb_set_sysctl_value(adapter, "eee_disabled",
+                   "enable Energy Efficient Ethernet",
+                   &adapter->hw.dev_spec._82575.eee_disable,
+                   igb_eee_disabled);
+               e1000_set_eee_i350(&adapter->hw);
+       }
+
+       /*
+       ** Start from a known state, this is
+       ** important in reading the nvm and
+       ** mac from that.
+       */
+       e1000_reset_hw(&adapter->hw);
+
+       /* Make sure we have a good EEPROM before we read from it */
+       if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
+               /*
+               ** Some PCI-E parts fail the first check due to
+               ** the link being in sleep state, call it again,
+               ** if it fails a second time its a real issue.
+               */
+               if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
+                       device_printf(dev,
+                           "The EEPROM Checksum Is Not Valid\n");
+                       error = EIO;
+                       goto err_late;
+               }
+       }
+
+       /*
+       ** Copy the permanent MAC address out of the EEPROM
+       */
+       if (e1000_read_mac_addr(&adapter->hw) < 0) {
+               device_printf(dev, "EEPROM read error while reading MAC"
+                   " address\n");
+               error = EIO;
+               goto err_late;
+       }
+       /* Check its sanity */
+       if (!igb_is_valid_ether_addr(adapter->hw.mac.addr)) {
+               device_printf(dev, "Invalid MAC address\n");
+               error = EIO;
+               goto err_late;
+       }
+
+       /* 
+       ** Configure Interrupts
+       */
+       if ((adapter->msix > 1) && (igb_enable_msix))
+               error = igb_allocate_msix(adapter);
+       else /* MSI or Legacy */
+               error = igb_allocate_legacy(adapter);
+       if (error)
+               goto err_late;
+
+       /* Setup OS specific network interface */
+       if (igb_setup_interface(dev, adapter) != 0) {
+               /*
+               ** Must set an error here: 'error' is still 0 at this
+               ** point, and jumping to err_late without setting it
+               ** would report a successful attach after the error
+               ** path has torn down all resources.
+               */
+               error = ENXIO;
+               goto err_late;
+       }
+
+       /* Now get a good starting state */
+       igb_reset(adapter);
+
+       /* Initialize statistics */
+       igb_update_stats_counters(adapter);
+
+       adapter->hw.mac.get_link_status = 1;
+       igb_update_link_status(adapter);
+
+       /* Indicate SOL/IDER usage */
+       if (e1000_check_reset_block(&adapter->hw))
+               device_printf(dev,
+                   "PHY reset is blocked due to SOL/IDER session.\n");
+
+       /* Determine if we have to control management hardware */
+       adapter->has_manage = e1000_enable_mng_pass_thru(&adapter->hw);
+
+       /*
+        * Setup Wake-on-Lan
+        */
+       /* APME bit in EEPROM is mapped to WUC.APME */
+       eeprom_data = E1000_READ_REG(&adapter->hw, E1000_WUC) & E1000_WUC_APME;
+       if (eeprom_data)
+               adapter->wol = E1000_WUFC_MAG;
+
+       /* Register for VLAN events */
+       adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
+            igb_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
+       adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
+            igb_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
+
+       igb_add_hw_stats(adapter);
+
+       /* Tell the stack that the interface is not active */
+       adapter->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
+
+       adapter->led_dev = led_create(igb_led_func, adapter,
+           device_get_nameunit(dev));
+
+       INIT_DEBUGOUT("igb_attach: end");
+
+       return (0);
+
+err_late:
+       igb_free_transmit_structures(adapter);
+       igb_free_receive_structures(adapter);
+       igb_release_hw_control(adapter);
+       if (adapter->ifp != NULL)
+               if_free(adapter->ifp);
+err_pci:
+       igb_free_pci_resources(adapter);
+       free(adapter->mta, M_DEVBUF);
+       IGB_CORE_LOCK_DESTROY(adapter);
+
+       return (error);
+}
+
+/*********************************************************************
+ *  Device removal routine
+ *
+ *  The detach entry point is called when the driver is being removed.
+ *  This routine stops the adapter and deallocates all the resources
+ *  that were allocated for driver operation.
+ *
+ *  return 0 on success, positive on failure
+ *********************************************************************/
+
+static int
+igb_detach(device_t dev)
+{
+       struct adapter  *adapter = device_get_softc(dev);
+       struct ifnet    *ifp = adapter->ifp;
+
+       INIT_DEBUGOUT("igb_detach: begin");
+
+       /* Make sure VLANS are not using driver */
+       if (adapter->ifp->if_vlantrunk != NULL) {
+               device_printf(dev,"Vlan in use, detach first\n");
+               return (EBUSY);
+       }
+
+       if (adapter->led_dev != NULL)
+               led_destroy(adapter->led_dev);
+
+#ifdef DEVICE_POLLING
+       if (ifp->if_capenable & IFCAP_POLLING)
+               ether_poll_deregister(ifp);
+#endif
+
+       /* Quiesce the adapter under the core lock before teardown */
+       IGB_CORE_LOCK(adapter);
+       adapter->in_detach = 1;
+       igb_stop(adapter);
+       IGB_CORE_UNLOCK(adapter);
+
+       e1000_phy_hw_reset(&adapter->hw);
+
+       /* Give control back to firmware */
+       igb_release_manageability(adapter);
+       igb_release_hw_control(adapter);
+
+       /* Arm Wake-on-LAN if it was configured at attach time */
+       if (adapter->wol) {
+               E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
+               E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
+               igb_enable_wakeup(dev);
+       }
+
+       /* Unregister VLAN events */
+       if (adapter->vlan_attach != NULL)
+               EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
+       if (adapter->vlan_detach != NULL)
+               EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
+
+       ether_ifdetach(adapter->ifp);
+
+       /* Wait for any in-flight timer callout to finish */
+       callout_drain(&adapter->timer);
+
+       igb_free_pci_resources(adapter);
+       bus_generic_detach(dev);
+       if_free(ifp);
+
+       igb_free_transmit_structures(adapter);
+       igb_free_receive_structures(adapter);
+       free(adapter->mta, M_DEVBUF);
+
+       IGB_CORE_LOCK_DESTROY(adapter);
+
+       return (0);
+}
+
+/*********************************************************************
+ *
+ *  Shutdown entry point
+ *
+ **********************************************************************/
+
+static int
+igb_shutdown(device_t dev)
+{
+       /* A system shutdown is handled exactly like a suspend. */
+       return (igb_suspend(dev));
+}
+
+/*
+ * Suspend/resume device methods.
+ */
+static int
+igb_suspend(device_t dev)
+{
+       struct adapter *adapter = device_get_softc(dev);
+
+       IGB_CORE_LOCK(adapter);
+
+       /* Quiesce the interface and hand control back to firmware. */
+       igb_stop(adapter);
+       igb_release_manageability(adapter);
+       igb_release_hw_control(adapter);
+
+       /* Arm Wake-on-LAN filters if WOL was configured at attach. */
+       if (adapter->wol) {
+               E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
+               E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
+               igb_enable_wakeup(dev);
+       }
+
+       IGB_CORE_UNLOCK(adapter);
+
+       return (bus_generic_suspend(dev));
+}
+
+static int
+igb_resume(device_t dev)
+{
+       struct adapter *adapter = device_get_softc(dev);
+       struct ifnet *ifp = adapter->ifp;
+
+       /* Reinitialize hardware and manageability under the core lock */
+       IGB_CORE_LOCK(adapter);
+       igb_init_locked(adapter);
+       igb_init_manageability(adapter);
+
+       /* Restart transmission if the interface was up before suspend */
+       if ((ifp->if_flags & IFF_UP) &&
+           (ifp->if_drv_flags & IFF_DRV_RUNNING))
+               igb_start(ifp);
+
+       IGB_CORE_UNLOCK(adapter);
+
+       return bus_generic_resume(dev);
+}
+
+
+/*********************************************************************
+ *  Transmit entry point
+ *
+ *  igb_start is called by the stack to initiate a transmit.
+ *  The driver will remain in this routine as long as there are
+ *  packets to transmit and transmit resources are available.
+ *  In case resources are not available stack is notified and
+ *  the packet is requeued.
+ **********************************************************************/
+
+static void
+igb_start_locked(struct tx_ring *txr, struct ifnet *ifp)
+{
+       struct adapter  *adapter = ifp->if_softc;
+       struct mbuf     *m_head;
+
+       IGB_TX_LOCK_ASSERT(txr);
+
+       /* Bail if not running, already flow-controlled, or no link */
+       if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
+           IFF_DRV_RUNNING)
+               return;
+       if (!adapter->link_active)
+               return;
+
+       /* Call cleanup if number of TX descriptors low */
+       if (txr->tx_avail <= IGB_TX_CLEANUP_THRESHOLD)
+               igb_txeof(txr);
+
+       while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
+               /* Need room for a worst-case scattered frame */
+               if (txr->tx_avail <= IGB_MAX_SCATTER) {
+                       ifp->if_drv_flags |= IFF_DRV_OACTIVE;
+                       break;
+               }
+               IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
+               if (m_head == NULL)
+                       break;
+               /*
+                *  Encapsulation can modify our pointer, and or make it
+                *  NULL on failure.  In that event, we can't requeue.
+                */
+               if (igb_xmit(txr, &m_head)) {
+                       if (m_head == NULL)
+                               break;
+                       ifp->if_drv_flags |= IFF_DRV_OACTIVE;
+                       IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
+                       break;
+               }
+
+               /* Send a copy of the frame to the BPF listener */
+               ETHER_BPF_MTAP(ifp, m_head);
+
+               /* Set watchdog on */
+               txr->watchdog_time = ticks;
+               txr->queue_status = IGB_QUEUE_WORKING;
+       }
+}
+/*
+ * Legacy TX driver routine, called from the
+ * stack, always uses tx[0], and spins for it.
+ * Should not be used with multiqueue tx
+ */
+static void
+igb_start(struct ifnet *ifp)
+{
+       struct adapter  *adapter = ifp->if_softc;
+       struct tx_ring  *txr = adapter->tx_rings;
+
+       /* Nothing to do unless the interface is marked running. */
+       if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
+               return;
+
+       IGB_TX_LOCK(txr);
+       igb_start_locked(txr, ifp);
+       IGB_TX_UNLOCK(txr);
+}
+
+#if __FreeBSD_version >= 800000
+/*
+** Multiqueue Transmit driver
+**
+*/
+static int
+igb_mq_start(struct ifnet *ifp, struct mbuf *m)
+{
+       struct adapter          *adapter = ifp->if_softc;
+       struct igb_queue        *que;
+       struct tx_ring          *txr;
+       int                     i = 0, err = 0;
+
+       /* Which queue to use: hash the flow id when the stack set one */
+       if ((m->m_flags & M_FLOWID) != 0)
+               i = m->m_pkthdr.flowid % adapter->num_queues;
+
+       txr = &adapter->tx_rings[i];
+       que = &adapter->queues[i];
+
+       /* If the ring is busy, enqueue and let the taskqueue drain it */
+       if (IGB_TX_TRYLOCK(txr)) {
+               err = igb_mq_start_locked(ifp, txr, m);
+               IGB_TX_UNLOCK(txr);
+       } else {
+               err = drbr_enqueue(ifp, txr->br, m);
+               taskqueue_enqueue(que->tq, &que->que_task);
+       }
+
+       return (err);
+}
+
+static int
+igb_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m)
+{
+       struct adapter  *adapter = txr->adapter;
+        struct mbuf     *next;
+        int             err = 0, enq;
+
+       IGB_TX_LOCK_ASSERT(txr);
+
+       /* Not running / flow-controlled / no link: just buffer the mbuf */
+       if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
+           IFF_DRV_RUNNING || adapter->link_active == 0) {
+               if (m != NULL)
+                       err = drbr_enqueue(ifp, txr->br, m);
+               return (err);
+       }
+
+       /* Pick the next frame: ring order is preserved ahead of 'm' */
+       enq = 0;
+       if (m == NULL) {
+               next = drbr_dequeue(ifp, txr->br);
+       } else if (drbr_needs_enqueue(ifp, txr->br)) {
+               if ((err = drbr_enqueue(ifp, txr->br, m)) != 0)
+                       return (err);
+               next = drbr_dequeue(ifp, txr->br);
+       } else
+               next = m;
+
+       /* Process the queue */
+       while (next != NULL) {
+               /* igb_xmit may free or NULL the mbuf on failure */
+               if ((err = igb_xmit(txr, &next)) != 0) {
+                       if (next != NULL)
+                               err = drbr_enqueue(ifp, txr->br, next);
+                       break;
+               }
+               enq++;
+               drbr_stats_update(ifp, next->m_pkthdr.len, next->m_flags);
+               ETHER_BPF_MTAP(ifp, next);
+               if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
+                       break;
+               if (txr->tx_avail <= IGB_TX_CLEANUP_THRESHOLD)
+                       igb_txeof(txr);
+               if (txr->tx_avail <= IGB_MAX_SCATTER) {
+                       ifp->if_drv_flags |= IFF_DRV_OACTIVE;
+                       break;
+               }
+               next = drbr_dequeue(ifp, txr->br);
+       }
+       if (enq > 0) {
+               /* Set the watchdog */
+               txr->queue_status = IGB_QUEUE_WORKING;
+               txr->watchdog_time = ticks;
+       }
+       return (err);
+}
+
+/*
+** Flush all ring buffers
+*/
+static void
+igb_qflush(struct ifnet *ifp)
+{
+       struct adapter  *adapter = ifp->if_softc;
+       struct tx_ring  *txr = adapter->tx_rings;
+       struct mbuf     *m;
+
+       for (int i = 0; i < adapter->num_queues; i++, txr++) {
+               IGB_TX_LOCK(txr);
+               while ((m = buf_ring_dequeue_sc(txr->br)) != NULL)
+                       m_freem(m);
+               IGB_TX_UNLOCK(txr);
+       }
+       if_qflush(ifp);
+}
+#endif /* __FreeBSD_version >= 800000 */
+
+/*********************************************************************
+ *  Ioctl entry point
+ *
+ *  igb_ioctl is called when the user wants to configure the
+ *  interface.
+ *
+ *  return 0 on success, positive on failure
+ **********************************************************************/
+
+static int
+igb_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
+{
+       struct adapter  *adapter = ifp->if_softc;
+       struct ifreq *ifr = (struct ifreq *)data;
+#ifdef INET
+       struct ifaddr *ifa = (struct ifaddr *)data;
+#endif
+       int error = 0;
+
+       /* Ignore ioctls while the device is being torn down */
+       if (adapter->in_detach)
+               return (error);
+
+       switch (command) {
+       case SIOCSIFADDR:
+#ifdef INET
+               if (ifa->ifa_addr->sa_family == AF_INET) {
+                       /*
+                        * XXX
+                        * Since resetting hardware takes a very long time
+                        * and results in link renegotiation we only
+                        * initialize the hardware only when it is absolutely
+                        * required.
+                        */
+                       ifp->if_flags |= IFF_UP;
+                       if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
+                               IGB_CORE_LOCK(adapter);
+                               igb_init_locked(adapter);
+                               IGB_CORE_UNLOCK(adapter);
+                       }
+                       if (!(ifp->if_flags & IFF_NOARP))
+                               arp_ifinit(ifp, ifa);
+               } else
+#endif
+                       error = ether_ioctl(ifp, command, data);
+               break;
+       case SIOCSIFMTU:
+           {
+               int max_frame_size;
+
+               IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
+
+               IGB_CORE_LOCK(adapter);
+               /* 9234 is the largest supported jumbo frame size */
+               max_frame_size = 9234;
+               if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
+                   ETHER_CRC_LEN) {
+                       IGB_CORE_UNLOCK(adapter);
+                       error = EINVAL;
+                       break;
+               }
+
+               ifp->if_mtu = ifr->ifr_mtu;
+               adapter->max_frame_size =
+                   ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+               igb_init_locked(adapter);
+               IGB_CORE_UNLOCK(adapter);
+               break;
+           }
+       case SIOCSIFFLAGS:
+               IOCTL_DEBUGOUT("ioctl rcv'd:\
+                   SIOCSIFFLAGS (Set Interface Flags)");
+               IGB_CORE_LOCK(adapter);
+               if (ifp->if_flags & IFF_UP) {
+                       if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
+                               /* Only PROMISC/ALLMULTI changes need work */
+                               if ((ifp->if_flags ^ adapter->if_flags) &
+                                   (IFF_PROMISC | IFF_ALLMULTI)) {
+                                       igb_disable_promisc(adapter);
+                                       igb_set_promisc(adapter);
+                               }
+                       } else
+                               igb_init_locked(adapter);
+               } else
+                       if (ifp->if_drv_flags & IFF_DRV_RUNNING)
+                               igb_stop(adapter);
+               adapter->if_flags = ifp->if_flags;
+               IGB_CORE_UNLOCK(adapter);
+               break;
+       case SIOCADDMULTI:
+       case SIOCDELMULTI:
+               IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
+               if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+                       IGB_CORE_LOCK(adapter);
+                       igb_disable_intr(adapter);
+                       igb_set_multi(adapter);
+#ifdef DEVICE_POLLING
+                       if (!(ifp->if_capenable & IFCAP_POLLING))
+#endif
+                               igb_enable_intr(adapter);
+                       IGB_CORE_UNLOCK(adapter);
+               }
+               break;
+       case SIOCSIFMEDIA:
+               /*
+               ** As the speed/duplex settings are being
+               ** changed, we need to reset the PHY.
+               */
+               adapter->hw.phy.reset_disable = FALSE;
+               /* Check SOL/IDER usage */
+               IGB_CORE_LOCK(adapter);
+               if (e1000_check_reset_block(&adapter->hw)) {
+                       IGB_CORE_UNLOCK(adapter);
+                       device_printf(adapter->dev, "Media change is"
+                           " blocked due to SOL/IDER session.\n");
+                       break;
+               }
+               IGB_CORE_UNLOCK(adapter);
+               /* FALLTHROUGH: set and get share ifmedia_ioctl() */
+       case SIOCGIFMEDIA:
+               IOCTL_DEBUGOUT("ioctl rcv'd: \
+                   SIOCxIFMEDIA (Get/Set Interface Media)");
+               error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
+               break;
+       case SIOCSIFCAP:
+           {
+               int mask, reinit;
+
+               IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
+               reinit = 0;
+               /* 'mask' holds only the capability bits that changed */
+               mask = ifr->ifr_reqcap ^ ifp->if_capenable;
+#ifdef DEVICE_POLLING
+               if (mask & IFCAP_POLLING) {
+                       if (ifr->ifr_reqcap & IFCAP_POLLING) {
+                               error = ether_poll_register(igb_poll, ifp);
+                               if (error)
+                                       return (error);
+                               IGB_CORE_LOCK(adapter);
+                               igb_disable_intr(adapter);
+                               ifp->if_capenable |= IFCAP_POLLING;
+                               IGB_CORE_UNLOCK(adapter);
+                       } else {
+                               error = ether_poll_deregister(ifp);
+                               /* Enable interrupt even in error case */
+                               IGB_CORE_LOCK(adapter);
+                               igb_enable_intr(adapter);
+                               ifp->if_capenable &= ~IFCAP_POLLING;
+                               IGB_CORE_UNLOCK(adapter);
+                       }
+               }
+#endif
+               if (mask & IFCAP_HWCSUM) {
+                       ifp->if_capenable ^= IFCAP_HWCSUM;
+                       reinit = 1;
+               }
+               if (mask & IFCAP_TSO4) {
+                       ifp->if_capenable ^= IFCAP_TSO4;
+                       reinit = 1;
+               }
+               if (mask & IFCAP_VLAN_HWTAGGING) {
+                       ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
+                       reinit = 1;
+               }
+               if (mask & IFCAP_VLAN_HWFILTER) {
+                       ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
+                       reinit = 1;
+               }
+               if (mask & IFCAP_LRO) {
+                       ifp->if_capenable ^= IFCAP_LRO;
+                       reinit = 1;
+               }
+               if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING))
+                       igb_init(adapter);
+               VLAN_CAPABILITIES(ifp);
+               break;
+           }
+
+       default:
+               error = ether_ioctl(ifp, command, data);
+               break;
+       }
+
+       return (error);
+}
+
+
+/*********************************************************************
+ *  Init entry point
+ *
+ *  This routine is used in two ways. It is used by the stack as
+ *  init entry point in network interface structure. It is also used
+ *  by the driver as a hw/sw initialization routine to get to a
+ *  consistent state.
+ *
+ *  return 0 on success, positive on failure
+ **********************************************************************/
+
+static void
+igb_init_locked(struct adapter *adapter)
+{
+       struct ifnet    *ifp = adapter->ifp;
+       device_t        dev = adapter->dev;
+
+       INIT_DEBUGOUT("igb_init: begin");
+
+       IGB_CORE_LOCK_ASSERT(adapter);
+
+       /* Quiesce interrupts and the timer before reprogramming */
+       igb_disable_intr(adapter);
+       callout_stop(&adapter->timer);
+
+       /* Get the latest mac address, User can use a LAA */
+        bcopy(IF_LLADDR(adapter->ifp), adapter->hw.mac.addr,
+              ETHER_ADDR_LEN);
+
+       /* Put the address into the Receive Address Array */
+       e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
+
+       igb_reset(adapter);
+       igb_update_link_status(adapter);
+
+       E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);
+
+       /* Set hardware offload abilities from enabled capabilities */
+       ifp->if_hwassist = 0;
+       if (ifp->if_capenable & IFCAP_TXCSUM) {
+               ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
+#if __FreeBSD_version >= 800000
+               /* SCTP checksum offload only on 82576 */
+               if (adapter->hw.mac.type == e1000_82576)
+                       ifp->if_hwassist |= CSUM_SCTP;
+#endif
+       }
+
+       if (ifp->if_capenable & IFCAP_TSO4)
+               ifp->if_hwassist |= CSUM_TSO;
+
+       /* Configure for OS presence */
+       igb_init_manageability(adapter);
+
+       /* Prepare transmit descriptors and buffers */
+       igb_setup_transmit_structures(adapter);
+       igb_initialize_transmit_units(adapter);
+
+       /* Setup Multicast table */
+       igb_set_multi(adapter);
+
+       /*
+       ** Figure out the desired mbuf pool
+       ** for doing jumbo/packetsplit
+       */
+       if (adapter->max_frame_size <= 2048)
+               adapter->rx_mbuf_sz = MCLBYTES;
+       else if (adapter->max_frame_size <= 4096)
+               adapter->rx_mbuf_sz = MJUMPAGESIZE;
+       else
+               adapter->rx_mbuf_sz = MJUM9BYTES;
+
+       /* Prepare receive descriptors and buffers */
+       if (igb_setup_receive_structures(adapter)) {
+               device_printf(dev, "Could not setup receive structures\n");
+               return;
+       }
+       igb_initialize_receive_units(adapter);
+
+        /* Enable VLAN support */
+       if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
+               igb_setup_vlan_hw_support(adapter);
+                                
+       /* Don't lose promiscuous settings */
+       igb_set_promisc(adapter);
+
+       ifp->if_drv_flags |= IFF_DRV_RUNNING;
+       ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+
+       /* Restart the one-second housekeeping timer */
+       callout_reset(&adapter->timer, hz, igb_local_timer, adapter);
+       e1000_clear_hw_cntrs_base_generic(&adapter->hw);
+
+       if (adapter->msix > 1) /* Set up queue routing */
+               igb_configure_queues(adapter);
+
+       /* this clears any pending interrupts */
+       E1000_READ_REG(&adapter->hw, E1000_ICR);
+#ifdef DEVICE_POLLING
+       /*
+        * Only enable interrupts if we are not polling, make sure
+        * they are off otherwise.
+        */
+       if (ifp->if_capenable & IFCAP_POLLING)
+               igb_disable_intr(adapter);
+       else
+#endif /* DEVICE_POLLING */
+       {
+               igb_enable_intr(adapter);
+               /* Kick off a link-state check via a software interrupt */
+               E1000_WRITE_REG(&adapter->hw, E1000_ICS, E1000_ICS_LSC);
+       }
+
+       /* Set Energy Efficient Ethernet */
+       e1000_set_eee_i350(&adapter->hw);
+
+       /* Don't reset the phy next time init gets called */
+       adapter->hw.phy.reset_disable = TRUE;
+}
+
+static void
+igb_init(void *arg)
+{
+       struct adapter  *sc = (struct adapter *)arg;
+
+       /* Serialize with other core operations, then do the real work. */
+       IGB_CORE_LOCK(sc);
+       igb_init_locked(sc);
+       IGB_CORE_UNLOCK(sc);
+}
+
+
+static void
+igb_handle_que(void *context, int pending)
+{
+       struct igb_queue *que = context;
+       struct adapter *adapter = que->adapter;
+       struct tx_ring *txr = que->txr;
+       struct ifnet    *ifp = adapter->ifp;
+
+       if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+               bool    more;
+
+               /* Clean RX first; -1 means no per-call packet limit here */
+               more = igb_rxeof(que, -1, NULL);
+
+               /* Then reap completed TX and restart the send path */
+               IGB_TX_LOCK(txr);
+               if (igb_txeof(txr))
+                       more = TRUE;
+#if __FreeBSD_version >= 800000
+               if (!drbr_empty(ifp, txr->br))
+                       igb_mq_start_locked(ifp, txr, NULL);
+#else
+               igb_start_locked(txr, ifp);
+#endif
+               IGB_TX_UNLOCK(txr);
+               /* More work pending: reschedule ourselves, stay masked */
+               if (more || (ifp->if_drv_flags & IFF_DRV_OACTIVE)) {
+                       taskqueue_enqueue(que->tq, &que->que_task);
+                       return;
+               }
+       }
+
+#ifdef DEVICE_POLLING
+       if (ifp->if_capenable & IFCAP_POLLING)
+               return;
+#endif
+       /* Reenable this interrupt */
+       if (que->eims)
+               E1000_WRITE_REG(&adapter->hw, E1000_EIMS, que->eims);
+       else
+               igb_enable_intr(adapter);
+}
+
+/* Deal with link in a sleepable context */
+static void
+igb_handle_link(void *context, int pending)
+{
+       struct adapter  *sc = context;
+
+       /* Force a fresh link-status read, then propagate it. */
+       sc->hw.mac.get_link_status = 1;
+       igb_update_link_status(sc);
+}
+
+/*********************************************************************
+ *
+ *  MSI/Legacy Deferred
+ *  Interrupt Service routine
+ *
+ *********************************************************************/
+static int
+igb_irq_fast(void *arg)
+{
+       struct adapter          *adapter = arg;
+       struct igb_queue        *que = adapter->queues;
+       u32                     reg_icr;
+
+       reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
+
+       /*
+       ** All-ones means the card was hot-ejected; zero, or a clear
+       ** INT_ASSERTED bit, means this interrupt is not ours.
+       */
+       if ((reg_icr == 0xffffffff) || (reg_icr == 0x0) ||
+           ((reg_icr & E1000_ICR_INT_ASSERTED) == 0))
+               return FILTER_STRAY;
+
+       /*
+        * Mask interrupts until the taskqueue is finished running.  This is
+        * cheap, just assume that it is needed.  This also works around the
+        * MSI message reordering errata on certain systems.
+        */
+       igb_disable_intr(adapter);
+       taskqueue_enqueue(que->tq, &que->que_task);
+
+       /* Link status change: handled in a sleepable task context */
+       if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))
+               taskqueue_enqueue(que->tq, &adapter->link_task);
+
+       /* Count receive overruns for statistics */
+       if (reg_icr & E1000_ICR_RXO)
+               adapter->rx_overruns++;
+       return FILTER_HANDLED;
+}
+
+#ifdef DEVICE_POLLING
+/*********************************************************************
+ *
+ *  Legacy polling routine : if using this code you MUST be sure that
+ *  multiqueue is not defined, ie, set igb_num_queues to 1.
+ *
+ *  Return type and value depend on the FreeBSD version: >= 800000
+ *  returns the number of RX packets processed, older kernels use a
+ *  void poll handler (POLL_RETURN_COUNT expands accordingly).
+ *
+ *********************************************************************/
+#if __FreeBSD_version >= 800000
+#define POLL_RETURN_COUNT(a) (a)
+static int
+#else
+#define POLL_RETURN_COUNT(a)
+static void
+#endif
+igb_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
+{
+       struct adapter          *adapter = ifp->if_softc;
+       struct igb_queue        *que = adapter->queues;
+       struct tx_ring          *txr = adapter->tx_rings;
+       u32                     reg_icr, rx_done = 0;
+       u32                     loop = IGB_MAX_LOOP;
+       bool                    more;
+
+       IGB_CORE_LOCK(adapter);
+       if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
+               IGB_CORE_UNLOCK(adapter);
+               return POLL_RETURN_COUNT(rx_done);
+       }
+
+       /* Periodically the stack asks us to also check status */
+       if (cmd == POLL_AND_CHECK_STATUS) {
+               reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
+               /* Link status change */
+               if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))
+                       igb_handle_link(adapter, 0);
+
+               if (reg_icr & E1000_ICR_RXO)
+                       adapter->rx_overruns++;
+       }
+       IGB_CORE_UNLOCK(adapter);
+
+       /* Clean RX up to 'count' packets, then TX (bounded loop) */
+       igb_rxeof(que, count, &rx_done);
+
+       IGB_TX_LOCK(txr);
+       do {
+               more = igb_txeof(txr);
+       } while (loop-- && more);
+#if __FreeBSD_version >= 800000
+       if (!drbr_empty(ifp, txr->br))
+               igb_mq_start_locked(ifp, txr, NULL);
+#else
+       igb_start_locked(txr, ifp);
+#endif
+       IGB_TX_UNLOCK(txr);
+       return POLL_RETURN_COUNT(rx_done);
+}
+#endif /* DEVICE_POLLING */
+
+/*********************************************************************
+ *
+ *  MSIX TX Interrupt Service routine
+ *
+ *  Per-queue MSI-X handler: masks this queue's EIMS bit, cleans TX
+ *  and RX, optionally computes an adaptive interrupt moderation
+ *  (AIM) value for the queue's EITR register, then either schedules
+ *  the deferred task (more work pending) or re-enables the vector.
+ *
+ **********************************************************************/
+static void
+igb_msix_que(void *arg)
+{
+       struct igb_queue *que = arg;
+       struct adapter *adapter = que->adapter;
+       struct tx_ring *txr = que->txr;
+       struct rx_ring *rxr = que->rxr;
+       u32             newitr = 0;
+       bool            more_tx, more_rx;
+
+       /* Mask just this queue's interrupt while we service it */
+       E1000_WRITE_REG(&adapter->hw, E1000_EIMC, que->eims);
+       ++que->irqs;
+
+       IGB_TX_LOCK(txr);
+       more_tx = igb_txeof(txr);
+       IGB_TX_UNLOCK(txr);
+
+       more_rx = igb_rxeof(que, adapter->rx_process_limit, NULL);
+
+       if (igb_enable_aim == FALSE)
+               goto no_calc;
+       /*
+       ** Do Adaptive Interrupt Moderation:
+        **  - Write out last calculated setting
+       **  - Calculate based on average size over
+       **    the last interval.
+       */
+        if (que->eitr_setting)
+                E1000_WRITE_REG(&adapter->hw,
+                    E1000_EITR(que->msix), que->eitr_setting);
+        que->eitr_setting = 0;
+
+        /* Idle, do nothing */
+        if ((txr->bytes == 0) && (rxr->bytes == 0))
+                goto no_calc;
+                                
+        /* Used half Default if sub-gig */
+        if (adapter->link_speed != 1000)
+                newitr = IGB_DEFAULT_ITR / 2;
+        else {
+               /* Average frame size over the interval, TX vs RX max */
+               if ((txr->bytes) && (txr->packets))
+                       newitr = txr->bytes/txr->packets;
+               if ((rxr->bytes) && (rxr->packets))
+                       newitr = max(newitr,
+                           (rxr->bytes / rxr->packets));
+                newitr += 24; /* account for hardware frame, crc */
+               /* set an upper boundary */
+               newitr = min(newitr, 3000);
+               /* Be nice to the mid range */
+                if ((newitr > 300) && (newitr < 1200))
+                        newitr = (newitr / 3);
+                else
+                        newitr = (newitr / 2);
+        }
+        newitr &= 0x7FFC;  /* Mask invalid bits */
+        if (adapter->hw.mac.type == e1000_82575)
+                newitr |= newitr << 16;
+        else
+                newitr |= E1000_EITR_CNT_IGNR;
+                 
+        /* save for next interrupt */
+        que->eitr_setting = newitr;
+
+        /* Reset state */
+        txr->bytes = 0;
+        txr->packets = 0;
+        rxr->bytes = 0;
+        rxr->packets = 0;
+
+no_calc:
+       /* Schedule a clean task if needed*/
+       if (more_tx || more_rx ||
+           (adapter->ifp->if_drv_flags & IFF_DRV_OACTIVE))
+               taskqueue_enqueue(que->tq, &que->que_task);
+       else
+               /* Reenable this interrupt */
+               E1000_WRITE_REG(&adapter->hw, E1000_EIMS, que->eims);
+       return;
+}
+
+
+/*********************************************************************
+ *
+ *  MSIX Link Interrupt Service routine
+ *
+ *  Note: the "spurious" label is reached on both paths on purpose —
+ *  the link vector must be rearmed whether or not LSC was asserted.
+ *
+ **********************************************************************/
+
+static void
+igb_msix_link(void *arg)
+{
+       struct adapter  *adapter = arg;
+       u32             icr;
+
+       ++adapter->link_irq;
+       icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
+       if (!(icr & E1000_ICR_LSC))
+               goto spurious;
+       igb_handle_link(adapter, 0);
+
+spurious:
+       /* Rearm */
+       E1000_WRITE_REG(&adapter->hw, E1000_IMS, E1000_IMS_LSC);
+       E1000_WRITE_REG(&adapter->hw, E1000_EIMS, adapter->link_mask);
+       return;
+}
+
+
+/*********************************************************************
+ *
+ *  Media Ioctl callback
+ *
+ *  This routine is called whenever the user queries the status of
+ *  the interface using ifconfig.  Refreshes link state, then fills
+ *  in ifmr with active media type, speed and duplex.
+ *
+ **********************************************************************/
+static void
+igb_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
+{
+       struct adapter *adapter = ifp->if_softc;
+       u_char fiber_type = IFM_1000_SX;
+
+       INIT_DEBUGOUT("igb_media_status: begin");
+
+       IGB_CORE_LOCK(adapter);
+       igb_update_link_status(adapter);
+
+       ifmr->ifm_status = IFM_AVALID;
+       ifmr->ifm_active = IFM_ETHER;
+
+       /* No link: report valid-but-inactive and bail */
+       if (!adapter->link_active) {
+               IGB_CORE_UNLOCK(adapter);
+               return;
+       }
+
+       ifmr->ifm_status |= IFM_ACTIVE;
+
+       /* Fiber/serdes is always reported as 1000SX full duplex */
+       if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
+           (adapter->hw.phy.media_type == e1000_media_type_internal_serdes))
+               ifmr->ifm_active |= fiber_type | IFM_FDX;
+       else {
+               switch (adapter->link_speed) {
+               case 10:
+                       ifmr->ifm_active |= IFM_10_T;
+                       break;
+               case 100:
+                       ifmr->ifm_active |= IFM_100_TX;
+                       break;
+               case 1000:
+                       ifmr->ifm_active |= IFM_1000_T;
+                       break;
+               }
+               if (adapter->link_duplex == FULL_DUPLEX)
+                       ifmr->ifm_active |= IFM_FDX;
+               else
+                       ifmr->ifm_active |= IFM_HDX;
+       }
+       IGB_CORE_UNLOCK(adapter);
+}
+
+/*********************************************************************
+ *
+ *  Media Ioctl callback
+ *
+ *  This routine is called when the user changes speed/duplex using
+ *  media/mediopt option with ifconfig.  Translates the requested
+ *  media word into autoneg/forced-speed settings and reinitializes
+ *  the adapter.  Always returns 0 unless the media type is not
+ *  Ethernet (unsupported subtypes only print a warning).
+ *
+ **********************************************************************/
+static int
+igb_media_change(struct ifnet *ifp)
+{
+       struct adapter *adapter = ifp->if_softc;
+       struct ifmedia  *ifm = &adapter->media;
+
+       INIT_DEBUGOUT("igb_media_change: begin");
+
+       if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
+               return (EINVAL);
+
+       IGB_CORE_LOCK(adapter);
+       switch (IFM_SUBTYPE(ifm->ifm_media)) {
+       case IFM_AUTO:
+               adapter->hw.mac.autoneg = DO_AUTO_NEG;
+               adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
+               break;
+       case IFM_1000_LX:
+       case IFM_1000_SX:
+       case IFM_1000_T:
+               /* Gigabit: still autonegotiate, advertise 1000FD only */
+               adapter->hw.mac.autoneg = DO_AUTO_NEG;
+               adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
+               break;
+       case IFM_100_TX:
+               /* 100/10: autoneg off, force speed/duplex */
+               adapter->hw.mac.autoneg = FALSE;
+               adapter->hw.phy.autoneg_advertised = 0;
+               if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
+                       adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
+               else
+                       adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
+               break;
+       case IFM_10_T:
+               adapter->hw.mac.autoneg = FALSE;
+               adapter->hw.phy.autoneg_advertised = 0;
+               if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
+                       adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
+               else
+                       adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
+               break;
+       default:
+               device_printf(adapter->dev, "Unsupported media type\n");
+       }
+
+       igb_init_locked(adapter);
+       IGB_CORE_UNLOCK(adapter);
+
+       return (0);
+}
+
+
+/*********************************************************************
+ *
+ *  This routine maps the mbufs to Advanced TX descriptors.
+ *  used by the 82575 adapter.
+ *  
+ *  Called with the TX ring lock held (callers use IGB_TX_LOCK).
+ *  Returns 0 on success; ENOBUFS/ENOMEM/ENXIO or a bus_dma error
+ *  otherwise.  On ENOMEM the mbuf is deliberately NOT freed so the
+ *  caller can requeue it; on other failures it is freed and
+ *  *m_headp set to NULL.
+ *
+ **********************************************************************/
+
+static int
+igb_xmit(struct tx_ring *txr, struct mbuf **m_headp)
+{
+       struct adapter          *adapter = txr->adapter;
+       bus_dma_segment_t       segs[IGB_MAX_SCATTER];
+       bus_dmamap_t            map;
+       struct igb_tx_buffer    *tx_buffer, *tx_buffer_mapped;
+       union e1000_adv_tx_desc *txd = NULL;
+       struct mbuf             *m_head;
+       u32                     olinfo_status = 0, cmd_type_len = 0;
+       int                     nsegs, i, j, error, first, last = 0;
+       u32                     hdrlen = 0;
+
+       m_head = *m_headp;
+
+
+       /* Set basic descriptor constants */
+       cmd_type_len |= E1000_ADVTXD_DTYP_DATA;
+       cmd_type_len |= E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT;
+       if (m_head->m_flags & M_VLANTAG)
+               cmd_type_len |= E1000_ADVTXD_DCMD_VLE;
+
+       /*
+         * Map the packet for DMA.
+        *
+        * Capture the first descriptor index,
+        * this descriptor will have the index
+        * of the EOP which is the only one that
+        * now gets a DONE bit writeback.
+        */
+       first = txr->next_avail_desc;
+       tx_buffer = &txr->tx_buffers[first];
+       tx_buffer_mapped = tx_buffer;
+       map = tx_buffer->map;
+
+       error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
+           *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
+
+       /* Too many segments: defragment the chain and retry once */
+       if (error == EFBIG) {
+               struct mbuf *m;
+
+               m = m_defrag(*m_headp, M_DONTWAIT);
+               if (m == NULL) {
+                       adapter->mbuf_defrag_failed++;
+                       m_freem(*m_headp);
+                       *m_headp = NULL;
+                       return (ENOBUFS);
+               }
+               *m_headp = m;
+
+               /* Try it again */
+               error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
+                   *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
+
+               /* ENOMEM is transient: keep the mbuf for a later retry */
+               if (error == ENOMEM) {
+                       adapter->no_tx_dma_setup++;
+                       return (error);
+               } else if (error != 0) {
+                       adapter->no_tx_dma_setup++;
+                       m_freem(*m_headp);
+                       *m_headp = NULL;
+                       return (error);
+               }
+       } else if (error == ENOMEM) {
+               adapter->no_tx_dma_setup++;
+               return (error);
+       } else if (error != 0) {
+               adapter->no_tx_dma_setup++;
+               m_freem(*m_headp);
+               *m_headp = NULL;
+               return (error);
+       }
+
+       /* Check again to be sure we have enough descriptors */
+        if (nsegs > (txr->tx_avail - 2)) {
+                txr->no_desc_avail++;
+               bus_dmamap_unload(txr->txtag, map);
+               return (ENOBUFS);
+        }
+       m_head = *m_headp;
+
+        /*
+         * Set up the context descriptor:
+         * used when any hardware offload is done.
+        * This includes CSUM, VLAN, and TSO. It
+        * will use the first descriptor.
+         */
+        if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
+               if (igb_tso_setup(txr, m_head, &hdrlen)) {
+                       cmd_type_len |= E1000_ADVTXD_DCMD_TSE;
+                       olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
+                       olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
+               } else
+                       return (ENXIO); 
+       } else if (igb_tx_ctx_setup(txr, m_head))
+               olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
+
+       /* Calculate payload length */
+       olinfo_status |= ((m_head->m_pkthdr.len - hdrlen)
+           << E1000_ADVTXD_PAYLEN_SHIFT);
+
+       /* 82575 needs the queue index added */
+       if (adapter->hw.mac.type == e1000_82575)
+               olinfo_status |= txr->me << 4;
+
+       /* Set up our transmit descriptors, one per DMA segment */
+       i = txr->next_avail_desc;
+       for (j = 0; j < nsegs; j++) {
+               bus_size_t seg_len;
+               bus_addr_t seg_addr;
+
+               tx_buffer = &txr->tx_buffers[i];
+               txd = (union e1000_adv_tx_desc *)&txr->tx_base[i];
+               seg_addr = segs[j].ds_addr;
+               seg_len  = segs[j].ds_len;
+
+               txd->read.buffer_addr = htole64(seg_addr);
+               txd->read.cmd_type_len = htole32(cmd_type_len | seg_len);
+               txd->read.olinfo_status = htole32(olinfo_status);
+               last = i;
+               if (++i == adapter->num_tx_desc)
+                       i = 0;
+               tx_buffer->m_head = NULL;
+               tx_buffer->next_eop = -1;
+       }
+
+       txr->next_avail_desc = i;
+       txr->tx_avail -= nsegs;
+
+       /* The mbuf is owned by the LAST buffer; swap the DMA maps so
+        * the loaded map travels with it */
+        tx_buffer->m_head = m_head;
+       tx_buffer_mapped->map = tx_buffer->map;
+       tx_buffer->map = map;
+        bus_dmamap_sync(txr->txtag, map, BUS_DMASYNC_PREWRITE);
+
+        /*
+         * Last Descriptor of Packet
+        * needs End Of Packet (EOP)
+        * and Report Status (RS)
+         */
+        txd->read.cmd_type_len |=
+           htole32(E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS);
+       /*
+        * Keep track in the first buffer which
+        * descriptor will be written back
+        */
+       tx_buffer = &txr->tx_buffers[first];
+       tx_buffer->next_eop = last;
+       txr->watchdog_time = ticks;
+
+       /*
+        * Advance the Transmit Descriptor Tail (TDT), this tells the E1000
+        * that this frame is available to transmit.
+        */
+       bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
+           BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+       E1000_WRITE_REG(&adapter->hw, E1000_TDT(txr->me), i);
+       ++txr->tx_packets;
+
+       return (0);
+
+}
+
+/*
+ * igb_set_promisc - program promiscuous/allmulti receive modes.
+ *
+ * VF interfaces go through the mailbox API; PF interfaces set the
+ * UPE/MPE bits in RCTL according to the ifnet flags.
+ */
+static void
+igb_set_promisc(struct adapter *adapter)
+{
+       struct ifnet    *ifp = adapter->ifp;
+       struct e1000_hw *hw = &adapter->hw;
+       u32             reg;
+
+       if (adapter->vf_ifp) {
+               e1000_promisc_set_vf(hw, e1000_promisc_enabled);
+               return;
+       }
+
+       reg = E1000_READ_REG(hw, E1000_RCTL);
+       if (ifp->if_flags & IFF_PROMISC) {
+               /* Unicast + multicast promiscuous */
+               reg |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
+               E1000_WRITE_REG(hw, E1000_RCTL, reg);
+       } else if (ifp->if_flags & IFF_ALLMULTI) {
+               /* All multicast, but not unicast promiscuous */
+               reg |= E1000_RCTL_MPE;
+               reg &= ~E1000_RCTL_UPE;
+               E1000_WRITE_REG(hw, E1000_RCTL, reg);
+       }
+}
+
+/*
+ * igb_disable_promisc - clear both promiscuous modes.
+ *
+ * VF interfaces use the mailbox API; PF interfaces clear UPE and MPE
+ * in RCTL.
+ */
+static void
+igb_disable_promisc(struct adapter *adapter)
+{
+       struct e1000_hw *hw = &adapter->hw;
+       u32             reg;
+
+       if (adapter->vf_ifp) {
+               e1000_promisc_set_vf(hw, e1000_promisc_disabled);
+               return;
+       }
+       reg = E1000_READ_REG(hw, E1000_RCTL);
+       reg &=  (~E1000_RCTL_UPE);
+       reg &=  (~E1000_RCTL_MPE);
+       E1000_WRITE_REG(hw, E1000_RCTL, reg);
+}
+
+
+/*********************************************************************
+ *  Multicast Update
+ *
+ *  This routine is called whenever multicast address list is updated.
+ *
+ **********************************************************************/
+
+static void
+igb_set_multi(struct adapter *adapter)
+{
+       struct ifnet    *ifp = adapter->ifp;
+       struct ifmultiaddr *ifma;
+       u32 reg_rctl = 0;
+       u8  *mta;
+
+       int mcnt = 0;
+
+       IOCTL_DEBUGOUT("igb_set_multi: begin");
+
+       mta = adapter->mta;
+       bzero(mta, sizeof(uint8_t) * ETH_ADDR_LEN *
+           MAX_NUM_MULTICAST_ADDRESSES);
+
+#if __FreeBSD_version < 800000
+       IF_ADDR_LOCK(ifp);
+#else
+       if_maddr_rlock(ifp);
+#endif
+       TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
+               if (ifma->ifma_addr->sa_family != AF_LINK)
+                       continue;
+
+               if (mcnt == MAX_NUM_MULTICAST_ADDRESSES)
+                       break;
+
+               bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
+                   &mta[mcnt * ETH_ADDR_LEN], ETH_ADDR_LEN);
+               mcnt++;
+       }
+#if __FreeBSD_version < 800000
+       IF_ADDR_UNLOCK(ifp);
+#else
+       if_maddr_runlock(ifp);
+#endif
+
+       if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) {
+               reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
+               reg_rctl |= E1000_RCTL_MPE;
+               E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl);
+       } else
+               e1000_update_mc_addr_list(&adapter->hw, mta, mcnt);
+}
+
+
+/*********************************************************************
+ *  Timer routine:
+ *     This routine checks for link status,
+ *     updates statistics, and does the watchdog.
+ *
+ *  Runs once per second (callout_reset with hz) with the core lock
+ *  held.  On a hung TX queue it logs diagnostics and reinitializes
+ *  the adapter.
+ *
+ **********************************************************************/
+
+static void
+igb_local_timer(void *arg)
+{
+       struct adapter          *adapter = arg;
+       device_t                dev = adapter->dev;
+       struct tx_ring          *txr = adapter->tx_rings;
+
+
+       IGB_CORE_LOCK_ASSERT(adapter);
+
+       igb_update_link_status(adapter);
+       igb_update_stats_counters(adapter);
+
+       /* 
+       ** If flow control has paused us since last checking
+       ** it invalidates the watchdog timing, so dont run it.
+       */
+       if (adapter->pause_frames) {
+               adapter->pause_frames = 0;
+               goto out;
+       }
+
+        /*
+        ** Watchdog: check for time since any descriptor was cleaned
+        */
+       for (int i = 0; i < adapter->num_queues; i++, txr++)
+               if (txr->queue_status == IGB_QUEUE_HUNG) 
+                       goto timeout;
+out:
+       callout_reset(&adapter->timer, hz, igb_local_timer, adapter);
+#ifndef DEVICE_POLLING
+       /* Schedule all queue interrupts - deadlock protection */
+       E1000_WRITE_REG(&adapter->hw, E1000_EICS, adapter->que_mask);
+#endif
+       return;
+
+timeout:
+       /* txr still points at the hung queue when we get here */
+       device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
+       device_printf(dev,"Queue(%d) tdh = %d, hw tdt = %d\n", txr->me,
+            E1000_READ_REG(&adapter->hw, E1000_TDH(txr->me)),
+            E1000_READ_REG(&adapter->hw, E1000_TDT(txr->me)));
+       device_printf(dev,"TX(%d) desc avail = %d,"
+            "Next TX to Clean = %d\n",
+            txr->me, txr->tx_avail, txr->next_to_clean);
+       adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
+       adapter->watchdog_events++;
+       igb_init_locked(adapter);
+}
+
+/*
+ * igb_update_link_status - resolve current link state and notify
+ * the stack on transitions.
+ *
+ * Reads link per media type (copper uses the cached value unless a
+ * refresh is pending), checks i350 thermal status, and on an
+ * up/down transition updates speed/duplex/baudrate and calls
+ * if_link_state_change() (which may sleep).
+ */
+static void
+igb_update_link_status(struct adapter *adapter)
+{
+       struct e1000_hw *hw = &adapter->hw;
+       struct ifnet *ifp = adapter->ifp;
+       device_t dev = adapter->dev;
+       struct tx_ring *txr = adapter->tx_rings;
+       u32 link_check, thstat, ctrl;
+
+       link_check = thstat = ctrl = 0;
+
+       /* Get the cached link value or read for real */
+        switch (hw->phy.media_type) {
+        case e1000_media_type_copper:
+                if (hw->mac.get_link_status) {
+                       /* Do the work to read phy */
+                        e1000_check_for_link(hw);
+                        link_check = !hw->mac.get_link_status;
+                } else
+                        link_check = TRUE;
+                break;
+        case e1000_media_type_fiber:
+                e1000_check_for_link(hw);
+                link_check = (E1000_READ_REG(hw, E1000_STATUS) &
+                                 E1000_STATUS_LU);
+                break;
+        case e1000_media_type_internal_serdes:
+                e1000_check_for_link(hw);
+                link_check = adapter->hw.mac.serdes_has_link;
+                break;
+       /* VF device is type_unknown */
+        case e1000_media_type_unknown:
+                e1000_check_for_link(hw);
+               link_check = !hw->mac.get_link_status;
+               /* Fall thru */
+        default:
+                break;
+        }
+
+       /* Check for thermal downshift or shutdown (i350 only) */
+       if (hw->mac.type == e1000_i350) {
+               thstat = E1000_READ_REG(hw, E1000_THSTAT);
+               ctrl = E1000_READ_REG(hw, E1000_CTRL_EXT);
+       }
+
+       /* Now we check if a transition has happened */
+       if (link_check && (adapter->link_active == 0)) {
+               e1000_get_speed_and_duplex(&adapter->hw, 
+                   &adapter->link_speed, &adapter->link_duplex);
+               if (bootverbose)
+                       device_printf(dev, "Link is up %d Mbps %s\n",
+                           adapter->link_speed,
+                           ((adapter->link_duplex == FULL_DUPLEX) ?
+                           "Full Duplex" : "Half Duplex"));
+               adapter->link_active = 1;
+               ifp->if_baudrate = adapter->link_speed * 1000000;
+               if ((ctrl & E1000_CTRL_EXT_LINK_MODE_GMII) &&
+                   (thstat & E1000_THSTAT_LINK_THROTTLE))
+                       device_printf(dev, "Link: thermal downshift\n");
+               /* This can sleep */
+               if_link_state_change(ifp, LINK_STATE_UP);
+       } else if (!link_check && (adapter->link_active == 1)) {
+               ifp->if_baudrate = adapter->link_speed = 0;
+               adapter->link_duplex = 0;
+               if (bootverbose)
+                       device_printf(dev, "Link is Down\n");
+               if ((ctrl & E1000_CTRL_EXT_LINK_MODE_GMII) &&
+                   (thstat & E1000_THSTAT_PWR_DOWN))
+                       device_printf(dev, "Link: thermal shutdown\n");
+               adapter->link_active = 0;
+               /* This can sleep */
+               if_link_state_change(ifp, LINK_STATE_DOWN);
+               /* Turn off watchdogs */
+               for (int i = 0; i < adapter->num_queues; i++, txr++)
+                       txr->queue_status = IGB_QUEUE_IDLE;
+       }
+}
+
+/*********************************************************************
+ *
+ *  This routine disables all traffic on the adapter by issuing a
+ *  global reset on the MAC and deallocates TX/RX buffers.
+ *
+ *  Called with the core lock held.
+ *
+ **********************************************************************/
+
+static void
+igb_stop(void *arg)
+{
+       struct adapter  *adapter = arg;
+       struct ifnet    *ifp = adapter->ifp;
+       struct tx_ring *txr = adapter->tx_rings;
+
+       IGB_CORE_LOCK_ASSERT(adapter);
+
+       INIT_DEBUGOUT("igb_stop: begin");
+
+       igb_disable_intr(adapter);
+
+       callout_stop(&adapter->timer);
+
+       /* Tell the stack that the interface is no longer active */
+       ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
+
+       /* Unarm watchdog timer. */
+       for (int i = 0; i < adapter->num_queues; i++, txr++) {
+               IGB_TX_LOCK(txr);
+               txr->queue_status = IGB_QUEUE_IDLE;
+               IGB_TX_UNLOCK(txr);
+       }
+
+       e1000_reset_hw(&adapter->hw);
+       /* Clear wake-up control so the device stays quiesced */
+       E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0);
+
+       e1000_led_off(&adapter->hw);
+       e1000_cleanup_led(&adapter->hw);
+}
+
+
+/*********************************************************************
+ *
+ *  Determine hardware revision.
+ *
+ **********************************************************************/
+static void
+igb_identify_hardware(struct adapter *adapter)
+{
+       device_t dev = adapter->dev;
+
+       /* Make sure our PCI config space has the necessary stuff set */
+       adapter->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
+       if (!((adapter->hw.bus.pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
+           (adapter->hw.bus.pci_cmd_word & PCIM_CMD_MEMEN))) {
+               INIT_DEBUGOUT("Memory Access and/or Bus Master "
+                   "bits were not set!\n");
+               adapter->hw.bus.pci_cmd_word |=
+               (PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
+               pci_write_config(dev, PCIR_COMMAND,
+                   adapter->hw.bus.pci_cmd_word, 2);
+       }
+
+       /* Save off the information about this board */
+       adapter->hw.vendor_id = pci_get_vendor(dev);
+       adapter->hw.device_id = pci_get_device(dev);
+       adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
+       adapter->hw.subsystem_vendor_id =
+           pci_read_config(dev, PCIR_SUBVEND_0, 2);
+       adapter->hw.subsystem_device_id =
+           pci_read_config(dev, PCIR_SUBDEV_0, 2);
+
+       /* Set MAC type early for PCI setup */
+       e1000_set_mac_type(&adapter->hw);
+
+       /* Are we a VF device? */
+       if ((adapter->hw.mac.type == e1000_vfadapt) ||
+           (adapter->hw.mac.type == e1000_vfadapt_i350))
+               adapter->vf_ifp = 1;
+       else
+               adapter->vf_ifp = 0;
+}
+
+/*
+ * igb_allocate_pci_resources - map BAR 0 register space and decide
+ * on the interrupt scheme (MSI-X / MSI / legacy).
+ *
+ * Returns 0 on success, ENXIO if the memory BAR cannot be mapped.
+ */
+static int
+igb_allocate_pci_resources(struct adapter *adapter)
+{
+       device_t        dev = adapter->dev;
+       int             rid;
+
+       rid = PCIR_BAR(0);
+       adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
+           &rid, RF_ACTIVE);
+       if (adapter->pci_mem == NULL) {
+               device_printf(dev, "Unable to allocate bus resource: memory\n");
+               return (ENXIO);
+       }
+       adapter->osdep.mem_bus_space_tag =
+           rman_get_bustag(adapter->pci_mem);
+       adapter->osdep.mem_bus_space_handle =
+           rman_get_bushandle(adapter->pci_mem);
+       adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
+
+       adapter->num_queues = 1; /* Defaults for Legacy or MSI */
+
+       /* This will setup either MSI/X or MSI */
+       adapter->msix = igb_setup_msix(adapter);
+       adapter->hw.back = &adapter->osdep;
+
+       return (0);
+}
+
+/*********************************************************************
+ *
+ *  Setup the Legacy or MSI Interrupt handler
+ *
+ *  Allocates the single IRQ resource, creates the queue and link
+ *  taskqueues, and installs igb_irq_fast as the filter handler.
+ *  Returns 0 on success or a bus error code.
+ *
+ **********************************************************************/
+static int
+igb_allocate_legacy(struct adapter *adapter)
+{
+       device_t                dev = adapter->dev;
+       struct igb_queue        *que = adapter->queues;
+       int                     error, rid = 0;
+
+       /* Turn off all interrupts */
+       E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
+
+       /* MSI RID is 1 */
+       if (adapter->msix == 1)
+               rid = 1;
+
+       /* We allocate a single interrupt resource */
+       adapter->res = bus_alloc_resource_any(dev,
+           SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
+       if (adapter->res == NULL) {
+               device_printf(dev, "Unable to allocate bus resource: "
+                   "interrupt\n");
+               return (ENXIO);
+       }
+
+       /*
+        * Try allocating a fast interrupt and the associated deferred
+        * processing contexts.
+        */
+       TASK_INIT(&que->que_task, 0, igb_handle_que, que);
+       /* Make tasklet for deferred link handling */
+       TASK_INIT(&adapter->link_task, 0, igb_handle_link, adapter);
+       que->tq = taskqueue_create_fast("igb_taskq", M_NOWAIT,
+           taskqueue_thread_enqueue, &que->tq);
+       taskqueue_start_threads(&que->tq, 1, PI_NET, "%s taskq",
+           device_get_nameunit(adapter->dev));
+       if ((error = bus_setup_intr(dev, adapter->res,
+           INTR_TYPE_NET | INTR_MPSAFE, igb_irq_fast, NULL,
+           adapter, &adapter->tag)) != 0) {
+               device_printf(dev, "Failed to register fast interrupt "
+                           "handler: %d\n", error);
+               taskqueue_free(que->tq);
+               que->tq = NULL;
+               return (error);
+       }
+
+       return (0);
+}
+
+
+/*********************************************************************
+ *
+ *  Setup the MSIX Queue Interrupt handlers: 
+ *
+ *  One vector per queue (igb_msix_que) plus a final vector for link
+ *  events (igb_msix_link).  MSI-X RIDs are 1-based, hence vector+1.
+ *  Returns 0 on success, ENXIO or a bus error code on failure.
+ *
+ **********************************************************************/
+static int
+igb_allocate_msix(struct adapter *adapter)
+{
+       device_t                dev = adapter->dev;
+       struct igb_queue        *que = adapter->queues;
+       int                     error, rid, vector = 0;
+
+
+       for (int i = 0; i < adapter->num_queues; i++, vector++, que++) {
+               rid = vector +1;
+               que->res = bus_alloc_resource_any(dev,
+                   SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
+               if (que->res == NULL) {
+                       device_printf(dev,
+                           "Unable to allocate bus resource: "
+                           "MSIX Queue Interrupt\n");
+                       return (ENXIO);
+               }
+               error = bus_setup_intr(dev, que->res,
+                   INTR_TYPE_NET | INTR_MPSAFE, NULL,
+                   igb_msix_que, que, &que->tag);
+               if (error) {
+                       /* NOTE(review): que->res is nulled without
+                        * bus_release_resource() — possible leak on this
+                        * error path; confirm teardown elsewhere. */
+                       que->res = NULL;
+                       device_printf(dev, "Failed to register Queue handler");
+                       return (error);
+               }
+#if __FreeBSD_version >= 800504
+               bus_describe_intr(dev, que->res, que->tag, "que %d", i);
+#endif
+               que->msix = vector;
+               /* 82575 uses fixed per-queue EICR bits; later MACs
+                * use one bit per vector */
+               if (adapter->hw.mac.type == e1000_82575)
+                       que->eims = E1000_EICR_TX_QUEUE0 << i;
+               else
+                       que->eims = 1 << vector;
+               /*
+               ** Bind the msix vector, and thus the
+               ** rings to the corresponding cpu.
+               */
+               if (adapter->num_queues > 1)
+                       bus_bind_intr(dev, que->res, i);
+               /* Make tasklet for deferred handling */
+               TASK_INIT(&que->que_task, 0, igb_handle_que, que);
+               que->tq = taskqueue_create_fast("igb_que", M_NOWAIT,
+                   taskqueue_thread_enqueue, &que->tq);
+               taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
+                   device_get_nameunit(adapter->dev));
+       }
+
+       /* And Link */
+       rid = vector + 1;
+       adapter->res = bus_alloc_resource_any(dev,
+           SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
+       if (adapter->res == NULL) {
+               device_printf(dev,
+                   "Unable to allocate bus resource: "
+                   "MSIX Link Interrupt\n");
+               return (ENXIO);
+       }
+       if ((error = bus_setup_intr(dev, adapter->res,
+           INTR_TYPE_NET | INTR_MPSAFE, NULL,
+           igb_msix_link, adapter, &adapter->tag)) != 0) {
+               device_printf(dev, "Failed to register Link handler");
+               return (error);
+       }
+#if __FreeBSD_version >= 800504
+       bus_describe_intr(dev, adapter->res, adapter->tag, "link");
+#endif
+       adapter->linkvec = vector;
+
+       return (0);
+}
+
+
+static void
+igb_configure_queues(struct adapter *adapter)
+{
+       struct  e1000_hw        *hw = &adapter->hw;
+       struct  igb_queue       *que;
+       u32                     tmp, ivar = 0, newitr = 0;
+
+       /* First turn on RSS capability */
+       if (adapter->hw.mac.type != e1000_82575)
+               E1000_WRITE_REG(hw, E1000_GPIE,
+                   E1000_GPIE_MSIX_MODE | E1000_GPIE_EIAME |
+                   E1000_GPIE_PBA | E1000_GPIE_NSICR);
+
+       /*
+       ** Turn on MSIX: route each queue's RX and TX interrupt to its
+       ** MSIX vector via the IVAR registers.  The IVAR packing (which
+       ** byte of which register belongs to which queue) differs per MAC.
+       */
+       switch (adapter->hw.mac.type) {
+       case e1000_82580:
+       case e1000_i350:
+       case e1000_vfadapt:
+       case e1000_vfadapt_i350:
+               /* RX entries: two queues per IVAR, bytes 0 and 2 */
+               for (int i = 0; i < adapter->num_queues; i++) {
+                       u32 index = i >> 1;
+                       ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
+                       que = &adapter->queues[i];
+                       if (i & 1) {
+                               ivar &= 0xFF00FFFF;
+                               ivar |= (que->msix | E1000_IVAR_VALID) << 16;
+                       } else {
+                               ivar &= 0xFFFFFF00;
+                               ivar |= que->msix | E1000_IVAR_VALID;
+                       }
+                       E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
+               }
+               /* TX entries: bytes 1 and 3 of the same IVARs */
+               for (int i = 0; i < adapter->num_queues; i++) {
+                       u32 index = i >> 1;
+                       ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
+                       que = &adapter->queues[i];
+                       if (i & 1) {
+                               ivar &= 0x00FFFFFF;
+                               ivar |= (que->msix | E1000_IVAR_VALID) << 24;
+                       } else {
+                               ivar &= 0xFFFF00FF;
+                               ivar |= (que->msix | E1000_IVAR_VALID) << 8;
+                       }
+                       E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
+                       adapter->que_mask |= que->eims;
+               }
+
+               /* And for the link interrupt */
+               ivar = (adapter->linkvec | E1000_IVAR_VALID) << 8;
+               adapter->link_mask = 1 << adapter->linkvec;
+               E1000_WRITE_REG(hw, E1000_IVAR_MISC, ivar);
+               break;
+       case e1000_82576:
+               /* RX entries */
+               for (int i = 0; i < adapter->num_queues; i++) {
+                       u32 index = i & 0x7; /* Each IVAR has two entries */
+                       ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
+                       que = &adapter->queues[i];
+                       if (i < 8) {
+                               ivar &= 0xFFFFFF00;
+                               ivar |= que->msix | E1000_IVAR_VALID;
+                       } else {
+                               ivar &= 0xFF00FFFF;
+                               ivar |= (que->msix | E1000_IVAR_VALID) << 16;
+                       }
+                       E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
+                       adapter->que_mask |= que->eims;
+               }
+               /* TX entries */
+               for (int i = 0; i < adapter->num_queues; i++) {
+                       u32 index = i & 0x7; /* Each IVAR has two entries */
+                       ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index);
+                       que = &adapter->queues[i];
+                       if (i < 8) {
+                               ivar &= 0xFFFF00FF;
+                               ivar |= (que->msix | E1000_IVAR_VALID) << 8;
+                       } else {
+                               ivar &= 0x00FFFFFF;
+                               ivar |= (que->msix | E1000_IVAR_VALID) << 24;
+                       }
+                       E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
+                       adapter->que_mask |= que->eims;
+               }
+
+               /* And for the link interrupt */
+               ivar = (adapter->linkvec | E1000_IVAR_VALID) << 8;
+               adapter->link_mask = 1 << adapter->linkvec;
+               E1000_WRITE_REG(hw, E1000_IVAR_MISC, ivar);
+               break;
+
+       case e1000_82575:
+               /* enable MSI-X support*/
+               tmp = E1000_READ_REG(hw, E1000_CTRL_EXT);
+               tmp |= E1000_CTRL_EXT_PBA_CLR;
+               /* Auto-Mask interrupts upon ICR read. */
+               tmp |= E1000_CTRL_EXT_EIAME;
+               tmp |= E1000_CTRL_EXT_IRCA;
+               E1000_WRITE_REG(hw, E1000_CTRL_EXT, tmp);
+
+               /* Queues */
+               for (int i = 0; i < adapter->num_queues; i++) {
+                       que = &adapter->queues[i];
+                       tmp = E1000_EICR_RX_QUEUE0 << i;
+                       tmp |= E1000_EICR_TX_QUEUE0 << i;
+                       que->eims = tmp;
+                       E1000_WRITE_REG_ARRAY(hw, E1000_MSIXBM(0),
+                           i, que->eims);
+                       adapter->que_mask |= que->eims;
+               }
+
+               /* Link */
+               E1000_WRITE_REG(hw, E1000_MSIXBM(adapter->linkvec),
+                   E1000_EIMS_OTHER);
+               adapter->link_mask |= E1000_EIMS_OTHER;
+               /* break was missing; fell through into default (no-op) */
+               break;
+       default:
+               break;
+       }
+
+       /* Set the starting interrupt rate */
+       if (igb_max_interrupt_rate > 0)
+               newitr = (4000000 / igb_max_interrupt_rate) & 0x7FFC;
+
+       /* EITR encoding differs on 82575 versus newer MACs */
+       if (hw->mac.type == e1000_82575)
+               newitr |= newitr << 16;
+       else
+               newitr |= E1000_EITR_CNT_IGNR;
+
+       for (int i = 0; i < adapter->num_queues; i++) {
+               que = &adapter->queues[i];
+               E1000_WRITE_REG(hw, E1000_EITR(que->msix), newitr);
+       }
+
+       return;
+}
+
+
+static void
+igb_free_pci_resources(struct adapter *adapter)
+{
+       struct          igb_queue *que = adapter->queues;
+       device_t        dev = adapter->dev;
+       int             rid;
+
+       /*
+       ** There is a slight possibility of a failure mode
+       ** in attach that will result in entering this function
+       ** before interrupt resources have been initialized, and
+       ** in that case we do not want to execute the loops below
+       ** We can detect this reliably by the state of the adapter
+       ** res pointer.
+       */
+       if (adapter->res == NULL)
+               goto mem;
+
+       /*
+        * First release all the interrupt resources:
+        */
+       for (int i = 0; i < adapter->num_queues; i++, que++) {
+               /* Queue vector 'msix' was allocated with IRQ rid msix+1 */
+               rid = que->msix + 1;
+               if (que->tag != NULL) {
+                       bus_teardown_intr(dev, que->res, que->tag);
+                       que->tag = NULL;
+               }
+               if (que->res != NULL)
+                       bus_release_resource(dev,
+                           SYS_RES_IRQ, rid, que->res);
+       }
+
+       /* Clean the Legacy or Link interrupt last */
+       if (adapter->linkvec) /* we are doing MSIX */
+               rid = adapter->linkvec + 1;
+       else
+               /* MSI was allocated as rid 1, legacy INTx as rid 0 */
+               (adapter->msix != 0) ? (rid = 1):(rid = 0);
+
+       if (adapter->tag != NULL) {
+               bus_teardown_intr(dev, adapter->res, adapter->tag);
+               adapter->tag = NULL;
+       }
+       if (adapter->res != NULL)
+               bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);
+
+mem:
+       /* Finally release the MSI(X) allocation and the memory BARs */
+       if (adapter->msix)
+               pci_release_msi(dev);
+
+       if (adapter->msix_mem != NULL)
+               bus_release_resource(dev, SYS_RES_MEMORY,
+                   PCIR_BAR(IGB_MSIX_BAR), adapter->msix_mem);
+
+       if (adapter->pci_mem != NULL)
+               bus_release_resource(dev, SYS_RES_MEMORY,
+                   PCIR_BAR(0), adapter->pci_mem);
+
+}
+
+/*
+ * Setup Either MSI/X or MSI
+ */
+static int
+igb_setup_msix(struct adapter *adapter)
+{
+       device_t dev = adapter->dev;
+       int rid, want, queues, msgs;
+
+       /* tuneable override */
+       if (igb_enable_msix == 0)
+               goto msi;
+
+       /* First try MSI/X */
+       rid = PCIR_BAR(IGB_MSIX_BAR);
+       adapter->msix_mem = bus_alloc_resource_any(dev,
+           SYS_RES_MEMORY, &rid, RF_ACTIVE);
+       if (!adapter->msix_mem) {
+               /* May not be enabled */
+               device_printf(adapter->dev,
+                   "Unable to map MSIX table \n");
+               goto msi;
+       }
+
+       msgs = pci_msix_count(dev);
+       if (msgs == 0) { /* system has msix disabled */
+               bus_release_resource(dev, SYS_RES_MEMORY,
+                   PCIR_BAR(IGB_MSIX_BAR), adapter->msix_mem);
+               adapter->msix_mem = NULL;
+               goto msi;
+       }
+
+       /* Figure out a reasonable auto config value */
+       queues = (mp_ncpus > (msgs-1)) ? (msgs-1) : mp_ncpus;
+
+       /* Manual override */
+       if (igb_num_queues != 0)
+               queues = igb_num_queues;
+       if (queues > 8)  /* max queues */
+               queues = 8;
+
+       /* Can have max of 4 queues on 82575 */
+       if ((adapter->hw.mac.type == e1000_82575) && (queues > 4))
+               queues = 4;
+
+       /* Limit the VF devices to one queue */
+       if (adapter->vf_ifp)
+               queues = 1;
+
+       /*
+       ** One vector (RX/TX pair) per queue
+       ** plus an additional for Link interrupt
+       */
+       want = queues + 1;
+       if (msgs >= want)
+               msgs = want;
+       else {
+               device_printf(adapter->dev,
+                   "MSIX Configuration Problem, "
+                   "%d vectors configured, but %d queues wanted!\n",
+                   msgs, want);
+               return (ENXIO);
+       }
+       if ((msgs) && pci_alloc_msix(dev, &msgs) == 0) {
+               device_printf(adapter->dev,
+                   "Using MSIX interrupts with %d vectors\n", msgs);
+               adapter->num_queues = queues;
+               return (msgs);
+       }
+msi:
+       /* MSI/X unavailable or disabled: fall back to a single MSI */
+       msgs = pci_msi_count(dev);
+       if (msgs == 1 && pci_alloc_msi(dev, &msgs) == 0)
+               device_printf(adapter->dev,"Using MSI interrupt\n");
+       return (msgs);
+}
+
+/*********************************************************************
+ *
+ *  Set up a fresh starting state
+ *
+ **********************************************************************/
+static void
+igb_reset(struct adapter *adapter)
+{
+       device_t        dev = adapter->dev;
+       struct e1000_hw *hw = &adapter->hw;
+       struct e1000_fc_info *fc = &hw->fc;
+       struct ifnet    *ifp = adapter->ifp;
+       u32             pba = 0;
+       u16             hwm;
+
+       INIT_DEBUGOUT("igb_reset: begin");
+
+       /* Let the firmware know the OS is in control */
+       igb_get_hw_control(adapter);
+
+       /*
+        * Packet Buffer Allocation (PBA)
+        * Writing PBA sets the receive portion of the buffer
+        * the remainder is used for the transmit buffer.
+        */
+       switch (hw->mac.type) {
+       case e1000_82575:
+               pba = E1000_PBA_32K;
+               break;
+       case e1000_82576:
+       case e1000_vfadapt:
+               pba = E1000_READ_REG(hw, E1000_RXPBS);
+               pba &= E1000_RXPBS_SIZE_MASK_82576;
+               break;
+       case e1000_82580:
+       case e1000_i350:
+       case e1000_vfadapt_i350:
+               pba = E1000_READ_REG(hw, E1000_RXPBS);
+               pba = e1000_rxpbs_adjust_82580(pba);
+               break;
+       /*
+       ** NOTE(review): an unreachable 'pba = E1000_PBA_35K;' sat here,
+       ** directly after the break above.  Removed as dead code; if it
+       ** was meant to be the fallback value it belongs under 'default:'.
+       */
+       default:
+               break;
+       }
+
+       /* Special needs in case of Jumbo frames */
+       if ((hw->mac.type == e1000_82575) && (ifp->if_mtu > ETHERMTU)) {
+               u32 tx_space, min_tx, min_rx;
+               pba = E1000_READ_REG(hw, E1000_PBA);
+               tx_space = pba >> 16;
+               pba &= 0xffff;
+               /* min_tx/min_rx are in KB units after the shift */
+               min_tx = (adapter->max_frame_size +
+                   sizeof(struct e1000_tx_desc) - ETHERNET_FCS_SIZE) * 2;
+               min_tx = roundup2(min_tx, 1024);
+               min_tx >>= 10;
+               min_rx = adapter->max_frame_size;
+               min_rx = roundup2(min_rx, 1024);
+               min_rx >>= 10;
+               if (tx_space < min_tx &&
+                   ((min_tx - tx_space) < pba)) {
+                       pba = pba - (min_tx - tx_space);
+                       /*
+                        * if short on rx space, rx wins
+                        * and must trump tx adjustment
+                        */
+                       if (pba < min_rx)
+                               pba = min_rx;
+               }
+               E1000_WRITE_REG(hw, E1000_PBA, pba);
+       }
+
+       INIT_DEBUGOUT1("igb_init: pba=%dK",pba);
+
+       /*
+        * These parameters control the automatic generation (Tx) and
+        * response (Rx) to Ethernet PAUSE frames.
+        * - High water mark should allow for at least two frames to be
+        *   received after sending an XOFF.
+        * - Low water mark works best when it is very near the high water mark.
+        *   This allows the receiver to restart by sending XON when it has
+        *   drained a bit.
+        */
+       /*
+       ** NOTE(review): hwm is u16 but (pba << 10) can exceed 65535
+       ** for large packet buffers -- possible truncation; confirm the
+       ** expected PBA range before relying on these water marks.
+       */
+       hwm = min(((pba << 10) * 9 / 10),
+           ((pba << 10) - 2 * adapter->max_frame_size));
+
+       if (hw->mac.type < e1000_82576) {
+               fc->high_water = hwm & 0xFFF8;  /* 8-byte granularity */
+               fc->low_water = fc->high_water - 8;
+       } else {
+               fc->high_water = hwm & 0xFFF0;  /* 16-byte granularity */
+               fc->low_water = fc->high_water - 16;
+       }
+
+       fc->pause_time = IGB_FC_PAUSE_TIME;
+       fc->send_xon = TRUE;
+
+       /* Issue a global reset */
+       e1000_reset_hw(hw);
+       E1000_WRITE_REG(hw, E1000_WUC, 0);
+
+       if (e1000_init_hw(hw) < 0)
+               device_printf(dev, "Hardware Initialization Failed\n");
+
+       /* Setup DMA Coalescing */
+       if ((hw->mac.type == e1000_i350) &&
+           (adapter->dma_coalesce == TRUE)) {
+               u32 reg;
+
+               hwm = (pba - 4) << 10;
+               reg = (((pba-6) << E1000_DMACR_DMACTHR_SHIFT)
+                   & E1000_DMACR_DMACTHR_MASK);
+
+               /* transition to L0x or L1 if available..*/
+               reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);
+
+               /* timer = +-1000 usec in 32usec intervals */
+               reg |= (1000 >> 5);
+               E1000_WRITE_REG(hw, E1000_DMACR, reg);
+
+               /* No lower threshold */
+               E1000_WRITE_REG(hw, E1000_DMCRTRH, 0);
+
+               /* set hwm to PBA -  2 * max frame size */
+               /* NOTE(review): comment disagrees with the code above,
+               ** which set hwm = (pba - 4) << 10 -- verify intent. */
+               E1000_WRITE_REG(hw, E1000_FCRTC, hwm);
+
+               /* Set the interval before transition */
+               reg = E1000_READ_REG(hw, E1000_DMCTLX);
+               reg |= 0x800000FF; /* 255 usec */
+               E1000_WRITE_REG(hw, E1000_DMCTLX, reg);
+
+               /* free space in tx packet buffer to wake from DMA coal */
+               E1000_WRITE_REG(hw, E1000_DMCTXTH,
+                   (20480 - (2 * adapter->max_frame_size)) >> 6);
+
+               /* make low power state decision controlled by DMA coal */
+               reg = E1000_READ_REG(hw, E1000_PCIEMISC);
+               E1000_WRITE_REG(hw, E1000_PCIEMISC,
+                   reg | E1000_PCIEMISC_LX_DECISION);
+               device_printf(dev, "DMA Coalescing enabled\n");
+       }
+
+       /* Program the VLAN ethertype (VET) register */
+       E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);
+       e1000_get_phy_info(hw);
+       e1000_check_for_link(hw);
+       return;
+}
+
+/*********************************************************************
+ *
+ *  Setup networking device structure and register an interface.
+ *
+ **********************************************************************/
+static int
+igb_setup_interface(device_t dev, struct adapter *adapter)
+{
+       struct ifnet   *ifp;
+
+       INIT_DEBUGOUT("igb_setup_interface: begin");
+
+       ifp = adapter->ifp = if_alloc(IFT_ETHER);
+       if (ifp == NULL) {
+               device_printf(dev, "can not allocate ifnet structure\n");
+               return (-1);
+       }
+       if_initname(ifp, device_get_name(dev), device_get_unit(dev));
+       ifp->if_mtu = ETHERMTU;
+       ifp->if_init =  igb_init;
+       ifp->if_softc = adapter;
+       ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
+       ifp->if_ioctl = igb_ioctl;
+       ifp->if_start = igb_start;
+#if __FreeBSD_version >= 800000
+       /* Multiqueue transmit entry points (FreeBSD 8.x and later) */
+       ifp->if_transmit = igb_mq_start;
+       ifp->if_qflush = igb_qflush;
+#endif
+       IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 1);
+       ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 1;
+       IFQ_SET_READY(&ifp->if_snd);
+
+       ether_ifattach(ifp, adapter->hw.mac.addr);
+
+       /* NOTE(review): redundant -- both fields are overwritten below */
+       ifp->if_capabilities = ifp->if_capenable = 0;
+
+       ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM;
+       ifp->if_capabilities |= IFCAP_TSO4;
+       ifp->if_capabilities |= IFCAP_JUMBO_MTU;
+       ifp->if_capenable = ifp->if_capabilities;
+
+       /* Don't enable LRO by default */
+       /* (advertised in capabilities but deliberately not in capenable) */
+       ifp->if_capabilities |= IFCAP_LRO;
+
+#ifdef DEVICE_POLLING
+       ifp->if_capabilities |= IFCAP_POLLING;
+#endif
+
+       /*
+        * Tell the upper layer(s) we
+        * support full VLAN capability.
+        */
+       ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
+       ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
+       ifp->if_capenable |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
+
+       /*
+       ** Dont turn this on by default, if vlans are
+       ** created on another pseudo device (eg. lagg)
+       ** then vlan events are not passed thru, breaking
+       ** operation, but with HW FILTER off it works. If
+       ** using vlans directly on the em driver you can
+       ** enable this and get full hardware tag filtering.
+       */
+       ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
+
+       /*
+        * Specify the media types supported by this adapter and register
+        * callbacks to update media and link information
+        */
+       ifmedia_init(&adapter->media, IFM_IMASK,
+           igb_media_change, igb_media_status);
+       if ((adapter->hw.phy.media_type == e1000_media_type_fiber) ||
+           (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) {
+               ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX | IFM_FDX, 
+                           0, NULL);
+               ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
+       } else {
+               ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
+               ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX,
+                           0, NULL);
+               ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX,
+                           0, NULL);
+               ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
+                           0, NULL);
+               if (adapter->hw.phy.type != e1000_phy_ife) {
+                       ifmedia_add(&adapter->media,
+                               IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
+                       ifmedia_add(&adapter->media,
+                               IFM_ETHER | IFM_1000_T, 0, NULL);
+               }
+       }
+       ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
+       ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
+       return (0);
+}
+
+
+/*
+ * Manage DMA'able memory.
+ */
+static void
+igb_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
+{
+       /* Report the single segment's bus address through 'arg' */
+       if (error)
+               return;
+       *(bus_addr_t *) arg = segs[0].ds_addr;
+}
+
+static int
+igb_dma_malloc(struct adapter *adapter, bus_size_t size,
+        struct igb_dma_alloc *dma, int mapflags)
+{
+       int error;
+
+       /* A single physically-contiguous, IGB_DBA_ALIGN-aligned segment */
+       error = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */
+                               IGB_DBA_ALIGN, 0,       /* alignment, bounds */
+                               BUS_SPACE_MAXADDR,      /* lowaddr */
+                               BUS_SPACE_MAXADDR,      /* highaddr */
+                               NULL, NULL,             /* filter, filterarg */
+                               size,                   /* maxsize */
+                               1,                      /* nsegments */
+                               size,                   /* maxsegsize */
+                               0,                      /* flags */
+                               NULL,                   /* lockfunc */
+                               NULL,                   /* lockarg */
+                               &dma->dma_tag);
+       if (error) {
+               device_printf(adapter->dev,
+                   "%s: bus_dma_tag_create failed: %d\n",
+                   __func__, error);
+               goto fail_0;
+       }
+
+       error = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
+           BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &dma->dma_map);
+       if (error) {
+               device_printf(adapter->dev,
+                   "%s: bus_dmamem_alloc(%ju) failed: %d\n",
+                   __func__, (uintmax_t)size, error);
+               goto fail_2;
+       }
+
+       dma->dma_paddr = 0;
+       error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
+           size, igb_dmamap_cb, &dma->dma_paddr, mapflags | BUS_DMA_NOWAIT);
+       if (error || dma->dma_paddr == 0) {
+               device_printf(adapter->dev,
+                   "%s: bus_dmamap_load failed: %d\n",
+                   __func__, error);
+               goto fail_3;
+       }
+
+       return (0);
+
+       /*
+       ** Unwind in reverse order of construction.  The memory free
+       ** now lives on the fail_3 path only; the old code jumped from
+       ** a failed bus_dmamem_alloc() straight into bus_dmamem_free()
+       ** of a vaddr/map that were never allocated.
+       */
+fail_3:
+       bus_dmamap_unload(dma->dma_tag, dma->dma_map);
+       bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
+fail_2:
+       bus_dma_tag_destroy(dma->dma_tag);
+fail_0:
+       dma->dma_map = NULL;
+       dma->dma_tag = NULL;
+
+       return (error);
+}
+
+static void
+igb_dma_free(struct adapter *adapter, struct igb_dma_alloc *dma)
+{
+       /*
+       ** Tear down in reverse order of igb_dma_malloc(); safe to call
+       ** on a partially-constructed or already-freed allocation because
+       ** each stage is guarded and NULLed afterwards.
+       */
+       if (dma->dma_tag == NULL)
+               return;
+       if (dma->dma_map != NULL) {
+               /* Complete any in-flight DMA before unmapping */
+               bus_dmamap_sync(dma->dma_tag, dma->dma_map,
+                   BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+               bus_dmamap_unload(dma->dma_tag, dma->dma_map);
+               bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
+               dma->dma_map = NULL;
+       }
+       bus_dma_tag_destroy(dma->dma_tag);
+       dma->dma_tag = NULL;
+}
+
+
+/*********************************************************************
+ *
+ *  Allocate memory for the transmit and receive rings, and then
+ *  the descriptors associated with each, called only once at attach.
+ *
+ **********************************************************************/
+static int
+igb_allocate_queues(struct adapter *adapter)
+{
+       device_t dev = adapter->dev;
+       struct igb_queue        *que = NULL;
+       struct tx_ring          *txr = NULL;
+       struct rx_ring          *rxr = NULL;
+       int rsize, tsize, error = E1000_SUCCESS;
+       int txconf = 0, rxconf = 0;
+
+       /* First allocate the top level queue structs */
+       if (!(adapter->queues =
+           (struct igb_queue *) malloc(sizeof(struct igb_queue) *
+           adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
+               device_printf(dev, "Unable to allocate queue memory\n");
+               error = ENOMEM;
+               goto fail;
+       }
+
+       /* Next allocate the TX ring struct memory */
+       if (!(adapter->tx_rings =
+           (struct tx_ring *) malloc(sizeof(struct tx_ring) *
+           adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
+               device_printf(dev, "Unable to allocate TX ring memory\n");
+               error = ENOMEM;
+               goto tx_fail;
+       }
+
+       /* Now allocate the RX */
+       if (!(adapter->rx_rings =
+           (struct rx_ring *) malloc(sizeof(struct rx_ring) *
+           adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
+               device_printf(dev, "Unable to allocate RX ring memory\n");
+               error = ENOMEM;
+               goto rx_fail;
+       }
+
+       tsize = roundup2(adapter->num_tx_desc *
+           sizeof(union e1000_adv_tx_desc), IGB_DBA_ALIGN);
+       /*
+        * Now set up the TX queues, txconf is needed to handle the
+        * possibility that things fail midcourse and we need to
+        * undo memory gracefully
+        */ 
+       for (int i = 0; i < adapter->num_queues; i++, txconf++) {
+               /* Set up some basics */
+               txr = &adapter->tx_rings[i];
+               txr->adapter = adapter;
+               txr->me = i;
+
+               /* Initialize the TX lock */
+               snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
+                   device_get_nameunit(dev), txr->me);
+               mtx_init(&txr->tx_mtx, txr->mtx_name, NULL, MTX_DEF);
+
+               if (igb_dma_malloc(adapter, tsize,
+                       &txr->txdma, BUS_DMA_NOWAIT)) {
+                       device_printf(dev,
+                           "Unable to allocate TX Descriptor memory\n");
+                       error = ENOMEM;
+                       goto err_tx_desc;
+               }
+               txr->tx_base = (struct e1000_tx_desc *)txr->txdma.dma_vaddr;
+               bzero((void *)txr->tx_base, tsize);
+
+               /* Now allocate transmit buffers for the ring */
+               if (igb_allocate_transmit_buffers(txr)) {
+                       device_printf(dev,
+                           "Critical Failure setting up transmit buffers\n");
+                       error = ENOMEM;
+                       goto err_tx_desc;
+               }
+#if __FreeBSD_version >= 800000
+               /* Allocate a buf ring */
+               txr->br = buf_ring_alloc(IGB_BR_SIZE, M_DEVBUF,
+                   M_WAITOK, &txr->tx_mtx);
+#endif
+       }
+
+       /*
+        * Next the RX queues...
+        */ 
+       rsize = roundup2(adapter->num_rx_desc *
+           sizeof(union e1000_adv_rx_desc), IGB_DBA_ALIGN);
+       for (int i = 0; i < adapter->num_queues; i++, rxconf++) {
+               rxr = &adapter->rx_rings[i];
+               rxr->adapter = adapter;
+               rxr->me = i;
+
+               /* Initialize the RX lock */
+               snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
+                   device_get_nameunit(dev), txr->me);
+               mtx_init(&rxr->rx_mtx, rxr->mtx_name, NULL, MTX_DEF);
+
+               if (igb_dma_malloc(adapter, rsize,
+                       &rxr->rxdma, BUS_DMA_NOWAIT)) {
+                       device_printf(dev,
+                           "Unable to allocate RxDescriptor memory\n");
+                       error = ENOMEM;
+                       goto err_rx_desc;
+               }
+               rxr->rx_base = (union e1000_adv_rx_desc *)rxr->rxdma.dma_vaddr;
+               bzero((void *)rxr->rx_base, rsize);
+
+               /* Allocate receive buffers for the ring*/
+               if (igb_allocate_receive_buffers(rxr)) {
+                       device_printf(dev,
+                           "Critical Failure setting up receive buffers\n");
+                       error = ENOMEM;
+                       goto err_rx_desc;
+               }
+       }
+
+       /*
+       ** Finally set up the queue holding structs
+       */
+       for (int i = 0; i < adapter->num_queues; i++) {
+               que = &adapter->queues[i];
+               que->adapter = adapter;
+               que->txr = &adapter->tx_rings[i];
+               que->rxr = &adapter->rx_rings[i];
+       }
+
+       return (0);
+
+err_rx_desc:
+       for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
+               igb_dma_free(adapter, &rxr->rxdma);
+err_tx_desc:
+       for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
+               igb_dma_free(adapter, &txr->txdma);
+       free(adapter->rx_rings, M_DEVBUF);
+rx_fail:
+#if __FreeBSD_version >= 800000
+       buf_ring_free(txr->br, M_DEVBUF);
+#endif
+       free(adapter->tx_rings, M_DEVBUF);
+tx_fail:
+       free(adapter->queues, M_DEVBUF);
+fail:
+       return (error);
+}
+
+/*********************************************************************
+ *
+ *  Allocate memory for tx_buffer structures. The tx_buffer stores all
+ *  the information needed to transmit a packet on the wire. This is
+ *  called only once at attach, setup is done every reset.
+ *
+ **********************************************************************/
+static int
+igb_allocate_transmit_buffers(struct tx_ring *txr)
+{
+       struct adapter *adapter = txr->adapter;
+       device_t dev = adapter->dev;
+       struct igb_tx_buffer *txbuf;
+       int error, i;
+
+       /*
+        * Setup DMA descriptor areas.
+        */
+       if ((error = bus_dma_tag_create(bus_get_dma_tag(dev),
+                              1, 0,                    /* alignment, bounds */
+                              BUS_SPACE_MAXADDR,       /* lowaddr */
+                              BUS_SPACE_MAXADDR,       /* highaddr */
+                              NULL, NULL,              /* filter, filterarg */
+                              IGB_TSO_SIZE,            /* maxsize */
+                              IGB_MAX_SCATTER,         /* nsegments */
+                              PAGE_SIZE,               /* maxsegsize */
+                              0,                       /* flags */
+                              NULL,                    /* lockfunc */
+                              NULL,                    /* lockfuncarg */
+                              &txr->txtag))) {
+               device_printf(dev,"Unable to allocate TX DMA tag\n");
+               goto fail;
+       }
+
+       /* One igb_tx_buffer bookkeeping slot per TX descriptor */
+       if (!(txr->tx_buffers =
+           (struct igb_tx_buffer *) malloc(sizeof(struct igb_tx_buffer) *
+           adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
+               device_printf(dev, "Unable to allocate tx_buffer memory\n");
+               error = ENOMEM;
+               goto fail;
+       }
+
+        /* Create the descriptor buffer dma maps */
+       txbuf = txr->tx_buffers;
+       for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
+               error = bus_dmamap_create(txr->txtag, 0, &txbuf->map);
+               if (error != 0) {
+                       device_printf(dev, "Unable to create TX DMA map\n");
+                       goto fail;
+               }
+       }
+
+       return 0;
+fail:
+       /* We free all, it handles case where we are in the middle */
+       igb_free_transmit_structures(adapter);
+       return (error);
+}
+
+/*********************************************************************
+ *
+ *  Initialize a transmit ring.
+ *
+ **********************************************************************/
+static void
+igb_setup_transmit_ring(struct tx_ring *txr)
+{
+       struct adapter *adapter = txr->adapter;
+       struct igb_tx_buffer *txbuf;
+       int i;
+
+       /*
+        * Runs with the TX lock held for the whole reset so neither the
+        * start path nor txeof can observe a half-initialized ring.
+        */
+       /* Clear the old descriptor contents */
+       IGB_TX_LOCK(txr);
+       bzero((void *)txr->tx_base,
+             (sizeof(union e1000_adv_tx_desc)) * adapter->num_tx_desc);
+       /* Reset indices */
+       txr->next_avail_desc = 0;
+       txr->next_to_clean = 0;
+
+       /* Free any existing tx buffers. */
+        txbuf = txr->tx_buffers;
+       for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
+               if (txbuf->m_head != NULL) {
+                       /* Complete DMA before unloading the map and
+                        * freeing the mbuf chain. */
+                       bus_dmamap_sync(txr->txtag, txbuf->map,
+                           BUS_DMASYNC_POSTWRITE);
+                       bus_dmamap_unload(txr->txtag, txbuf->map);
+                       m_freem(txbuf->m_head);
+                       txbuf->m_head = NULL;
+               }
+               /* clear the watch index */
+               txbuf->next_eop = -1;
+        }
+
+       /* Set number of descriptors available */
+       txr->tx_avail = adapter->num_tx_desc;
+
+       /* Push the zeroed descriptor area out to the device. */
+       bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
+           BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+       IGB_TX_UNLOCK(txr);
+}
+
+/*********************************************************************
+ *
+ *  Initialize all transmit rings.
+ *
+ **********************************************************************/
+static void
+igb_setup_transmit_structures(struct adapter *adapter)
+{
+       struct tx_ring *txr;
+       int i;
+
+       /* (Re)initialize every transmit ring on the adapter. */
+       for (i = 0, txr = adapter->tx_rings; i < adapter->num_queues;
+           i++, txr++)
+               igb_setup_transmit_ring(txr);
+}
+
+/*********************************************************************
+ *
+ *  Enable transmit unit.
+ *
+ **********************************************************************/
+static void
+igb_initialize_transmit_units(struct adapter *adapter)
+{
+       struct tx_ring  *txr = adapter->tx_rings;
+       struct e1000_hw *hw = &adapter->hw;
+       u32             tctl, txdctl;
+
+       INIT_DEBUGOUT("igb_initialize_transmit_units: begin");
+       tctl = txdctl = 0;
+
+       /* Setup the Tx Descriptor Rings */
+       for (int i = 0; i < adapter->num_queues; i++, txr++) {
+               u64 bus_addr = txr->txdma.dma_paddr;
+
+               /* Program ring length and 64-bit base (high then low). */
+               E1000_WRITE_REG(hw, E1000_TDLEN(i),
+                   adapter->num_tx_desc * sizeof(struct e1000_tx_desc));
+               E1000_WRITE_REG(hw, E1000_TDBAH(i),
+                   (uint32_t)(bus_addr >> 32));
+               E1000_WRITE_REG(hw, E1000_TDBAL(i),
+                   (uint32_t)bus_addr);
+
+               /* Setup the HW Tx Head and Tail descriptor pointers */
+               E1000_WRITE_REG(hw, E1000_TDT(i), 0);
+               E1000_WRITE_REG(hw, E1000_TDH(i), 0);
+
+               HW_DEBUGOUT2("Base = %x, Length = %x\n",
+                   E1000_READ_REG(hw, E1000_TDBAL(i)),
+                   E1000_READ_REG(hw, E1000_TDLEN(i)));
+
+               txr->queue_status = IGB_QUEUE_IDLE;
+
+               /*
+                * NOTE(review): txdctl is OR-accumulated across loop
+                * iterations rather than rebuilt per queue; harmless
+                * only because all OR'd bits are compile-time constants,
+                * so every queue gets the identical value.
+                */
+               txdctl |= IGB_TX_PTHRESH;
+               txdctl |= IGB_TX_HTHRESH << 8;
+               txdctl |= IGB_TX_WTHRESH << 16;
+               txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
+               E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);
+       }
+
+       /* A VF interface has no access to the global TCTL register. */
+       if (adapter->vf_ifp)
+               return;
+
+       e1000_config_collision_dist(hw);
+
+       /* Program the Transmit Control Register */
+       tctl = E1000_READ_REG(hw, E1000_TCTL);
+       tctl &= ~E1000_TCTL_CT;
+       tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
+                  (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
+
+       /* This write will effectively turn on the transmit unit. */
+       E1000_WRITE_REG(hw, E1000_TCTL, tctl);
+}
+
+/*********************************************************************
+ *
+ *  Free all transmit rings.
+ *
+ **********************************************************************/
+static void
+igb_free_transmit_structures(struct adapter *adapter)
+{
+       struct tx_ring *txr = adapter->tx_rings;
+
+       /*
+        * Tear down each ring under its own lock, then destroy the
+        * lock itself last (it must not be held at destroy time).
+        */
+       for (int i = 0; i < adapter->num_queues; i++, txr++) {
+               IGB_TX_LOCK(txr);
+               igb_free_transmit_buffers(txr);
+               igb_dma_free(adapter, &txr->txdma);
+               IGB_TX_UNLOCK(txr);
+               IGB_TX_LOCK_DESTROY(txr);
+       }
+       /* Finally release the ring array itself. */
+       free(adapter->tx_rings, M_DEVBUF);
+}
+
+/*********************************************************************
+ *
+ *  Free transmit ring related data structures.
+ *
+ **********************************************************************/
+static void
+igb_free_transmit_buffers(struct tx_ring *txr)
+{
+       struct adapter *adapter = txr->adapter;
+       struct igb_tx_buffer *tx_buffer;
+       int             i;
+
+       INIT_DEBUGOUT("free_transmit_ring: begin");
+
+       /* Nothing was allocated (or it was already freed). */
+       if (txr->tx_buffers == NULL)
+               return;
+
+       tx_buffer = txr->tx_buffers;
+       for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
+               if (tx_buffer->m_head != NULL) {
+                       /* In-flight packet: sync, unload, free mbuf,
+                        * then destroy its DMA map. */
+                       bus_dmamap_sync(txr->txtag, tx_buffer->map,
+                           BUS_DMASYNC_POSTWRITE);
+                       bus_dmamap_unload(txr->txtag,
+                           tx_buffer->map);
+                       m_freem(tx_buffer->m_head);
+                       tx_buffer->m_head = NULL;
+                       if (tx_buffer->map != NULL) {
+                               bus_dmamap_destroy(txr->txtag,
+                                   tx_buffer->map);
+                               tx_buffer->map = NULL;
+                       }
+               } else if (tx_buffer->map != NULL) {
+                       /* Idle slot: just tear down the map. */
+                       bus_dmamap_unload(txr->txtag,
+                           tx_buffer->map);
+                       bus_dmamap_destroy(txr->txtag,
+                           tx_buffer->map);
+                       tx_buffer->map = NULL;
+               }
+       }
+#if __FreeBSD_version >= 800000
+       if (txr->br != NULL)
+               buf_ring_free(txr->br, M_DEVBUF);
+#endif
+       if (txr->tx_buffers != NULL) {
+               free(txr->tx_buffers, M_DEVBUF);
+               txr->tx_buffers = NULL;
+       }
+       /* Tag goes last: all maps created from it are gone now. */
+       if (txr->txtag != NULL) {
+               bus_dma_tag_destroy(txr->txtag);
+               txr->txtag = NULL;
+       }
+       return;
+}
+
+/**********************************************************************
+ *
+ *  Setup work for hardware segmentation offload (TSO)
+ *
+ **********************************************************************/
+static boolean_t
+igb_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *hdrlen)
+{
+       struct adapter *adapter = txr->adapter;
+       struct e1000_adv_tx_context_desc *TXD;
+       struct igb_tx_buffer        *tx_buffer;
+       u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
+       u32 mss_l4len_idx = 0;
+       u16 vtag = 0;
+       int ctxd, ehdrlen, ip_hlen, tcp_hlen;
+       struct ether_vlan_header *eh;
+       struct ip *ip;
+       struct tcphdr *th;
+
+
+       /*
+        * Determine where frame payload starts.
+        * Jump over vlan headers if already present
+        */
+       eh = mtod(mp, struct ether_vlan_header *);
+       if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN))
+               ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
+       else
+               ehdrlen = ETHER_HDR_LEN;
+
+       /* Ensure we have at least the IP+TCP header in the first mbuf. */
+       if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr))
+               return FALSE;
+
+       /* Only supports IPV4 for now */
+       ctxd = txr->next_avail_desc;
+       tx_buffer = &txr->tx_buffers[ctxd];
+       TXD = (struct e1000_adv_tx_context_desc *) &txr->tx_base[ctxd];
+
+       ip = (struct ip *)(mp->m_data + ehdrlen);
+       if (ip->ip_p != IPPROTO_TCP)
+                return FALSE;   /* 0 */
+       /*
+        * Zero the IP checksum and seed the TCP checksum with the
+        * pseudo-header (no length term) — presumably the hardware
+        * fills in the rest per TSO segment; confirm against the
+        * 82575/82576 datasheet.
+        */
+       ip->ip_sum = 0;
+       ip_hlen = ip->ip_hl << 2;
+       th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
+       th->th_sum = in_pseudo(ip->ip_src.s_addr,
+           ip->ip_dst.s_addr, htons(IPPROTO_TCP));
+       tcp_hlen = th->th_off << 2;
+       /*
+        * Calculate header length, this is used
+        * in the transmit desc in igb_xmit
+        */
+       *hdrlen = ehdrlen + ip_hlen + tcp_hlen;
+
+       /* VLAN MACLEN IPLEN */
+       if (mp->m_flags & M_VLANTAG) {
+               vtag = htole16(mp->m_pkthdr.ether_vtag);
+               vlan_macip_lens |= (vtag << E1000_ADVTXD_VLAN_SHIFT);
+       }
+
+       vlan_macip_lens |= (ehdrlen << E1000_ADVTXD_MACLEN_SHIFT);
+       vlan_macip_lens |= ip_hlen;
+       TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
+
+       /* ADV DTYPE TUCMD */
+       type_tucmd_mlhl |= E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT;
+       type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP;
+       type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4;
+       TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
+
+       /* MSS L4LEN IDX */
+       mss_l4len_idx |= (mp->m_pkthdr.tso_segsz << E1000_ADVTXD_MSS_SHIFT);
+       mss_l4len_idx |= (tcp_hlen << E1000_ADVTXD_L4LEN_SHIFT);
+       /* 82575 needs the queue index added */
+       if (adapter->hw.mac.type == e1000_82575)
+               mss_l4len_idx |= txr->me << 4;
+       TXD->mss_l4len_idx = htole32(mss_l4len_idx);
+
+       TXD->seqnum_seed = htole32(0);
+       tx_buffer->m_head = NULL;
+       tx_buffer->next_eop = -1;
+
+       /* Consume the context descriptor slot (with wraparound). */
+       if (++ctxd == adapter->num_tx_desc)
+               ctxd = 0;
+
+       txr->tx_avail--;
+       txr->next_avail_desc = ctxd;
+       return TRUE;
+}
+
+
+/*********************************************************************
+ *
+ *  Context Descriptor setup for VLAN or CSUM
+ *
+ **********************************************************************/
+
+static bool
+igb_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
+{
+       struct adapter *adapter = txr->adapter;
+       struct e1000_adv_tx_context_desc *TXD;
+       struct igb_tx_buffer        *tx_buffer;
+       u32 vlan_macip_lens, type_tucmd_mlhl, mss_l4len_idx;
+       struct ether_vlan_header *eh;
+       struct ip *ip = NULL;
+       struct ip6_hdr *ip6;
+       int  ehdrlen, ctxd, ip_hlen = 0;
+       u16     etype, vtag = 0;
+       u8      ipproto = 0;
+       bool    offload = TRUE;
+
+       /* No checksum offload requested for this packet. */
+       if ((mp->m_pkthdr.csum_flags & CSUM_OFFLOAD) == 0)
+               offload = FALSE;
+
+       vlan_macip_lens = type_tucmd_mlhl = mss_l4len_idx = 0;
+       ctxd = txr->next_avail_desc;
+       tx_buffer = &txr->tx_buffers[ctxd];
+       TXD = (struct e1000_adv_tx_context_desc *) &txr->tx_base[ctxd];
+
+       /*
+       ** In advanced descriptors the vlan tag must 
+       ** be placed into the context descriptor, thus
+       ** we need to be here just for that setup.
+       */
+       if (mp->m_flags & M_VLANTAG) {
+               vtag = htole16(mp->m_pkthdr.ether_vtag);
+               vlan_macip_lens |= (vtag << E1000_ADVTXD_VLAN_SHIFT);
+       } else if (offload == FALSE)
+               /* Neither VLAN tag nor csum work: no descriptor needed. */
+               return FALSE;
+
+       /*
+        * Determine where frame payload starts.
+        * Jump over vlan headers if already present,
+        * helpful for QinQ too.
+        */
+       eh = mtod(mp, struct ether_vlan_header *);
+       if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
+               etype = ntohs(eh->evl_proto);
+               ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
+       } else {
+               etype = ntohs(eh->evl_encap_proto);
+               ehdrlen = ETHER_HDR_LEN;
+       }
+
+       /* Set the ether header length */
+       vlan_macip_lens |= ehdrlen << E1000_ADVTXD_MACLEN_SHIFT;
+
+       /* Classify L3 to pick the IPv4/IPv6 TUCMD bit and ip_hlen. */
+       switch (etype) {
+               case ETHERTYPE_IP:
+                       ip = (struct ip *)(mp->m_data + ehdrlen);
+                       ip_hlen = ip->ip_hl << 2;
+                       if (mp->m_len < ehdrlen + ip_hlen) {
+                               offload = FALSE;
+                               break;
+                       }
+                       ipproto = ip->ip_p;
+                       type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4;
+                       break;
+               case ETHERTYPE_IPV6:
+                       ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
+                       ip_hlen = sizeof(struct ip6_hdr);
+                       ipproto = ip6->ip6_nxt;
+                       type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV6;
+                       break;
+               default:
+                       offload = FALSE;
+                       break;
+       }
+
+       vlan_macip_lens |= ip_hlen;
+       type_tucmd_mlhl |= E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT;
+
+       /* L4 type bit only when the stack actually requested that csum. */
+       switch (ipproto) {
+               case IPPROTO_TCP:
+                       if (mp->m_pkthdr.csum_flags & CSUM_TCP)
+                               type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP;
+                       break;
+               case IPPROTO_UDP:
+                       if (mp->m_pkthdr.csum_flags & CSUM_UDP)
+                               type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP;
+                       break;
+#if __FreeBSD_version >= 800000
+               case IPPROTO_SCTP:
+                       if (mp->m_pkthdr.csum_flags & CSUM_SCTP)
+                               type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP;
+                       break;
+#endif
+               default:
+                       offload = FALSE;
+                       break;
+       }
+
+       /* 82575 needs the queue index added */
+       if (adapter->hw.mac.type == e1000_82575)
+               mss_l4len_idx = txr->me << 4;
+
+       /* Now copy bits into descriptor */
+       TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
+       TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
+       TXD->seqnum_seed = htole32(0);
+       TXD->mss_l4len_idx = htole32(mss_l4len_idx);
+
+       tx_buffer->m_head = NULL;
+       tx_buffer->next_eop = -1;
+
+       /* We've consumed the first desc, adjust counters */
+       if (++ctxd == adapter->num_tx_desc)
+               ctxd = 0;
+       txr->next_avail_desc = ctxd;
+       --txr->tx_avail;
+
+        return (offload);
+}
+
+
+/**********************************************************************
+ *
+ *  Examine each tx_buffer in the used queue. If the hardware is done
+ *  processing the packet then free associated resources. The
+ *  tx_buffer is put back on the free queue.
+ *
+ *  TRUE return means there's work in the ring to clean, FALSE its empty.
+ **********************************************************************/
+static bool
+igb_txeof(struct tx_ring *txr)
+{
+       struct adapter  *adapter = txr->adapter;
+        int first, last, done, processed;
+        struct igb_tx_buffer *tx_buffer;
+        struct e1000_tx_desc   *tx_desc, *eop_desc;
+       struct ifnet   *ifp = adapter->ifp;
+
+       IGB_TX_LOCK_ASSERT(txr);
+
+       /* Ring completely clean: nothing to do. */
+        if (txr->tx_avail == adapter->num_tx_desc) {
+               txr->queue_status = IGB_QUEUE_IDLE;
+                return FALSE;
+       }
+
+       processed = 0;
+        first = txr->next_to_clean;
+        tx_desc = &txr->tx_base[first];
+        tx_buffer = &txr->tx_buffers[first];
+       last = tx_buffer->next_eop;
+        eop_desc = &txr->tx_base[last];
+
+       /*
+        * What this does is get the index of the
+        * first descriptor AFTER the EOP of the 
+        * first packet, that way we can do the
+        * simple comparison on the inner while loop.
+        */
+       if (++last == adapter->num_tx_desc)
+               last = 0;
+       done = last;
+
+       /* Pull the device's status writebacks into host memory. */
+        bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
+            BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+
+       /* Outer loop: one completed packet (DD set on its EOP) per pass. */
+        while (eop_desc->upper.fields.status & E1000_TXD_STAT_DD) {
+               /* We clean the range of the packet */
+               while (first != done) {
+                       tx_desc->upper.data = 0;
+                       tx_desc->lower.data = 0;
+                       tx_desc->buffer_addr = 0;
+                       ++txr->tx_avail;
+                       ++processed;
+
+                       if (tx_buffer->m_head) {
+                               txr->bytes +=
+                                   tx_buffer->m_head->m_pkthdr.len;
+                               bus_dmamap_sync(txr->txtag,
+                                   tx_buffer->map,
+                                   BUS_DMASYNC_POSTWRITE);
+                               bus_dmamap_unload(txr->txtag,
+                                   tx_buffer->map);
+
+                               m_freem(tx_buffer->m_head);
+                               tx_buffer->m_head = NULL;
+                       }
+                       tx_buffer->next_eop = -1;
+                       /* Progress made: reset the watchdog timestamp. */
+                       txr->watchdog_time = ticks;
+
+                       if (++first == adapter->num_tx_desc)
+                               first = 0;
+
+                       tx_buffer = &txr->tx_buffers[first];
+                       tx_desc = &txr->tx_base[first];
+               }
+               ++txr->packets;
+               ++ifp->if_opackets;
+               /* See if we can continue to the next packet */
+               last = tx_buffer->next_eop;
+               if (last != -1) {
+                       eop_desc = &txr->tx_base[last];
+                       /* Get new done point */
+                       if (++last == adapter->num_tx_desc) last = 0;
+                       done = last;
+               } else
+                       break;
+        }
+       /* Hand the cleared descriptors back to the device. */
+        bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
+            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+
+        txr->next_to_clean = first;
+
+       /*
+       ** Watchdog calculation, we know there's
+       ** work outstanding or the first return
+       ** would have been taken, so none processed
+       ** for too long indicates a hang.
+       */
+       if ((!processed) && ((ticks - txr->watchdog_time) > IGB_WATCHDOG))
+               txr->queue_status = IGB_QUEUE_HUNG;
+
+        /*
+         * If we have a minimum free, clear IFF_DRV_OACTIVE
+         * to tell the stack that it is OK to send packets.
+         */
+        if (txr->tx_avail > IGB_TX_CLEANUP_THRESHOLD) {
+                ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+               /* All clean, turn off the watchdog */
+                if (txr->tx_avail == adapter->num_tx_desc) {
+                       txr->queue_status = IGB_QUEUE_IDLE;
+                       return (FALSE);
+               }
+        }
+       return (TRUE);
+}
+
+/*********************************************************************
+ *
+ *  Refresh mbuf buffers for RX descriptor rings
+ *   - now keeps its own state so discards due to resource
+ *     exhaustion are unnecessary, if an mbuf cannot be obtained
+ *     it just returns, keeping its placeholder, thus it can simply
+ *     be recalled to try again.
+ *
+ **********************************************************************/
+static void
+igb_refresh_mbufs(struct rx_ring *rxr, int limit)
+{
+       struct adapter          *adapter = rxr->adapter;
+       bus_dma_segment_t       hseg[1];
+       bus_dma_segment_t       pseg[1];
+       struct igb_rx_buf       *rxbuf;
+       struct mbuf             *mh, *mp;
+       int                     i, j, nsegs, error;
+       bool                    refreshed = FALSE;
+
+       i = j = rxr->next_to_refresh;
+       /*
+       ** Get one descriptor beyond
+       ** our work mark to control
+       ** the loop.
+        */
+       if (++j == adapter->num_rx_desc)
+               j = 0;
+
+       while (j != limit) {
+               rxbuf = &rxr->rx_buffers[i];
+               /* No hdr mbuf used with header split off */
+               if (rxr->hdr_split == FALSE)
+                       goto no_split;
+               /* Reuse the slot's header mbuf if it survived. */
+               if (rxbuf->m_head == NULL) {
+                       mh = m_gethdr(M_DONTWAIT, MT_DATA);
+                       if (mh == NULL)
+                               goto update;
+               } else
+                       mh = rxbuf->m_head;
+
+               mh->m_pkthdr.len = mh->m_len = MHLEN;
+               /* NOTE(review): redundant — m_len was just set above. */
+               mh->m_len = MHLEN;
+               mh->m_flags |= M_PKTHDR;
+               /* Get the memory mapping */
+               error = bus_dmamap_load_mbuf_sg(rxr->htag,
+                   rxbuf->hmap, mh, hseg, &nsegs, BUS_DMA_NOWAIT);
+               if (error != 0) {
+                       printf("Refresh mbufs: hdr dmamap load"
+                           " failure - %d\n", error);
+                       m_free(mh);
+                       rxbuf->m_head = NULL;
+                       goto update;
+               }
+               rxbuf->m_head = mh;
+               bus_dmamap_sync(rxr->htag, rxbuf->hmap,
+                   BUS_DMASYNC_PREREAD);
+               rxr->rx_base[i].read.hdr_addr =
+                   htole64(hseg[0].ds_addr);
+no_split:
+               /* Reuse the slot's payload cluster if it survived. */
+               if (rxbuf->m_pack == NULL) {
+                       mp = m_getjcl(M_DONTWAIT, MT_DATA,
+                           M_PKTHDR, adapter->rx_mbuf_sz);
+                       if (mp == NULL)
+                               goto update;
+               } else
+                       mp = rxbuf->m_pack;
+
+               mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
+               /* Get the memory mapping */
+               error = bus_dmamap_load_mbuf_sg(rxr->ptag,
+                   rxbuf->pmap, mp, pseg, &nsegs, BUS_DMA_NOWAIT);
+               if (error != 0) {
+                       printf("Refresh mbufs: payload dmamap load"
+                           " failure - %d\n", error);
+                       m_free(mp);
+                       rxbuf->m_pack = NULL;
+                       goto update;
+               }
+               rxbuf->m_pack = mp;
+               bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
+                   BUS_DMASYNC_PREREAD);
+               rxr->rx_base[i].read.pkt_addr =
+                   htole64(pseg[0].ds_addr);
+               refreshed = TRUE; /* I feel wefreshed :) */
+
+               i = j; /* our next is precalculated */
+               rxr->next_to_refresh = i;
+               if (++j == adapter->num_rx_desc)
+                       j = 0;
+       }
+update:
+       if (refreshed) /* update tail */
+               E1000_WRITE_REG(&adapter->hw,
+                   E1000_RDT(rxr->me), rxr->next_to_refresh);
+       return;
+}
+
+
+/*********************************************************************
+ *
+ *  Allocate memory for rx_buffer structures. Since we use one
+ *  rx_buffer per received packet, the maximum number of rx_buffer's
+ *  that we'll need is equal to the number of receive descriptors
+ *  that we've allocated.
+ *
+ **********************************************************************/
+static int
+igb_allocate_receive_buffers(struct rx_ring *rxr)
+{
+       struct  adapter         *adapter = rxr->adapter;
+       device_t                dev = adapter->dev;
+       struct igb_rx_buf       *rxbuf;
+       int                     i, bsize, error;
+
+       /* One igb_rx_buf tracking slot per RX descriptor. */
+       bsize = sizeof(struct igb_rx_buf) * adapter->num_rx_desc;
+       if (!(rxr->rx_buffers =
+           (struct igb_rx_buf *) malloc(bsize,
+           M_DEVBUF, M_NOWAIT | M_ZERO))) {
+               device_printf(dev, "Unable to allocate rx_buffer memory\n");
+               error = ENOMEM;
+               goto fail;
+       }
+
+       /* Tag for header mbufs (single segment up to MSIZE). */
+       if ((error = bus_dma_tag_create(bus_get_dma_tag(dev),
+                                  1, 0,                /* alignment, bounds */
+                                  BUS_SPACE_MAXADDR,   /* lowaddr */
+                                  BUS_SPACE_MAXADDR,   /* highaddr */
+                                  NULL, NULL,          /* filter, filterarg */
+                                  MSIZE,               /* maxsize */
+                                  1,                   /* nsegments */
+                                  MSIZE,               /* maxsegsize */
+                                  0,                   /* flags */
+                                  NULL,                /* lockfunc */
+                                  NULL,                /* lockfuncarg */
+                                  &rxr->htag))) {
+               device_printf(dev, "Unable to create RX DMA tag\n");
+               goto fail;
+       }
+
+       /* Tag for payload clusters (single segment up to 9KB jumbo). */
+       if ((error = bus_dma_tag_create(bus_get_dma_tag(dev),
+                                  1, 0,                /* alignment, bounds */
+                                  BUS_SPACE_MAXADDR,   /* lowaddr */
+                                  BUS_SPACE_MAXADDR,   /* highaddr */
+                                  NULL, NULL,          /* filter, filterarg */
+                                  MJUM9BYTES,          /* maxsize */
+                                  1,                   /* nsegments */
+                                  MJUM9BYTES,          /* maxsegsize */
+                                  0,                   /* flags */
+                                  NULL,                /* lockfunc */
+                                  NULL,                /* lockfuncarg */
+                                  &rxr->ptag))) {
+               device_printf(dev, "Unable to create RX payload DMA tag\n");
+               goto fail;
+       }
+
+       /* A header map and a payload map for every descriptor slot. */
+       for (i = 0; i < adapter->num_rx_desc; i++) {
+               rxbuf = &rxr->rx_buffers[i];
+               error = bus_dmamap_create(rxr->htag,
+                   BUS_DMA_NOWAIT, &rxbuf->hmap);
+               if (error) {
+                       device_printf(dev,
+                           "Unable to create RX head DMA maps\n");
+                       goto fail;
+               }
+               error = bus_dmamap_create(rxr->ptag,
+                   BUS_DMA_NOWAIT, &rxbuf->pmap);
+               if (error) {
+                       device_printf(dev,
+                           "Unable to create RX packet DMA maps\n");
+                       goto fail;
+               }
+       }
+
+       return (0);
+
+fail:
+       /* Frees all, but can handle partial completion */
+       igb_free_receive_structures(adapter);
+       return (error);
+}
+
+
+static void
+igb_free_receive_ring(struct rx_ring *rxr)
+{
+       struct  adapter         *adapter = rxr->adapter;
+       struct igb_rx_buf       *rxbuf;
+
+
+       /*
+        * Release every header mbuf and payload cluster still posted
+        * to the ring: sync, unload the DMA map, then free the mbuf.
+        * Maps and tags themselves are left intact for reuse.
+        */
+       for (int i = 0; i < adapter->num_rx_desc; i++) {
+               rxbuf = &rxr->rx_buffers[i];
+               if (rxbuf->m_head != NULL) {
+                       bus_dmamap_sync(rxr->htag, rxbuf->hmap,
+                           BUS_DMASYNC_POSTREAD);
+                       bus_dmamap_unload(rxr->htag, rxbuf->hmap);
+                       rxbuf->m_head->m_flags |= M_PKTHDR;
+                       m_freem(rxbuf->m_head);
+               }
+               if (rxbuf->m_pack != NULL) {
+                       bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
+                           BUS_DMASYNC_POSTREAD);
+                       bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
+                       rxbuf->m_pack->m_flags |= M_PKTHDR;
+                       m_freem(rxbuf->m_pack);
+               }
+               rxbuf->m_head = NULL;
+               rxbuf->m_pack = NULL;
+       }
+}
+
+
+/*********************************************************************
+ *
+ *  Initialize a receive ring and its buffers.
+ *
+ **********************************************************************/
+static int
+igb_setup_receive_ring(struct rx_ring *rxr)
+{
+       struct  adapter         *adapter;
+       struct  ifnet           *ifp;
+       device_t                dev;
+       struct igb_rx_buf       *rxbuf;
+       bus_dma_segment_t       pseg[1], hseg[1];
+       struct lro_ctrl         *lro = &rxr->lro;
+       int                     rsize, nsegs, error = 0;
+
+       adapter = rxr->adapter;
+       dev = adapter->dev;
+       ifp = adapter->ifp;
+
+       /* Clear the ring contents */
+       IGB_RX_LOCK(rxr);
+       rsize = roundup2(adapter->num_rx_desc *
+           sizeof(union e1000_adv_rx_desc), IGB_DBA_ALIGN);
+       bzero((void *)rxr->rx_base, rsize);
+
+       /*
+       ** Free current RX buffer structures and their mbufs
+       */
+       igb_free_receive_ring(rxr);
+
+       /* Configure for header split? */
+       if (igb_header_split)
+               rxr->hdr_split = TRUE;
+
+        /* Now replenish the ring mbufs */
+       for (int j = 0; j < adapter->num_rx_desc; ++j) {
+               struct mbuf     *mh, *mp;
+
+               rxbuf = &rxr->rx_buffers[j];
+               if (rxr->hdr_split == FALSE)
+                       goto skip_head;
+
+               /* First the header */
+               rxbuf->m_head = m_gethdr(M_DONTWAIT, MT_DATA);
+               if (rxbuf->m_head == NULL) {
+                       error = ENOBUFS;
+                        goto fail;
+               }
+               /* ETHER_ALIGN keeps the IP header 4-byte aligned. */
+               m_adj(rxbuf->m_head, ETHER_ALIGN);
+               mh = rxbuf->m_head;
+               mh->m_len = mh->m_pkthdr.len = MHLEN;
+               mh->m_flags |= M_PKTHDR;
+               /* Get the memory mapping */
+               error = bus_dmamap_load_mbuf_sg(rxr->htag,
+                   rxbuf->hmap, rxbuf->m_head, hseg,
+                   &nsegs, BUS_DMA_NOWAIT);
+               if (error != 0) /* Nothing elegant to do here */
+                        goto fail;
+               bus_dmamap_sync(rxr->htag,
+                   rxbuf->hmap, BUS_DMASYNC_PREREAD);
+               /* Update descriptor */
+               rxr->rx_base[j].read.hdr_addr = htole64(hseg[0].ds_addr);
+
+skip_head:
+               /* Now the payload cluster */
+               rxbuf->m_pack = m_getjcl(M_DONTWAIT, MT_DATA,
+                   M_PKTHDR, adapter->rx_mbuf_sz);
+               if (rxbuf->m_pack == NULL) {
+                       error = ENOBUFS;
+                        goto fail;
+               }
+               mp = rxbuf->m_pack;
+               mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
+               /* Get the memory mapping */
+               error = bus_dmamap_load_mbuf_sg(rxr->ptag,
+                   rxbuf->pmap, mp, pseg,
+                   &nsegs, BUS_DMA_NOWAIT);
+               if (error != 0)
+                        goto fail;
+               bus_dmamap_sync(rxr->ptag,
+                   rxbuf->pmap, BUS_DMASYNC_PREREAD);
+               /* Update descriptor */
+               rxr->rx_base[j].read.pkt_addr = htole64(pseg[0].ds_addr);
+        }
+
+       /* Setup our descriptor indices */
+       rxr->next_to_check = 0;
+       rxr->next_to_refresh = adapter->num_rx_desc - 1;
+       rxr->lro_enabled = FALSE;
+       rxr->rx_split_packets = 0;
+       rxr->rx_bytes = 0;
+
+       rxr->fmp = NULL;
+       rxr->lmp = NULL;
+       rxr->discard = FALSE;
+
+       /* Publish the freshly written descriptors to the device. */
+       bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
+           BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+
+       /*
+       ** Now set up the LRO interface, we
+       ** also only do head split when LRO
+       ** is enabled, since so often they
+       ** are undesireable in similar setups.
+       */
+       if (ifp->if_capenable & IFCAP_LRO) {
+               error = tcp_lro_init(lro);
+               if (error) {
+                       device_printf(dev, "LRO Initialization failed!\n");
+                       goto fail;
+               }
+               INIT_DEBUGOUT("RX LRO Initialized\n");
+               rxr->lro_enabled = TRUE;
+               lro->ifp = adapter->ifp;
+       }
+
+       IGB_RX_UNLOCK(rxr);
+       return (0);
+
+fail:
+       /* Unwind any mbufs posted before the failure, then unlock. */
+       igb_free_receive_ring(rxr);
+       IGB_RX_UNLOCK(rxr);
+       return (error);
+}
+
+
+/*********************************************************************
+ *
+ *  Initialize all receive rings.
+ *
+ *  Returns 0 on success, ENOBUFS if any ring failed to set up.
+ *  On failure, rings that completed before the failing one are
+ *  freed here; the failing ring cleans up after itself.
+ *
+ **********************************************************************/
+static int
+igb_setup_receive_structures(struct adapter *adapter)
+{
+       struct rx_ring *rxr = adapter->rx_rings;
+       int i;
+
+       for (i = 0; i < adapter->num_queues; i++, rxr++)
+               if (igb_setup_receive_ring(rxr))
+                       goto fail;
+
+       return (0);
+fail:
+       /*
+        * Free RX buffers allocated so far, we will only handle
+        * the rings that completed, the failing case will have
+        * cleaned up for itself. 'i' is the endpoint.
+        *
+        * Fix: the loop must run j = 0 .. i-1 over the rings that
+        * DID complete. The previous 'j > i' condition never
+        * executed, and the body indexed rx_rings[i] (the failed
+        * ring) instead of rx_rings[j], leaking the earlier rings.
+        */
+       for (int j = 0; j < i; ++j) {
+               rxr = &adapter->rx_rings[j];
+               IGB_RX_LOCK(rxr);
+               igb_free_receive_ring(rxr);
+               IGB_RX_UNLOCK(rxr);
+       }
+
+       return (ENOBUFS);
+}
+
+/*********************************************************************
+ *
+ *  Enable receive unit.
+ *
+ *  Programs RCTL/SRRCTL for header split, buffer sizes and jumbo
+ *  frames, writes the per-queue descriptor base/length registers,
+ *  configures RSS (RETA + hash key) when multiple queues are in
+ *  use, sets checksum offload bits, and finally enables receives.
+ *
+ **********************************************************************/
+static void
+igb_initialize_receive_units(struct adapter *adapter)
+{
+       struct rx_ring  *rxr = adapter->rx_rings;
+       struct ifnet    *ifp = adapter->ifp;
+       struct e1000_hw *hw = &adapter->hw;
+       u32             rctl, rxcsum, psize, srrctl = 0;
+
+       INIT_DEBUGOUT("igb_initialize_receive_unit: begin");
+
+       /*
+        * Make sure receives are disabled while setting
+        * up the descriptor ring
+        */
+       rctl = E1000_READ_REG(hw, E1000_RCTL);
+       E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
+
+       /*
+       ** Set up for header split
+       */
+       if (igb_header_split) {
+               /* Use a standard mbuf for the header */
+               srrctl |= IGB_HDR_BUF << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
+               srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
+       } else
+               srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
+
+       /*
+       ** Set up for jumbo frames
+       */
+       if (ifp->if_mtu > ETHERMTU) {
+               rctl |= E1000_RCTL_LPE;
+               /* Pick 4K or 8K packet buffers based on the mbuf size */
+               if (adapter->rx_mbuf_sz == MJUMPAGESIZE) {
+                       srrctl |= 4096 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
+                       rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX;
+               } else if (adapter->rx_mbuf_sz > MJUMPAGESIZE) {
+                       srrctl |= 8192 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
+                       rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX;
+               }
+               /* Set maximum packet len */
+               psize = adapter->max_frame_size;
+               /* are we on a vlan? */
+               if (adapter->ifp->if_vlantrunk != NULL)
+                       psize += VLAN_TAG_SIZE;
+               E1000_WRITE_REG(&adapter->hw, E1000_RLPML, psize);
+       } else {
+               rctl &= ~E1000_RCTL_LPE;
+               srrctl |= 2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
+               rctl |= E1000_RCTL_SZ_2048;
+       }
+
+       /* Setup the Base and Length of the Rx Descriptor Rings */
+       for (int i = 0; i < adapter->num_queues; i++, rxr++) {
+               u64 bus_addr = rxr->rxdma.dma_paddr;
+               u32 rxdctl;
+
+               E1000_WRITE_REG(hw, E1000_RDLEN(i),
+                   adapter->num_rx_desc * sizeof(struct e1000_rx_desc));
+               E1000_WRITE_REG(hw, E1000_RDBAH(i),
+                   (uint32_t)(bus_addr >> 32));
+               E1000_WRITE_REG(hw, E1000_RDBAL(i),
+                   (uint32_t)bus_addr);
+               E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl);
+               /* Enable this Queue */
+               rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
+               rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
+               /* Keep high bits, replace the threshold fields */
+               rxdctl &= 0xFFF00000;
+               rxdctl |= IGB_RX_PTHRESH;
+               rxdctl |= IGB_RX_HTHRESH << 8;
+               rxdctl |= IGB_RX_WTHRESH << 16;
+               E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
+       }
+
+       /*
+       ** Setup for RX MultiQueue
+       */
+       rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
+       if (adapter->num_queues >1) {
+               u32 random[10], mrqc, shift = 0;
+               union igb_reta {
+                       u32 dword;
+                       u8  bytes[4];
+               } reta;
+
+               arc4rand(&random, sizeof(random), 0);
+               /* NOTE(review): 82575 appears to want the queue index
+               ** shifted up by 6 bits in RETA entries — confirm
+               ** against the 82575 datasheet. */
+               if (adapter->hw.mac.type == e1000_82575)
+                       shift = 6;
+               /* Warning FM follows: each RETA register packs four
+               ** one-byte entries, so one register is written on
+               ** every 4th iteration. */
+               for (int i = 0; i < 128; i++) {
+                       reta.bytes[i & 3] =
+                           (i % adapter->num_queues) << shift;
+                       if ((i & 3) == 3)
+                               E1000_WRITE_REG(hw,
+                                   E1000_RETA(i >> 2), reta.dword);
+               }
+               /* Now fill in hash table */
+               mrqc = E1000_MRQC_ENABLE_RSS_4Q;
+               for (int i = 0; i < 10; i++)
+                       E1000_WRITE_REG_ARRAY(hw,
+                           E1000_RSSRK(0), i, random[i]);
+
+               mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
+                   E1000_MRQC_RSS_FIELD_IPV4_TCP);
+               mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 |
+                   E1000_MRQC_RSS_FIELD_IPV6_TCP);
+               mrqc |=( E1000_MRQC_RSS_FIELD_IPV4_UDP |
+                   E1000_MRQC_RSS_FIELD_IPV6_UDP);
+               mrqc |=( E1000_MRQC_RSS_FIELD_IPV6_UDP_EX |
+                   E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
+
+               E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
+
+               /*
+               ** NOTE: Receive Full-Packet Checksum Offload 
+               ** is mutually exclusive with Multiqueue. However
+               ** this is not the same as TCP/IP checksums which
+               ** still work.
+               */
+               rxcsum |= E1000_RXCSUM_PCSD;
+#if __FreeBSD_version >= 800000
+               /* For SCTP Offload */
+               if ((hw->mac.type == e1000_82576)
+                   && (ifp->if_capenable & IFCAP_RXCSUM))
+                       rxcsum |= E1000_RXCSUM_CRCOFL;
+#endif
+       } else {
+               /* Non RSS setup */
+               if (ifp->if_capenable & IFCAP_RXCSUM) {
+                       rxcsum |= E1000_RXCSUM_IPPCSE;
+#if __FreeBSD_version >= 800000
+                       if (adapter->hw.mac.type == e1000_82576)
+                               rxcsum |= E1000_RXCSUM_CRCOFL;
+#endif
+               } else
+                       rxcsum &= ~E1000_RXCSUM_TUOFL;
+       }
+       E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
+
+       /* Setup the Receive Control Register */
+       rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
+       rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
+                  E1000_RCTL_RDMTS_HALF |
+                  (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
+       /* Strip CRC bytes. */
+       rctl |= E1000_RCTL_SECRC;
+       /* Make sure VLAN Filters are off */
+       rctl &= ~E1000_RCTL_VFE;
+       /* Don't store bad packets */
+       rctl &= ~E1000_RCTL_SBP;
+
+       /* Enable Receives */
+       E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+
+       /*
+        * Setup the HW Rx Head and Tail Descriptor Pointers
+        *   - needs to be after enable
+        */
+       for (int i = 0; i < adapter->num_queues; i++) {
+               rxr = &adapter->rx_rings[i];
+               E1000_WRITE_REG(hw, E1000_RDH(i), rxr->next_to_check);
+               E1000_WRITE_REG(hw, E1000_RDT(i), rxr->next_to_refresh);
+       }
+       return;
+}
+
+/*********************************************************************
+ *
+ *  Free receive rings.
+ *
+ **********************************************************************/
+static void
+igb_free_receive_structures(struct adapter *adapter)
+{
+       int q;
+
+       /* Tear each ring down: buffers, LRO state, descriptor DMA. */
+       for (q = 0; q < adapter->num_queues; q++) {
+               struct rx_ring *ring = &adapter->rx_rings[q];
+
+               igb_free_receive_buffers(ring);
+               tcp_lro_free(&ring->lro);
+               igb_dma_free(adapter, &ring->rxdma);
+       }
+
+       /* Finally release the ring array itself. */
+       free(adapter->rx_rings, M_DEVBUF);
+}
+
+/*********************************************************************
+ *
+ *  Free receive ring data structures.
+ *
+ *  Unloads and frees any header/payload mbufs still attached to
+ *  the ring, destroys their DMA maps, releases the buffer array,
+ *  and finally destroys the header and payload DMA tags.
+ *
+ **********************************************************************/
+static void
+igb_free_receive_buffers(struct rx_ring *rxr)
+{
+       struct adapter          *adapter = rxr->adapter;
+       struct igb_rx_buf       *rxbuf;
+       int i;
+
+       INIT_DEBUGOUT("free_receive_structures: begin");
+
+       /* Cleanup any existing buffers */
+       if (rxr->rx_buffers != NULL) {
+               for (i = 0; i < adapter->num_rx_desc; i++) {
+                       rxbuf = &rxr->rx_buffers[i];
+                       /* Header mbuf: sync, unload and free */
+                       if (rxbuf->m_head != NULL) {
+                               bus_dmamap_sync(rxr->htag, rxbuf->hmap,
+                                   BUS_DMASYNC_POSTREAD);
+                               bus_dmamap_unload(rxr->htag, rxbuf->hmap);
+                               rxbuf->m_head->m_flags |= M_PKTHDR;
+                               m_freem(rxbuf->m_head);
+                       }
+                       /* Payload mbuf: sync, unload and free */
+                       if (rxbuf->m_pack != NULL) {
+                               bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
+                                   BUS_DMASYNC_POSTREAD);
+                               bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
+                               rxbuf->m_pack->m_flags |= M_PKTHDR;
+                               m_freem(rxbuf->m_pack);
+                       }
+                       rxbuf->m_head = NULL;
+                       rxbuf->m_pack = NULL;
+                       if (rxbuf->hmap != NULL) {
+                               bus_dmamap_destroy(rxr->htag, rxbuf->hmap);
+                               rxbuf->hmap = NULL;
+                       }
+                       if (rxbuf->pmap != NULL) {
+                               bus_dmamap_destroy(rxr->ptag, rxbuf->pmap);
+                               rxbuf->pmap = NULL;
+                       }
+               }
+               /*
+                * Non-NULL is already established by the enclosing
+                * test; the previous redundant re-check is dropped.
+                */
+               free(rxr->rx_buffers, M_DEVBUF);
+               rxr->rx_buffers = NULL;
+       }
+
+       /* Tear down the DMA tags themselves */
+       if (rxr->htag != NULL) {
+               bus_dma_tag_destroy(rxr->htag);
+               rxr->htag = NULL;
+       }
+       if (rxr->ptag != NULL) {
+               bus_dma_tag_destroy(rxr->ptag);
+               rxr->ptag = NULL;
+       }
+}
+
+/*
+ * Discard the descriptor at slot 'i': drop any partially
+ * assembled packet chain and free the slot's mbufs so the
+ * refresh path allocates fresh ones.
+ */
+static __inline void
+igb_rx_discard(struct rx_ring *rxr, int i)
+{
+       struct igb_rx_buf *rbuf = &rxr->rx_buffers[i];
+
+       /* Partially received chain pending? Free the whole thing. */
+       if (rxr->fmp != NULL) {
+               rxr->fmp->m_flags |= M_PKTHDR;
+               m_freem(rxr->fmp);
+               rxr->fmp = NULL;
+               rxr->lmp = NULL;
+       }
+
+       /*
+       ** Advanced-descriptor writeback clobbers the buffer
+       ** addresses, so free the existing mbufs here and let the
+       ** normal refresh path supply new buffers and mappings.
+       */
+       if (rbuf->m_head != NULL) {
+               m_free(rbuf->m_head);
+               rbuf->m_head = NULL;
+       }
+       if (rbuf->m_pack != NULL) {
+               m_free(rbuf->m_pack);
+               rbuf->m_pack = NULL;
+       }
+}
+
+static __inline void
+igb_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u32 ptype)
+{
+
+       /*
+        * ATM LRO is only for IPv4/TCP packets and TCP checksum of the packet
+        * should be computed by hardware. Also it should not have VLAN tag in
+        * ethernet header.
+        */
+       if (rxr->lro_enabled &&
+           (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
+           (ptype & E1000_RXDADV_PKTTYPE_ETQF) == 0 &&
+           (ptype & (E1000_RXDADV_PKTTYPE_IPV4 | E1000_RXDADV_PKTTYPE_TCP)) ==
+           (E1000_RXDADV_PKTTYPE_IPV4 | E1000_RXDADV_PKTTYPE_TCP) &&
+           (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) == 
+           (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
+               /*
+                * Hand the mbuf to LRO only when it has resources
+                * (lro_cnt != 0) and the enqueue succeeds; in every
+                * other case fall through and send it up the stack.
+                */
+               if (rxr->lro.lro_cnt != 0)
+                       if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
+                               return;
+       }
+       /* Drop the RX lock while the stack consumes the mbuf. */
+       IGB_RX_UNLOCK(rxr);
+       (*ifp->if_input)(ifp, m);
+       IGB_RX_LOCK(rxr);
+}
+
+/*********************************************************************
+ *
+ *  This routine executes in interrupt context. It replenishes
+ *  the mbufs in the descriptor and sends data which has been
+ *  dma'ed into host memory to upper layer.
+ *
+ *  We loop at most count times if count is > 0, or until done if
+ *  count < 0.
+ *
+ *  Return TRUE if more to clean, FALSE otherwise
+ *********************************************************************/
+static bool
+igb_rxeof(struct igb_queue *que, int count, int *done)
+{
+       struct adapter          *adapter = que->adapter;
+       struct rx_ring          *rxr = que->rxr;
+       struct ifnet            *ifp = adapter->ifp;
+       struct lro_ctrl         *lro = &rxr->lro;
+       struct lro_entry        *queued;
+       int                     i, processed = 0, rxdone = 0;
+       u32                     ptype, staterr = 0;
+       union e1000_adv_rx_desc *cur;
+
+       IGB_RX_LOCK(rxr);
+       /* Sync the ring. */
+       bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
+           BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+
+       /* Main clean loop */
+       for (i = rxr->next_to_check; count != 0;) {
+               struct mbuf             *sendmp, *mh, *mp;
+               struct igb_rx_buf       *rxbuf;
+               u16                     hlen, plen, hdr, vtag;
+               bool                    eop = FALSE;
+               cur = &rxr->rx_base[i];
+               staterr = le32toh(cur->wb.upper.status_error);
+               /* Stop when the hardware has not completed this slot */
+               if ((staterr & E1000_RXD_STAT_DD) == 0)
+                       break;
+               if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
+                       break;
+               count--;
+               sendmp = mh = mp = NULL;
+               /* Clear status so this slot no longer tests as DD */
+               cur->wb.upper.status_error = 0;
+               rxbuf = &rxr->rx_buffers[i];
+               plen = le16toh(cur->wb.upper.length);
+               ptype = le32toh(cur->wb.lower.lo_dword.data) & IGB_PKTTYPE_MASK;
+               /*
+                * NOTE(review): on i350, loopback (LB) packets appear
+                * to report the VLAN tag big-endian — confirm against
+                * the device errata.
+                */
+               if ((adapter->hw.mac.type == e1000_i350) &&
+                   (staterr & E1000_RXDEXT_STATERR_LB))
+                       vtag = be16toh(cur->wb.upper.vlan);
+               else
+                       vtag = le16toh(cur->wb.upper.vlan);
+               hdr = le16toh(cur->wb.lower.lo_dword.hs_rss.hdr_info);
+               eop = ((staterr & E1000_RXD_STAT_EOP) == E1000_RXD_STAT_EOP);
+
+               /* Make sure all segments of a bad packet are discarded */
+               if (((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) != 0) ||
+                   (rxr->discard)) {
+                       ifp->if_ierrors++;
+                       ++rxr->rx_discarded;
+                       if (!eop) /* Catch subsequent segs */
+                               rxr->discard = TRUE;
+                       else
+                               rxr->discard = FALSE;
+                       igb_rx_discard(rxr, i);
+                       goto next_desc;
+               }
+
+               /*
+               ** The way the hardware is configured to
+               ** split, it will ONLY use the header buffer
+               ** when header split is enabled, otherwise we
+               ** get normal behavior, ie, both header and
+               ** payload are DMA'd into the payload buffer.
+               **
+               ** The fmp test is to catch the case where a
+               ** packet spans multiple descriptors, in that
+               ** case only the first header is valid.
+               */
+               if (rxr->hdr_split && rxr->fmp == NULL) {
+                       hlen = (hdr & E1000_RXDADV_HDRBUFLEN_MASK) >>
+                           E1000_RXDADV_HDRBUFLEN_SHIFT;
+                       /* Clamp to the header buffer size */
+                       if (hlen > IGB_HDR_BUF)
+                               hlen = IGB_HDR_BUF;
+                       mh = rxr->rx_buffers[i].m_head;
+                       mh->m_len = hlen;
+                       /* clear buf pointer for refresh */
+                       rxbuf->m_head = NULL;
+                       /*
+                       ** Get the payload length, this
+                       ** could be zero if its a small
+                       ** packet.
+                       */
+                       if (plen > 0) {
+                               mp = rxr->rx_buffers[i].m_pack;
+                               mp->m_len = plen;
+                               mh->m_next = mp;
+                               /* clear buf pointer */
+                               rxbuf->m_pack = NULL;
+                               rxr->rx_split_packets++;
+                       }
+               } else {
+                       /*
+                       ** Either no header split, or a
+                       ** secondary piece of a fragmented
+                       ** split packet.
+                       */
+                       mh = rxr->rx_buffers[i].m_pack;
+                       mh->m_len = plen;
+                       /* clear buf info for refresh */
+                       rxbuf->m_pack = NULL;
+               }
+
+               ++processed; /* So we know when to refresh */
+
+               /* Initial frame - setup */
+               if (rxr->fmp == NULL) {
+                       mh->m_pkthdr.len = mh->m_len;
+                       /* Save the head of the chain */
+                       rxr->fmp = mh;
+                       rxr->lmp = mh;
+                       if (mp != NULL) {
+                               /* Add payload if split */
+                               mh->m_pkthdr.len += mp->m_len;
+                               rxr->lmp = mh->m_next;
+                       }
+               } else {
+                       /* Chain mbuf's together */
+                       rxr->lmp->m_next = mh;
+                       rxr->lmp = rxr->lmp->m_next;
+                       rxr->fmp->m_pkthdr.len += mh->m_len;
+               }
+
+               /* End of packet: finalize and hand the chain upward */
+               if (eop) {
+                       rxr->fmp->m_pkthdr.rcvif = ifp;
+                       ifp->if_ipackets++;
+                       rxr->rx_packets++;
+                       /* capture data for AIM */
+                       rxr->packets++;
+                       rxr->bytes += rxr->fmp->m_pkthdr.len;
+                       rxr->rx_bytes += rxr->fmp->m_pkthdr.len;
+
+                       if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
+                               igb_rx_checksum(staterr, rxr->fmp, ptype);
+
+                       if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
+                           (staterr & E1000_RXD_STAT_VP) != 0) {
+                               rxr->fmp->m_pkthdr.ether_vtag = vtag;
+                               rxr->fmp->m_flags |= M_VLANTAG;
+                       }
+#if __FreeBSD_version >= 800000
+                       rxr->fmp->m_pkthdr.flowid = que->msix;
+                       rxr->fmp->m_flags |= M_FLOWID;
+#endif
+                       sendmp = rxr->fmp;
+                       /* Make sure to set M_PKTHDR. */
+                       sendmp->m_flags |= M_PKTHDR;
+                       rxr->fmp = NULL;
+                       rxr->lmp = NULL;
+               }
+
+next_desc:
+               bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
+                   BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+
+               /* Advance our pointers to the next descriptor. */
+               if (++i == adapter->num_rx_desc)
+                       i = 0;
+               /*
+               ** Send to the stack or LRO
+               ** (igb_rx_input drops/reacquires the RX lock, so
+               ** next_to_check is saved/reloaded around the call)
+               */
+               if (sendmp != NULL) {
+                       rxr->next_to_check = i;
+                       igb_rx_input(rxr, ifp, sendmp, ptype);
+                       i = rxr->next_to_check;
+                       rxdone++;
+               }
+
+               /* Every 8 descriptors we go to refresh mbufs */
+               if (processed == 8) {
+                        igb_refresh_mbufs(rxr, i);
+                        processed = 0;
+               }
+       }
+
+       /* Catch any remainders */
+       if (igb_rx_unrefreshed(rxr))
+               igb_refresh_mbufs(rxr, i);
+
+       rxr->next_to_check = i;
+
+       /*
+        * Flush any outstanding LRO work
+        */
+       while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
+               SLIST_REMOVE_HEAD(&lro->lro_active, next);
+               tcp_lro_flush(lro, queued);
+       }
+
+       if (done != NULL)
+               *done = rxdone;
+
+       IGB_RX_UNLOCK(rxr);
+       return ((staterr & E1000_RXD_STAT_DD) ? TRUE : FALSE);
+}
+
+/*********************************************************************
+ *
+ *  Verify that the hardware indicated that the checksum is valid.
+ *  Inform the stack about the status of checksum so that stack
+ *  doesn't spend time verifying the checksum.
+ *
+ *********************************************************************/
+static void
+igb_rx_checksum(u32 staterr, struct mbuf *mp, u32 ptype)
+{
+       u16 status = (u16)staterr;
+       u8  errors = (u8) (staterr >> 24);
+       int sctp;
+
+       /* Ignore Checksum bit is set */
+       if (status & E1000_RXD_STAT_IXSM) {
+               mp->m_pkthdr.csum_flags = 0;
+               return;
+       }
+
+       if ((ptype & E1000_RXDADV_PKTTYPE_ETQF) == 0 &&
+           (ptype & E1000_RXDADV_PKTTYPE_SCTP) != 0)
+               sctp = 1;
+       else
+               sctp = 0;
+       if (status & E1000_RXD_STAT_IPCS) {
+               /* Did it pass? */
+               if (!(errors & E1000_RXD_ERR_IPE)) {
+                       /* IP Checksum Good */
+                       mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
+                       mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
+               } else
+                       mp->m_pkthdr.csum_flags = 0;
+       }
+
+       if (status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)) {
+               u16 type = (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
+#if __FreeBSD_version >= 800000
+               if (sctp) /* reassign */
+                       type = CSUM_SCTP_VALID;
+#endif
+               /* Did it pass? */
+               if (!(errors & E1000_RXD_ERR_TCPE)) {
+                       mp->m_pkthdr.csum_flags |= type;
+                       if (sctp == 0)
+                               mp->m_pkthdr.csum_data = htons(0xffff);
+               }
+       }
+       return;
+}
+
+/*
+ * Runs on a VLAN "config" event: set the tag's bit in the
+ * shadow VFTA and refresh the hardware filter if it is enabled.
+ */
+static void
+igb_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
+{
+       struct adapter  *adapter = ifp->if_softc;
+       u32             idx, bitpos;
+
+       if (ifp->if_softc != arg)       /* Not our event */
+               return;
+       if ((vtag == 0) || (vtag > 4095))       /* Out of range */
+               return;
+
+       IGB_CORE_LOCK(adapter);
+       idx = (vtag >> 5) & 0x7F;
+       bitpos = vtag & 0x1F;
+       adapter->shadow_vfta[idx] |= (1 << bitpos);
+       ++adapter->num_vlans;
+       /* Push the change to the hardware filter if it is in use */
+       if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
+               igb_setup_vlan_hw_support(adapter);
+       IGB_CORE_UNLOCK(adapter);
+}
+
+/*
+ * Runs on a VLAN "unconfig" event: clear the tag's bit in the
+ * shadow VFTA and refresh the hardware filter if it is enabled.
+ */
+static void
+igb_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
+{
+       struct adapter  *adapter = ifp->if_softc;
+       u32             idx, bitpos;
+
+       if (ifp->if_softc != arg)       /* Not our event */
+               return;
+       if ((vtag == 0) || (vtag > 4095))       /* Out of range */
+               return;
+
+       IGB_CORE_LOCK(adapter);
+       idx = (vtag >> 5) & 0x7F;
+       bitpos = vtag & 0x1F;
+       adapter->shadow_vfta[idx] &= ~(1 << bitpos);
+       --adapter->num_vlans;
+       /* Push the change to the hardware filter if it is in use */
+       if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
+               igb_setup_vlan_hw_support(adapter);
+       IGB_CORE_UNLOCK(adapter);
+}
+
+/*
+ * Program hardware VLAN support: enable tag stripping (CTRL.VME),
+ * optionally the filter table (RCTL.VFE), update the max frame
+ * size, and repopulate the VFTA from the shadow copy.
+ */
+static void
+igb_setup_vlan_hw_support(struct adapter *adapter)
+{
+       struct e1000_hw *hw = &adapter->hw;
+       struct ifnet    *ifp = adapter->ifp;
+       u32             reg;
+
+       /* VF: only the max frame size can be set; done. */
+       if (adapter->vf_ifp) {
+               e1000_rlpml_set_vf(hw,
+                   adapter->max_frame_size + VLAN_TAG_SIZE);
+               return;
+       }
+
+       reg = E1000_READ_REG(hw, E1000_CTRL);
+       reg |= E1000_CTRL_VME;
+       E1000_WRITE_REG(hw, E1000_CTRL, reg);
+
+       /* Enable the Filter Table */
+       if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
+               reg = E1000_READ_REG(hw, E1000_RCTL);
+               reg &= ~E1000_RCTL_CFIEN;
+               reg |= E1000_RCTL_VFE;
+               E1000_WRITE_REG(hw, E1000_RCTL, reg);
+       }
+
+       /* Update the frame size */
+       E1000_WRITE_REG(&adapter->hw, E1000_RLPML,
+           adapter->max_frame_size + VLAN_TAG_SIZE);
+
+       /* Don't bother with table if no vlans */
+       if ((adapter->num_vlans == 0) ||
+           ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0))
+                return;
+       /*
+       ** A soft reset zero's out the VFTA, so
+       ** we need to repopulate it now.
+       */
+       for (int i = 0; i < IGB_VFTA_SIZE; i++)
+                if (adapter->shadow_vfta[i] != 0) {
+                       /*
+                        * NOTE(review): this vf_ifp branch is
+                        * unreachable — vf_ifp already returned at
+                        * the top of this function.
+                        */
+                       if (adapter->vf_ifp)
+                               e1000_vfta_set_vf(hw,
+                                   adapter->shadow_vfta[i], TRUE);
+                       else
+                               E1000_WRITE_REG_ARRAY(hw, E1000_VFTA,
+                                i, adapter->shadow_vfta[i]);
+               }
+}
+
+/*
+ * Unmask interrupts: in MSIX mode arm auto-clear/auto-mask for
+ * the queue and link causes plus link-status change; otherwise
+ * enable the legacy interrupt set.
+ */
+static void
+igb_enable_intr(struct adapter *adapter)
+{
+       struct e1000_hw *hw = &adapter->hw;
+
+       if (adapter->msix_mem) {
+               u32 mask = (adapter->que_mask | adapter->link_mask);
+
+               E1000_WRITE_REG(hw, E1000_EIAC, mask);
+               E1000_WRITE_REG(hw, E1000_EIAM, mask);
+               E1000_WRITE_REG(hw, E1000_EIMS, mask);
+               E1000_WRITE_REG(hw, E1000_IMS, E1000_IMS_LSC);
+       } else {
+               E1000_WRITE_REG(hw, E1000_IMS, IMS_ENABLE_MASK);
+       }
+       E1000_WRITE_FLUSH(hw);
+}
+
+/*
+ * Mask all interrupts: extended (MSIX) causes first when in use,
+ * then the legacy cause set, followed by a posted-write flush.
+ */
+static void
+igb_disable_intr(struct adapter *adapter)
+{
+       struct e1000_hw *hw = &adapter->hw;
+
+       if (adapter->msix_mem) {
+               E1000_WRITE_REG(hw, E1000_EIMC, ~0);
+               E1000_WRITE_REG(hw, E1000_EIAC, 0);
+       }
+       E1000_WRITE_REG(hw, E1000_IMC, ~0);
+       E1000_WRITE_FLUSH(hw);
+}
+
+/*
+ * Bit of a misnomer, what this really means is
+ * to enable OS management of the system... aka
+ * to disable special hardware management features
+ */
+static void
+igb_init_manageability(struct adapter *adapter)
+{
+       int manc, manc2h;
+
+       if (!adapter->has_manage)
+               return;
+
+       manc2h = E1000_READ_REG(&adapter->hw, E1000_MANC2H);
+       manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
+
+       /* disable hardware interception of ARP */
+       manc &= ~(E1000_MANC_ARP_EN);
+
+       /* enable receiving management packets to the host */
+       manc |= E1000_MANC_EN_MNG2HOST;
+       manc2h |= 1 << 5;  /* Mng Port 623 */
+       manc2h |= 1 << 6;  /* Mng Port 664 */
+       E1000_WRITE_REG(&adapter->hw, E1000_MANC2H, manc2h);
+       E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
+}
+
+/*
+ * Give control back to hardware management
+ * controller if there is one.
+ */
+static void
+igb_release_manageability(struct adapter *adapter)
+{
+       int manc;
+
+       if (!adapter->has_manage)
+               return;
+
+       manc = E1000_READ_REG(&adapter->hw, E1000_MANC);
+       /* re-enable hardware ARP interception, stop diverting
+        * management packets to the host */
+       manc |= E1000_MANC_ARP_EN;
+       manc &= ~E1000_MANC_EN_MNG2HOST;
+       E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
+}
+
+/*
+ * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
+ * For ASF and Pass Through versions of f/w this means that
+ * the driver is loaded.
+ */
+static void
+igb_get_hw_control(struct adapter *adapter)
+{
+       u32 reg;
+
+       /* Virtual functions skip the DRV_LOAD handshake */
+       if (adapter->vf_ifp)
+               return;
+
+       /* Let firmware know the driver has taken over */
+       reg = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
+       reg |= E1000_CTRL_EXT_DRV_LOAD;
+       E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, reg);
+}
+
+/*
+ * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
+ * For ASF and Pass Through versions of f/w this means that the
+ * driver is no longer loaded.
+ */
+static void
+igb_release_hw_control(struct adapter *adapter)
+{
+       u32 reg;
+
+       /* Virtual functions skip the DRV_LOAD handshake */
+       if (adapter->vf_ifp)
+               return;
+
+       /* Let firmware take over control of the hardware */
+       reg = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
+       reg &= ~E1000_CTRL_EXT_DRV_LOAD;
+       E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, reg);
+}
+
+/*
+ * Return TRUE for a usable unicast MAC address, FALSE for a
+ * multicast/broadcast address or the all-zero address.
+ */
+static int
+igb_is_valid_ether_addr(uint8_t *addr)
+{
+       char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };
+
+       /* Multicast/broadcast: low bit of the first octet is set */
+       if (addr[0] & 1)
+               return (FALSE);
+       /* All-zero address is not valid either */
+       if (bcmp(addr, zero_addr, ETHER_ADDR_LEN) == 0)
+               return (FALSE);
+       return (TRUE);
+}
+
+
+/*
+ * Enable PCI Wake On Lan capability
+ */
+static void
+igb_enable_wakeup(device_t dev)
+{
+       u16     cap, status;
+       u8      id;
+
+       /* First find the capabilities pointer*/
+       cap = pci_read_config(dev, PCIR_CAP_PTR, 2);
+       /* Read the PM Capabilities */
+       id = pci_read_config(dev, cap, 1);
+       if (id != PCIY_PMG)     /* Something wrong */
+               return;
+       /*
+        * NOTE(review): this assumes the power-management capability
+        * is the FIRST entry in the capability list — the chain is
+        * not walked. Confirm this holds for all supported devices.
+        */
+       /* OK, we have the power capabilities, so
+          now get the status register */
+       cap += PCIR_POWER_STATUS;
+       status = pci_read_config(dev, cap, 2);
+       /* Assert PME status and enable PME generation */
+       status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
+       pci_write_config(dev, cap, status, 2);
+       return;
+}
+
+/*
+ * Identification-LED callback: turn the LED on when 'onoff' is
+ * non-zero, otherwise turn it off and return it to hw control.
+ */
+static void
+igb_led_func(void *arg, int onoff)
+{
+       struct adapter *sc = arg;
+
+       IGB_CORE_LOCK(sc);
+       if (onoff) {
+               e1000_setup_led(&sc->hw);
+               e1000_led_on(&sc->hw);
+       } else {
+               e1000_led_off(&sc->hw);
+               e1000_cleanup_led(&sc->hw);
+       }
+       IGB_CORE_UNLOCK(sc);
+}
+
+/**********************************************************************
+ *
+ *  Update the board statistics counters.
+ *
+ *  Reads the hardware statistics registers and accumulates them into
+ *  the adapter's e1000_hw_stats block.  These registers are
+ *  clear-on-read, so each read yields the delta since the previous
+ *  call — presumably this is invoked periodically from the watchdog
+ *  timer (TODO confirm against caller).  VF adapters expose only a
+ *  small stats set and are handed off to igb_update_vf_stats_counters().
+ *
+ **********************************************************************/
+static void
+igb_update_stats_counters(struct adapter *adapter)
+{
+       struct ifnet            *ifp;
+        struct e1000_hw                *hw = &adapter->hw;
+       struct e1000_hw_stats   *stats;
+
+       /* 
+       ** The virtual function adapter has only a
+       ** small controlled set of stats, do only 
+       ** those and return.
+       */
+       if (adapter->vf_ifp) {
+               igb_update_vf_stats_counters(adapter);
+               return;
+       }
+
+       stats = (struct e1000_hw_stats  *)adapter->stats;
+
+       /* Symbol/sequence errors are only valid with link up (or copper) */
+       if(adapter->hw.phy.media_type == e1000_media_type_copper ||
+          (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) {
+               stats->symerrs +=
+                   E1000_READ_REG(hw,E1000_SYMERRS);
+               stats->sec += E1000_READ_REG(hw, E1000_SEC);
+       }
+
+       stats->crcerrs += E1000_READ_REG(hw, E1000_CRCERRS);
+       stats->mpc += E1000_READ_REG(hw, E1000_MPC);
+       stats->scc += E1000_READ_REG(hw, E1000_SCC);
+       stats->ecol += E1000_READ_REG(hw, E1000_ECOL);
+
+       stats->mcc += E1000_READ_REG(hw, E1000_MCC);
+       stats->latecol += E1000_READ_REG(hw, E1000_LATECOL);
+       stats->colc += E1000_READ_REG(hw, E1000_COLC);
+       stats->dc += E1000_READ_REG(hw, E1000_DC);
+       stats->rlec += E1000_READ_REG(hw, E1000_RLEC);
+       stats->xonrxc += E1000_READ_REG(hw, E1000_XONRXC);
+       stats->xontxc += E1000_READ_REG(hw, E1000_XONTXC);
+       /*
+       ** For watchdog management we need to know if we have been
+       ** paused during the last interval, so capture that here.
+       */ 
+        adapter->pause_frames = E1000_READ_REG(&adapter->hw, E1000_XOFFRXC);
+        stats->xoffrxc += adapter->pause_frames;
+       stats->xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC);
+       stats->fcruc += E1000_READ_REG(hw, E1000_FCRUC);
+       /* Received-packet size histogram buckets (64 .. 1522 bytes) */
+       stats->prc64 += E1000_READ_REG(hw, E1000_PRC64);
+       stats->prc127 += E1000_READ_REG(hw, E1000_PRC127);
+       stats->prc255 += E1000_READ_REG(hw, E1000_PRC255);
+       stats->prc511 += E1000_READ_REG(hw, E1000_PRC511);
+       stats->prc1023 += E1000_READ_REG(hw, E1000_PRC1023);
+       stats->prc1522 += E1000_READ_REG(hw, E1000_PRC1522);
+       stats->gprc += E1000_READ_REG(hw, E1000_GPRC);
+       stats->bprc += E1000_READ_REG(hw, E1000_BPRC);
+       stats->mprc += E1000_READ_REG(hw, E1000_MPRC);
+       stats->gptc += E1000_READ_REG(hw, E1000_GPTC);
+
+       /* For the 64-bit byte counters the low dword must be read first. */
+       /* Both registers clear on the read of the high dword */
+
+       stats->gorc += E1000_READ_REG(hw, E1000_GORCL) +
+           ((u64)E1000_READ_REG(hw, E1000_GORCH) << 32);
+       stats->gotc += E1000_READ_REG(hw, E1000_GOTCL) +
+           ((u64)E1000_READ_REG(hw, E1000_GOTCH) << 32);
+
+       stats->rnbc += E1000_READ_REG(hw, E1000_RNBC);
+       stats->ruc += E1000_READ_REG(hw, E1000_RUC);
+       stats->rfc += E1000_READ_REG(hw, E1000_RFC);
+       stats->roc += E1000_READ_REG(hw, E1000_ROC);
+       stats->rjc += E1000_READ_REG(hw, E1000_RJC);
+
+       /*
+       ** NOTE(review): only the high dwords (TORH/TOTH) are read here;
+       ** TORL/TOTL are never read, unlike the GORC/GOTC pairs above —
+       ** confirm against the data sheet whether the low dwords should
+       ** be included.
+       */
+       stats->tor += E1000_READ_REG(hw, E1000_TORH);
+       stats->tot += E1000_READ_REG(hw, E1000_TOTH);
+
+       stats->tpr += E1000_READ_REG(hw, E1000_TPR);
+       stats->tpt += E1000_READ_REG(hw, E1000_TPT);
+       /* Transmitted-packet size histogram buckets (64 .. 1522 bytes) */
+       stats->ptc64 += E1000_READ_REG(hw, E1000_PTC64);
+       stats->ptc127 += E1000_READ_REG(hw, E1000_PTC127);
+       stats->ptc255 += E1000_READ_REG(hw, E1000_PTC255);
+       stats->ptc511 += E1000_READ_REG(hw, E1000_PTC511);
+       stats->ptc1023 += E1000_READ_REG(hw, E1000_PTC1023);
+       stats->ptc1522 += E1000_READ_REG(hw, E1000_PTC1522);
+       stats->mptc += E1000_READ_REG(hw, E1000_MPTC);
+       stats->bptc += E1000_READ_REG(hw, E1000_BPTC);
+
+       /* Interrupt Counts */
+
+       stats->iac += E1000_READ_REG(hw, E1000_IAC);
+       stats->icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC);
+       stats->icrxatc += E1000_READ_REG(hw, E1000_ICRXATC);
+       stats->ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC);
+       stats->ictxatc += E1000_READ_REG(hw, E1000_ICTXATC);
+       stats->ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC);
+       stats->ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC);
+       stats->icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC);
+       stats->icrxoc += E1000_READ_REG(hw, E1000_ICRXOC);
+
+       /* Host to Card Statistics */
+
+       stats->cbtmpc += E1000_READ_REG(hw, E1000_CBTMPC);
+       stats->htdpmc += E1000_READ_REG(hw, E1000_HTDPMC);
+       stats->cbrdpc += E1000_READ_REG(hw, E1000_CBRDPC);
+       stats->cbrmpc += E1000_READ_REG(hw, E1000_CBRMPC);
+       stats->rpthc += E1000_READ_REG(hw, E1000_RPTHC);
+       stats->hgptc += E1000_READ_REG(hw, E1000_HGPTC);
+       stats->htcbdpc += E1000_READ_REG(hw, E1000_HTCBDPC);
+       stats->hgorc += (E1000_READ_REG(hw, E1000_HGORCL) +
+           ((u64)E1000_READ_REG(hw, E1000_HGORCH) << 32));
+       stats->hgotc += (E1000_READ_REG(hw, E1000_HGOTCL) +
+           ((u64)E1000_READ_REG(hw, E1000_HGOTCH) << 32));
+       stats->lenerrs += E1000_READ_REG(hw, E1000_LENERRS);
+       stats->scvpc += E1000_READ_REG(hw, E1000_SCVPC);
+       stats->hrmpc += E1000_READ_REG(hw, E1000_HRMPC);
+
+       stats->algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC);
+       stats->rxerrc += E1000_READ_REG(hw, E1000_RXERRC);
+       stats->tncrs += E1000_READ_REG(hw, E1000_TNCRS);
+       stats->cexterr += E1000_READ_REG(hw, E1000_CEXTERR);
+       stats->tsctc += E1000_READ_REG(hw, E1000_TSCTC);
+       stats->tsctfc += E1000_READ_REG(hw, E1000_TSCTFC);
+
+       /* Propagate aggregated counts to the ifnet-visible counters */
+       ifp = adapter->ifp;
+       ifp->if_collisions = stats->colc;
+
+       /* Rx Errors */
+       ifp->if_ierrors = adapter->dropped_pkts + stats->rxerrc +
+           stats->crcerrs + stats->algnerrc +
+           stats->ruc + stats->roc + stats->mpc + stats->cexterr;
+
+       /* Tx Errors */
+       ifp->if_oerrors = stats->ecol +
+           stats->latecol + adapter->watchdog_events;
+
+       /* Driver specific counters */
+       adapter->device_control = E1000_READ_REG(hw, E1000_CTRL);
+       adapter->rx_control = E1000_READ_REG(hw, E1000_RCTL);
+       adapter->int_mask = E1000_READ_REG(hw, E1000_IMS);
+       adapter->eint_mask = E1000_READ_REG(hw, E1000_EIMS);
+       /* PBA register: high 16 bits = TX buffer KB, low 16 = RX buffer KB */
+       adapter->packet_buf_alloc_tx =
+           ((E1000_READ_REG(hw, E1000_PBA) & 0xffff0000) >> 16);
+       adapter->packet_buf_alloc_rx =
+           (E1000_READ_REG(hw, E1000_PBA) & 0xffff);
+}
+
+
+/**********************************************************************
+ *
+ *  Initialize the VF board statistics counters.
+ *
+ *  Snapshots the current hardware counter values so that later delta
+ *  computations (igb_update_vf_stats_counters) start from this point.
+ *
+ **********************************************************************/
+static void
+igb_vf_init_stats(struct adapter *adapter)
+{
+	struct e1000_hw		*hw = &adapter->hw;
+	struct e1000_vf_stats	*vstats;
+
+	vstats = (struct e1000_vf_stats *)adapter->stats;
+	if (vstats == NULL)
+		return;
+	/* Record the current register values as the baseline */
+	vstats->last_gprc = E1000_READ_REG(hw, E1000_VFGPRC);
+	vstats->last_gorc = E1000_READ_REG(hw, E1000_VFGORC);
+	vstats->last_gptc = E1000_READ_REG(hw, E1000_VFGPTC);
+	vstats->last_gotc = E1000_READ_REG(hw, E1000_VFGOTC);
+	vstats->last_mprc = E1000_READ_REG(hw, E1000_VFMPRC);
+}
+/**********************************************************************
+ *
+ *  Update the VF board statistics counters.
+ *
+ *  Accumulates the delta of each VF counter register since the last
+ *  snapshot (via the UPDATE_VF_REG macro) into the adapter stats.
+ *
+ **********************************************************************/
+static void
+igb_update_vf_stats_counters(struct adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	struct e1000_vf_stats	*stats;
+
+	/* Counters are only meaningful once link is established */
+	if (adapter->link_speed == 0)
+		return;
+
+	stats = (struct e1000_vf_stats  *)adapter->stats;
+	/*
+	** Guard against an unallocated stats block; igb_vf_init_stats()
+	** performs the same check, this path previously did not.
+	*/
+	if (stats == NULL)
+		return;
+
+	UPDATE_VF_REG(E1000_VFGPRC,
+	    stats->last_gprc, stats->gprc);
+	UPDATE_VF_REG(E1000_VFGORC,
+	    stats->last_gorc, stats->gorc);
+	UPDATE_VF_REG(E1000_VFGPTC,
+	    stats->last_gptc, stats->gptc);
+	UPDATE_VF_REG(E1000_VFGOTC,
+	    stats->last_gotc, stats->gotc);
+	UPDATE_VF_REG(E1000_VFMPRC,
+	    stats->last_mprc, stats->mprc);
+}
+
+/*
+ * Export a single 32-bit device register via a read-only sysctl.
+ * oid_arg1 is the adapter softc, oid_arg2 the register offset.
+ */
+static int
+igb_sysctl_reg_handler(SYSCTL_HANDLER_ARGS)
+{
+	struct adapter	*sc = oidp->oid_arg1;
+	u_int		regval;
+
+	/* Fetch the current register contents and hand them to sysctl */
+	regval = E1000_READ_REG(&sc->hw, oidp->oid_arg2);
+	return (sysctl_handle_int(oidp, &regval, 0, req));
+}
+
+/*
+**  Tuneable interrupt rate handler
+**
+**  Reports the current interrupt throttle rate (interrupts/second)
+**  derived from the queue's EITR register.  Writes are accepted by
+**  the sysctl layer but not applied to the hardware here.
+*/
+static int
+igb_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
+{
+	struct igb_queue	*que = ((struct igb_queue *)oidp->oid_arg1);
+	u32			eitr, usecs, rate;
+	int			error;
+
+	/* The throttle interval in usec lives in EITR bits 2..14 */
+	eitr = E1000_READ_REG(&que->adapter->hw, E1000_EITR(que->msix));
+	usecs = (eitr >> 2) & 0x1FFF;
+	/* Convert the interval to a rate; 0 means unthrottled */
+	rate = (usecs > 0) ? (1000000 / usecs) : 0;
+	error = sysctl_handle_int(oidp, &rate, 0, req);
+	return (error);
+}
+
+/*
+ * Add sysctl variables, one per statistic, to the system.
+ */
+static void
+igb_add_hw_stats(struct adapter *adapter)
+{
+       device_t dev = adapter->dev;
+
+       struct tx_ring *txr = adapter->tx_rings;
+       struct rx_ring *rxr = adapter->rx_rings;
+
+       struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
+       struct sysctl_oid *tree = device_get_sysctl_tree(dev);
+       struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
+       struct e1000_hw_stats *stats = adapter->stats;
+
+       struct sysctl_oid *stat_node, *queue_node, *int_node, *host_node;
+       struct sysctl_oid_list *stat_list, *queue_list, *int_list, *host_list;
+
+#define QUEUE_NAME_LEN 32
+       char namebuf[QUEUE_NAME_LEN];
+
+       /* Driver Statistics */
+       SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "link_irq", 
+                       CTLFLAG_RD, &adapter->link_irq, 0,
+                       "Link MSIX IRQ Handled");
+       SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped", 
+                       CTLFLAG_RD, &adapter->dropped_pkts,
+                       "Driver dropped packets");
+       SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_dma_fail", 
+                       CTLFLAG_RD, &adapter->no_tx_dma_setup,
+                       "Driver tx dma failure in xmit");
+       SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_overruns",
+                       CTLFLAG_RD, &adapter->rx_overruns,
+                       "RX overruns");
+       SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_timeouts",
+                       CTLFLAG_RD, &adapter->watchdog_events,
+                       "Watchdog timeouts");
+
+       SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "device_control", 
+                       CTLFLAG_RD, &adapter->device_control,
+                       "Device Control Register");
+       SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_control", 
+                       CTLFLAG_RD, &adapter->rx_control,
+                       "Receiver Control Register");
+       SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "interrupt_mask", 
+                       CTLFLAG_RD, &adapter->int_mask,
+                       "Interrupt Mask");
+       SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "extended_int_mask", 
+                       CTLFLAG_RD, &adapter->eint_mask,
+                       "Extended Interrupt Mask");
+       SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_buf_alloc", 
+                       CTLFLAG_RD, &adapter->packet_buf_alloc_tx,
+                       "Transmit Buffer Packet Allocation");
+       SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_buf_alloc", 
+                       CTLFLAG_RD, &adapter->packet_buf_alloc_rx,
+                       "Receive Buffer Packet Allocation");
+       SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_high_water",
+                       CTLFLAG_RD, &adapter->hw.fc.high_water, 0,
+                       "Flow Control High Watermark");
+       SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_low_water", 
+                       CTLFLAG_RD, &adapter->hw.fc.low_water, 0,
+                       "Flow Control Low Watermark");
+
+       for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
+               struct lro_ctrl *lro = &rxr->lro;
+
+               snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
+               queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
+                                           CTLFLAG_RD, NULL, "Queue Name");
+               queue_list = SYSCTL_CHILDREN(queue_node);
+
+               SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate", 
+                               CTLFLAG_RD, &adapter->queues[i],
+                               sizeof(&adapter->queues[i]),
+                               igb_sysctl_interrupt_rate_handler,
+                               "IU", "Interrupt Rate");
+
+               SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head", 
+                               CTLFLAG_RD, adapter, E1000_TDH(txr->me),
+                               igb_sysctl_reg_handler, "IU",
+                               "Transmit Descriptor Head");
+               SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail", 
+                               CTLFLAG_RD, adapter, E1000_TDT(txr->me),
+                               igb_sysctl_reg_handler, "IU",
+                               "Transmit Descriptor Tail");
+               SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "no_desc_avail", 
+                               CTLFLAG_RD, &txr->no_desc_avail,
+                               "Queue No Descriptor Available");
+               SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_packets",
+                               CTLFLAG_RD, &txr->tx_packets,
+                               "Queue Packets Transmitted");
+
+               SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head", 
+                               CTLFLAG_RD, adapter, E1000_RDH(rxr->me),
+                               igb_sysctl_reg_handler, "IU",
+                               "Receive Descriptor Head");
+               SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail", 
+                               CTLFLAG_RD, adapter, E1000_RDT(rxr->me),
+                               igb_sysctl_reg_handler, "IU",
+                               "Receive Descriptor Tail");
+               SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_packets",
+                               CTLFLAG_RD, &rxr->rx_packets,
+                               "Queue Packets Received");
+               SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
+                               CTLFLAG_RD, &rxr->rx_bytes,
+                               "Queue Bytes Received");
+               SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "lro_queued",
+                               CTLFLAG_RD, &lro->lro_queued, 0,
+                               "LRO Queued");
+               SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "lro_flushed",
+                               CTLFLAG_RD, &lro->lro_flushed, 0,
+                               "LRO Flushed");
+       }
+
+       /* MAC stats get their own sub node */
+
+       stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats", 
+                                   CTLFLAG_RD, NULL, "MAC Statistics");
+       stat_list = SYSCTL_CHILDREN(stat_node);
+
+       /*
+       ** VF adapter has a very limited set of stats
+       ** since its not managing the metal, so to speak.
+       */
+       if (adapter->vf_ifp) {
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_pkts_recvd",
+                       CTLFLAG_RD, &stats->gprc,
+                       "Good Packets Received");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
+                       CTLFLAG_RD, &stats->gptc,
+                       "Good Packets Transmitted");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_octets_recvd", 
+                       CTLFLAG_RD, &stats->gorc, 
+                       "Good Octets Received"); 
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_octets_txd", 
+                       CTLFLAG_RD, &stats->gotc, 
+                       "Good Octets Transmitted"); 
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_recvd",
+                       CTLFLAG_RD, &stats->mprc,
+                       "Multicast Packets Received");
+               return;
+       }
+
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "excess_coll", 
+                       CTLFLAG_RD, &stats->ecol,
+                       "Excessive collisions");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "single_coll", 
+                       CTLFLAG_RD, &stats->scc,
+                       "Single collisions");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "multiple_coll", 
+                       CTLFLAG_RD, &stats->mcc,
+                       "Multiple collisions");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "late_coll", 
+                       CTLFLAG_RD, &stats->latecol,
+                       "Late collisions");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "collision_count", 
+                       CTLFLAG_RD, &stats->colc,
+                       "Collision Count");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "symbol_errors",
+                       CTLFLAG_RD, &stats->symerrs,
+                       "Symbol Errors");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "sequence_errors",
+                       CTLFLAG_RD, &stats->sec,
+                       "Sequence Errors");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "defer_count",
+                       CTLFLAG_RD, &stats->dc,
+                       "Defer Count");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "missed_packets",
+                       CTLFLAG_RD, &stats->mpc,
+                       "Missed Packets");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_no_buff",
+                       CTLFLAG_RD, &stats->rnbc,
+                       "Receive No Buffers");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_undersize",
+                       CTLFLAG_RD, &stats->ruc,
+                       "Receive Undersize");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
+                       CTLFLAG_RD, &stats->rfc,
+                       "Fragmented Packets Received ");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_oversize",
+                       CTLFLAG_RD, &stats->roc,
+                       "Oversized Packets Received");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_jabber",
+                       CTLFLAG_RD, &stats->rjc,
+                       "Recevied Jabber");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_errs",
+                       CTLFLAG_RD, &stats->rxerrc,
+                       "Receive Errors");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "crc_errs",
+                       CTLFLAG_RD, &stats->crcerrs,
+                       "CRC errors");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "alignment_errs",
+                       CTLFLAG_RD, &stats->algnerrc,
+                       "Alignment Errors");
+       /* On 82575 these are collision counts */
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "coll_ext_errs",
+                       CTLFLAG_RD, &stats->cexterr,
+                       "Collision/Carrier extension errors");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
+                       CTLFLAG_RD, &stats->xonrxc,
+                       "XON Received");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "xon_txd",
+                       CTLFLAG_RD, &stats->xontxc,
+                       "XON Transmitted");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
+                       CTLFLAG_RD, &stats->xoffrxc,
+                       "XOFF Received");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
+                       CTLFLAG_RD, &stats->xofftxc,
+                       "XOFF Transmitted");
+       /* Packet Reception Stats */
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "total_pkts_recvd",
+                       CTLFLAG_RD, &stats->tpr,
+                       "Total Packets Received ");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_pkts_recvd",
+                       CTLFLAG_RD, &stats->gprc,
+                       "Good Packets Received");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_recvd",
+                       CTLFLAG_RD, &stats->bprc,
+                       "Broadcast Packets Received");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_recvd",
+                       CTLFLAG_RD, &stats->mprc,
+                       "Multicast Packets Received");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
+                       CTLFLAG_RD, &stats->prc64,
+                       "64 byte frames received ");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
+                       CTLFLAG_RD, &stats->prc127,
+                       "65-127 byte frames received");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
+                       CTLFLAG_RD, &stats->prc255,
+                       "128-255 byte frames received");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
+                       CTLFLAG_RD, &stats->prc511,
+                       "256-511 byte frames received");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
+                       CTLFLAG_RD, &stats->prc1023,
+                       "512-1023 byte frames received");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
+                       CTLFLAG_RD, &stats->prc1522,
+                       "1023-1522 byte frames received");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_octets_recvd", 
+                       CTLFLAG_RD, &stats->gorc, 
+                       "Good Octets Received"); 
+
+       /* Packet Transmission Stats */
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_octets_txd", 
+                       CTLFLAG_RD, &stats->gotc, 
+                       "Good Octets Transmitted"); 
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
+                       CTLFLAG_RD, &stats->tpt,
+                       "Total Packets Transmitted");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
+                       CTLFLAG_RD, &stats->gptc,
+                       "Good Packets Transmitted");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
+                       CTLFLAG_RD, &stats->bptc,
+                       "Broadcast Packets Transmitted");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
+                       CTLFLAG_RD, &stats->mptc,
+                       "Multicast Packets Transmitted");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
+                       CTLFLAG_RD, &stats->ptc64,
+                       "64 byte frames transmitted ");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
+                       CTLFLAG_RD, &stats->ptc127,
+                       "65-127 byte frames transmitted");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
+                       CTLFLAG_RD, &stats->ptc255,
+                       "128-255 byte frames transmitted");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
+                       CTLFLAG_RD, &stats->ptc511,
+                       "256-511 byte frames transmitted");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
+                       CTLFLAG_RD, &stats->ptc1023,
+                       "512-1023 byte frames transmitted");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
+                       CTLFLAG_RD, &stats->ptc1522,
+                       "1024-1522 byte frames transmitted");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tso_txd",
+                       CTLFLAG_RD, &stats->tsctc,
+                       "TSO Contexts Transmitted");
+       SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tso_ctx_fail",
+                       CTLFLAG_RD, &stats->tsctfc,
+                       "TSO Contexts Failed");
+
+
+       /* Interrupt Stats */
+
+       int_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "interrupts", 
+                                   CTLFLAG_RD, NULL, "Interrupt Statistics");
+       int_list = SYSCTL_CHILDREN(int_node);
+
+       SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "asserts",
+                       CTLFLAG_RD, &stats->iac,
+                       "Interrupt Assertion Count");
+
+       SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "rx_pkt_timer",
+                       CTLFLAG_RD, &stats->icrxptc,
+                       "Interrupt Cause Rx Pkt Timer Expire Count");
+
+       SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "rx_abs_timer",
+                       CTLFLAG_RD, &stats->icrxatc,
+                       "Interrupt Cause Rx Abs Timer Expire Count");
+
+       SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "tx_pkt_timer",
+                       CTLFLAG_RD, &stats->ictxptc,
+                       "Interrupt Cause Tx Pkt Timer Expire Count");
+
+       SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "tx_abs_timer",
+                       CTLFLAG_RD, &stats->ictxatc,
+                       "Interrupt Cause Tx Abs Timer Expire Count");
+
+       SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "tx_queue_empty",
+                       CTLFLAG_RD, &stats->ictxqec,
+                       "Interrupt Cause Tx Queue Empty Count");
+
+       SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "tx_queue_min_thresh",
+                       CTLFLAG_RD, &stats->ictxqmtc,
+                       "Interrupt Cause Tx Queue Min Thresh Count");
+
+       SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "rx_desc_min_thresh",
+                       CTLFLAG_RD, &stats->icrxdmtc,
+                       "Interrupt Cause Rx Desc Min Thresh Count");
+
+       SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "rx_overrun",
+                       CTLFLAG_RD, &stats->icrxoc,
+                       "Interrupt Cause Receiver Overrun Count");
+
+       /* Host to Card Stats */
+
+       host_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "host", 
+                                   CTLFLAG_RD, NULL, 
+                                   "Host to Card Statistics");
+
+       host_list = SYSCTL_CHILDREN(host_node);
+
+       SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "breaker_tx_pkt",
+                       CTLFLAG_RD, &stats->cbtmpc,
+                       "Circuit Breaker Tx Packet Count");
+
+       SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "host_tx_pkt_discard",
+                       CTLFLAG_RD, &stats->htdpmc,
+                       "Host Transmit Discarded Packets");
+
+       SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "rx_pkt",
+                       CTLFLAG_RD, &stats->rpthc,
+                       "Rx Packets To Host");
+
+       SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "breaker_rx_pkts",
+                       CTLFLAG_RD, &stats->cbrmpc,
+                       "Circuit Breaker Rx Packet Count");
+
+       SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "breaker_rx_pkt_drop",
+                       CTLFLAG_RD, &stats->cbrdpc,
+                       "Circuit Breaker Rx Dropped Count");
+
+       SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "tx_good_pkt",
+                       CTLFLAG_RD, &stats->hgptc,
+                       "Host Good Packets Tx Count");
+
+       SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "breaker_tx_pkt_drop",
+                       CTLFLAG_RD, &stats->htcbdpc,
+                       "Host Tx Circuit Breaker Dropped Count");
+
+       SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "rx_good_bytes",
+                       CTLFLAG_RD, &stats->hgorc,
+                       "Host Good Octets Received Count");
+
+       SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "tx_good_bytes",
+                       CTLFLAG_RD, &stats->hgotc,
+                       "Host Good Octets Transmit Count");
+
+       SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "length_errors",
+                       CTLFLAG_RD, &stats->lenerrs,
+                       "Length Errors");
+
+       SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "serdes_violation_pkt",
+                       CTLFLAG_RD, &stats->scvpc,
+                       "SerDes/SGMII Code Violation Pkt Count");
+
+       SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "header_redir_missed",
+                       CTLFLAG_RD, &stats->hrmpc,
+                       "Header Redirection Missed Packet Count");
+}
+
+
+/**********************************************************************
+ *
+ *  This routine provides a way to dump out the adapter eeprom,
+ *  often a useful debug/service tool. This only dumps the first
+ *  32 words, stuff that matters is in that extent.
+ *
+ **********************************************************************/
+static int
+igb_sysctl_nvm_info(SYSCTL_HANDLER_ARGS)
+{
+	struct adapter *adapter;
+	int error, result = -1;
+
+	error = sysctl_handle_int(oidp, &result, 0, req);
+	/* Nothing to do on a read, or if the handler failed */
+	if (error || !req->newptr)
+		return (error);
+
+	/*
+	 * Writing the value 1 triggers a hex dump of the first
+	 * 32 16-bit words of the EEPROM to the console.
+	 */
+	if (result == 1) {
+		adapter = (struct adapter *)arg1;
+		igb_print_nvm_info(adapter);
+	}
+
+	return (error);
+}
+
+/*
+ * Hex-dump the first 32 16-bit EEPROM words to the console,
+ * eight words per row with a leading offset label.
+ */
+static void
+igb_print_nvm_info(struct adapter *adapter)
+{
+	u16	word;
+	int	i, row = 0;
+
+	/* Its a bit crude, but it gets the job done */
+	printf("\nInterface EEPROM Dump:\n");
+	printf("Offset\n0x0000  ");
+	for (i = 0; i < 32; i++) {
+		if (i != 0 && (i % 8) == 0) { /* Start a new offset row */
+			++row;
+			printf("\n0x00%x0  ",row);
+		}
+		e1000_read_nvm(&adapter->hw, i, 1, &word);
+		printf("%04x ", word);
+	}
+	printf("\n");
+}
+
+/*
+ * Store 'value' into *limit and expose it as a read/write integer
+ * sysctl named 'name' under the device's sysctl tree.
+ *
+ * NOTE(review): CTLTYPE_INT in the flags is redundant here —
+ * SYSCTL_ADD_INT already implies the integer type; confirm whether
+ * it should be dropped.
+ */
+static void
+igb_set_sysctl_value(struct adapter *adapter, const char *name,
+	const char *description, int *limit, int value)
+{
+	*limit = value;
+	SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
+	    SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
+	    OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
+}
+
+/*
+** Set flow control using sysctl:
+** Flow control values:
+**     0 - off
+**     1 - rx pause
+**     2 - tx pause
+**     3 - full
+**
+** NOTE(review): igb_fc_setting is a module-wide global, so this
+** setting is shared by every igb adapter in the system — confirm
+** whether per-adapter state is intended.
+*/
+static int
+igb_set_flowcntl(SYSCTL_HANDLER_ARGS)
+{
+	int error;
+	struct adapter *adapter;
+
+	error = sysctl_handle_int(oidp, &igb_fc_setting, 0, req);
+
+	/*
+	** Only reprogram the hardware on an actual write; a plain
+	** read (req->newptr == NULL) must not touch flow control
+	** state.  The previous code forced flow control on reads too.
+	*/
+	if ((error) || (req->newptr == NULL))
+		return (error);
+
+	adapter = (struct adapter *) arg1;
+	switch (igb_fc_setting) {
+		case e1000_fc_rx_pause:
+		case e1000_fc_tx_pause:
+		case e1000_fc_full:
+			adapter->hw.fc.requested_mode = igb_fc_setting;
+			break;
+		case e1000_fc_none:
+		default:
+			/* Unknown values fall back to flow control off */
+			adapter->hw.fc.requested_mode = e1000_fc_none;
+	}
+
+	adapter->hw.fc.current_mode = adapter->hw.fc.requested_mode;
+	e1000_force_mac_fc(&adapter->hw);
+	return (error);
+}
diff --git a/lib/librte_pmd_igb/igb/if_igb.h b/lib/librte_pmd_igb/igb/if_igb.h
new file mode 100644 (file)
index 0000000..9a0bb47
--- /dev/null
@@ -0,0 +1,541 @@
+/******************************************************************************
+
+  Copyright (c) 2001-2011, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#ifndef _IGB_H_DEFINED_
+#define _IGB_H_DEFINED_
+
+/* Tunables */
+
+/*
+ * IGB_TXD: Maximum number of Transmit Descriptors
+ *
+ *   This value is the number of transmit descriptors allocated by the driver.
+ *   Increasing this value allows the driver to queue more transmits. Each
+ *   descriptor is 16 bytes.
+ *   Since TDLEN should be multiple of 128bytes, the number of transmit
+ *   descriptors should meet the following condition.
+ *      (num_tx_desc * sizeof(struct e1000_tx_desc)) % 128 == 0
+ */
+#define IGB_MIN_TXD            256
+#define IGB_DEFAULT_TXD                1024
+#define IGB_MAX_TXD            4096
+
+/*
+ * IGB_RXD: Maximum number of Receive Descriptors
+ *
+ *   This value is the number of receive descriptors allocated by the driver.
+ *   Increasing this value allows the driver to buffer more incoming packets.
+ *   Each descriptor is 16 bytes.  A receive buffer is also allocated for each
+ *   descriptor. The maximum MTU size is 16110.
+ *   Since RDLEN should be multiple of 128bytes, the number of receive
+ *   descriptors should meet the following condition.
+ *      (num_rx_desc * sizeof(union e1000_adv_rx_desc)) % 128 == 0
+#define IGB_MIN_RXD            256
+#define IGB_DEFAULT_RXD                1024
+#define IGB_MAX_RXD            4096
+
+/*
+ * IGB_TIDV - Transmit Interrupt Delay Value
+ * Valid Range: 0-65535 (0=off)
+ * Default Value: 64
+ *   This value delays the generation of transmit interrupts in units of
+ *   1.024 microseconds. Transmit interrupt reduction can improve CPU
+ *   efficiency if properly tuned for specific network traffic. If the
+ *   system is reporting dropped transmits, this value may be set too high
+ *   causing the driver to run out of available transmit descriptors.
+ */
+#define IGB_TIDV                         64
+
+/*
+ * IGB_TADV - Transmit Absolute Interrupt Delay Value
+ * Valid Range: 0-65535 (0=off)
+ * Default Value: 64
+ *   This value, in units of 1.024 microseconds, limits the delay in which a
+ *   transmit interrupt is generated. Useful only if IGB_TIDV is non-zero,
+ *   this value ensures that an interrupt is generated after the initial
+ *   packet is sent on the wire within the set amount of time.  Proper tuning,
+ *   along with IGB_TIDV, may improve traffic throughput in specific
+ *   network conditions.
+ */
+#define IGB_TADV                         64
+
+/*
+ * IGB_RDTR - Receive Interrupt Delay Timer (Packet Timer)
+ * Valid Range: 0-65535 (0=off)
+ * Default Value: 0
+ *   This value delays the generation of receive interrupts in units of 1.024
+ *   microseconds.  Receive interrupt reduction can improve CPU efficiency if
+ *   properly tuned for specific network traffic. Increasing this value adds
+ *   extra latency to frame reception and can end up decreasing the throughput
+ *   of TCP traffic. If the system is reporting dropped receives, this value
+ *   may be set too high, causing the driver to run out of available receive
+ *   descriptors.
+ *
+ *   CAUTION: When setting IGB_RDTR to a value other than 0, adapters
+ *            may hang (stop transmitting) under certain network conditions.
+ *            If this occurs a WATCHDOG message is logged in the system
+ *            event log. In addition, the controller is automatically reset,
+ *            restoring the network connection. To eliminate the potential
+ *            for the hang ensure that IGB_RDTR is set to 0.
+ */
+#define IGB_RDTR                         0
+
+/*
+ * Receive Interrupt Absolute Delay Timer (Not valid for 82542/82543/82544)
+ * Valid Range: 0-65535 (0=off)
+ * Default Value: 64
+ *   This value, in units of 1.024 microseconds, limits the delay in which a
+ *   receive interrupt is generated. Useful only if IGB_RDTR is non-zero,
+ *   this value ensures that an interrupt is generated after the initial
+ *   packet is received within the set amount of time.  Proper tuning,
+ *   along with IGB_RDTR, may improve traffic throughput in specific network
+ *   conditions.
+ */
+#define IGB_RADV                         64
+
+/*
+ * This parameter controls the duration of transmit watchdog timer.
+ */
+#define IGB_WATCHDOG                   (10 * hz)
+
+/*
+ * This parameter controls when the driver calls the routine to reclaim
+ * transmit descriptors. Cleaning earlier seems a win.
+ */
+#define IGB_TX_CLEANUP_THRESHOLD       (adapter->num_tx_desc / 2)
+
+/*
+ * This parameter controls whether or not autonegotiation is enabled.
+ *              0 - Disable autonegotiation
+ *              1 - Enable  autonegotiation
+ */
+#define DO_AUTO_NEG                     1
+
+/*
+ * This parameter controls whether or not the driver will wait for
+ * autonegotiation to complete.
+ *              1 - Wait for autonegotiation to complete
+ *              0 - Don't wait for autonegotiation to complete
+ */
+#define WAIT_FOR_AUTO_NEG_DEFAULT       0
+
+/* Tunables -- End */
+
+#define AUTONEG_ADV_DEFAULT    (ADVERTISE_10_HALF | ADVERTISE_10_FULL | \
+                               ADVERTISE_100_HALF | ADVERTISE_100_FULL | \
+                               ADVERTISE_1000_FULL)
+
+#define AUTO_ALL_MODES         0
+
+/* PHY master/slave setting */
+#define IGB_MASTER_SLAVE               e1000_ms_hw_default
+
+/*
+ * Miscellaneous constants
+ */
+#define IGB_VENDOR_ID                  0x8086
+
+#define IGB_JUMBO_PBA                  0x00000028
+#define IGB_DEFAULT_PBA                        0x00000030
+#define IGB_SMARTSPEED_DOWNSHIFT       3
+#define IGB_SMARTSPEED_MAX             15
+#define IGB_MAX_LOOP                   10
+
+#define IGB_RX_PTHRESH                 (hw->mac.type <= e1000_82576 ? 16 : 8)
+#define IGB_RX_HTHRESH                 8
+#define IGB_RX_WTHRESH                 1
+
+#define IGB_TX_PTHRESH                 8
+#define IGB_TX_HTHRESH                 1
+#define IGB_TX_WTHRESH                 ((hw->mac.type != e1000_82575 && \
+                                          adapter->msix_mem) ? 1 : 16)
+
+#define MAX_NUM_MULTICAST_ADDRESSES     128
+#define PCI_ANY_ID                      (~0U)
+#define ETHER_ALIGN                     2
+#define IGB_TX_BUFFER_SIZE		((uint32_t) 1514)
+#define IGB_FC_PAUSE_TIME		0x0680
+/* EEPROM APM-enable bit.  Fixed: the original definition carried a
+** trailing ';' which would be pasted into any expression using the
+** macro (e.g. "x & IGB_EEPROM_APME" would not compile). */
+#define IGB_EEPROM_APME			0x400
+#define IGB_QUEUE_IDLE			0
+#define IGB_QUEUE_WORKING		1
+#define IGB_QUEUE_HUNG			2
+
+/*
+ * TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be
+ * multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary. This will
+ * also optimize cache line size effect. H/W supports up to cache line size 128.
+ */
+#define IGB_DBA_ALIGN                  128
+
+#define SPEED_MODE_BIT (1<<21)         /* On PCI-E MACs only */
+
+/* PCI Config defines */
+#define IGB_MSIX_BAR           3
+
+/* Defines for printing debug information */
+#define DEBUG_INIT  0
+#define DEBUG_IOCTL 0
+#define DEBUG_HW    0
+
+#define INIT_DEBUGOUT(S)            if (DEBUG_INIT)  printf(S "\n")
+#define INIT_DEBUGOUT1(S, A)        if (DEBUG_INIT)  printf(S "\n", A)
+#define INIT_DEBUGOUT2(S, A, B)     if (DEBUG_INIT)  printf(S "\n", A, B)
+#define IOCTL_DEBUGOUT(S)           if (DEBUG_IOCTL) printf(S "\n")
+#define IOCTL_DEBUGOUT1(S, A)       if (DEBUG_IOCTL) printf(S "\n", A)
+#define IOCTL_DEBUGOUT2(S, A, B)    if (DEBUG_IOCTL) printf(S "\n", A, B)
+#define HW_DEBUGOUT(S)              if (DEBUG_HW) printf(S "\n")
+#define HW_DEBUGOUT1(S, A)          if (DEBUG_HW) printf(S "\n", A)
+#define HW_DEBUGOUT2(S, A, B)       if (DEBUG_HW) printf(S "\n", A, B)
+
+#define IGB_MAX_SCATTER                64
+#define IGB_VFTA_SIZE          128
+#define IGB_BR_SIZE            4096    /* ring buf size */
+#define IGB_TSO_SIZE           (65535 + sizeof(struct ether_vlan_header))
+#define IGB_TSO_SEG_SIZE       4096    /* Max dma segment size */
+#define IGB_HDR_BUF            128
+#define IGB_PKTTYPE_MASK       0x0000FFF0
+#define ETH_ZLEN               60
+#define ETH_ADDR_LEN           6
+
+/* Offload bits in mbuf flag */
+#if __FreeBSD_version >= 800000
+#define CSUM_OFFLOAD           (CSUM_IP|CSUM_TCP|CSUM_UDP|CSUM_SCTP)
+#else
+#define CSUM_OFFLOAD           (CSUM_IP|CSUM_TCP|CSUM_UDP)
+#endif
+
+/* Define the starting Interrupt rate per Queue */
+#define IGB_INTS_PER_SEC        8000
+#define IGB_DEFAULT_ITR         ((1000000/IGB_INTS_PER_SEC) << 2)
+
+#define IGB_LINK_ITR            2000
+
+/* Precision Time Sync (IEEE 1588) defines */
+#define ETHERTYPE_IEEE1588     0x88F7
+#define PICOSECS_PER_TICK      20833
+#define TSYNC_PORT             319 /* UDP port for the protocol */
+
+/*
+ * Bus dma allocation structure used by
+ * e1000_dma_malloc and e1000_dma_free.
+ */
+struct igb_dma_alloc {
+        bus_addr_t              dma_paddr;	/* bus (physical) address */
+        caddr_t                 dma_vaddr;	/* kernel virtual address */
+        bus_dma_tag_t           dma_tag;	/* tag used for the allocation */
+        bus_dmamap_t            dma_map;	/* map backing the allocation */
+        bus_dma_segment_t       dma_seg;	/* segment descriptor */
+        int                     dma_nseg;	/* number of segments */
+};
+
+
+/*
+** Driver queue struct: this is the interrupt container
+**  for the associated tx and rx ring.
+*/
+struct igb_queue {
+	struct adapter		*adapter;	/* back pointer to softc */
+	u32			msix;		/* This queue's MSIX vector */
+	u32			eims;		/* This queue's EIMS bit */
+	u32			eitr_setting;	/* EITR (interrupt throttle) value */
+	struct resource		*res;		/* IRQ resource for the vector */
+	void			*tag;		/* interrupt handler cookie */
+	struct tx_ring		*txr;		/* paired transmit ring */
+	struct rx_ring		*rxr;		/* paired receive ring */
+	struct task		que_task;	/* deferred (taskqueue) work */
+	struct taskqueue	*tq;		/* this queue's private taskqueue */
+	u64			irqs;		/* interrupt count (soft stat) */
+};
+
+/*
+ * Transmit ring: one per queue
+ */
+struct tx_ring {
+	struct adapter		*adapter;	/* back pointer to softc */
+	u32			me;		/* ring index */
+	struct mtx		tx_mtx;		/* per-ring lock */
+	char			mtx_name[16];	/* lock name for witness/debug */
+	struct igb_dma_alloc	txdma;		/* descriptor area DMA state */
+	struct e1000_tx_desc	*tx_base;	/* descriptor ring base (virtual) */
+	u32			next_avail_desc;	/* next free slot */
+	u32			next_to_clean;	/* next slot to reclaim */
+	volatile u16		tx_avail;	/* count of free descriptors */
+	struct igb_tx_buffer	*tx_buffers;	/* per-descriptor sw state */
+#if __FreeBSD_version >= 800000
+	struct buf_ring		*br;		/* multiqueue buffer ring */
+#endif
+	bus_dma_tag_t		txtag;		/* dma tag for packet maps */
+
+	u32			bytes;		/* soft stats for AIM */
+	u32			packets;
+
+	int			queue_status;	/* IGB_QUEUE_{IDLE,WORKING,HUNG} */
+	int			watchdog_time;	/* ticks at last tx activity */
+	int			tdt;		/* cached tail register value */
+	int			tdh;		/* cached head register value */
+	u64			no_desc_avail;	/* times we ran out of slots */
+	u64			tx_packets;	/* total packets transmitted */
+};
+
+/*
+ * Receive ring: one per queue
+ */
+struct rx_ring {
+	struct adapter		*adapter;	/* back pointer to softc */
+	u32			me;		/* ring index */
+	struct igb_dma_alloc	rxdma;		/* descriptor area DMA state */
+	union e1000_adv_rx_desc	*rx_base;	/* descriptor ring base (virtual) */
+	struct lro_ctrl		lro;		/* software LRO state */
+	bool			lro_enabled;
+	bool			hdr_split;	/* header split enabled */
+	bool			discard;
+	struct mtx		rx_mtx;		/* per-ring lock */
+	char			mtx_name[16];	/* lock name for witness/debug */
+	u32			next_to_refresh;	/* next slot to re-arm */
+	u32			next_to_check;	/* next slot to inspect */
+	struct igb_rx_buf	*rx_buffers;	/* per-descriptor sw state */
+	bus_dma_tag_t		htag;		/* dma tag for rx head */
+	bus_dma_tag_t		ptag;		/* dma tag for rx packet */
+	/*
+	 * First/last mbuf pointers, for
+	 * collecting multisegment RX packets.
+	 */
+	struct mbuf	       *fmp;
+	struct mbuf	       *lmp;
+
+	u32			bytes;		/* soft stats for AIM */
+	u32			packets;
+	int			rdt;		/* cached tail register value */
+	int			rdh;		/* cached head register value */
+
+	/* Soft stats */
+	u64			rx_split_packets;
+	u64			rx_discarded;
+	u64			rx_packets;
+	u64			rx_bytes;
+};
+
+/*
+** Per-device softc: all driver state for one igb interface.
+*/
+struct adapter {
+	struct ifnet	*ifp;		/* network stack interface */
+	struct e1000_hw	hw;		/* shared-code hardware state */
+
+	struct e1000_osdep osdep;	/* OS dependency layer state */
+	struct device	*dev;		/* newbus device handle */
+	struct cdev	*led_dev;	/* LED identify control device */
+
+	struct resource *pci_mem;	/* register memory BAR */
+	struct resource *msix_mem;	/* MSIX table BAR (IGB_MSIX_BAR) */
+	struct resource	*res;		/* legacy/MSI interrupt resource */
+	void		*tag;		/* interrupt handler cookie */
+	u32		que_mask;
+
+	int		linkvec;	/* MSIX vector used for link events */
+	int		link_mask;
+	struct task	link_task;	/* deferred link-change handling */
+	int		link_irq;	/* link interrupt count (soft stat) */
+
+	struct ifmedia	media;
+	struct callout	timer;		/* periodic local timer */
+	int		msix;	/* total vectors allocated */
+	int		if_flags;	/* saved copy of ifp->if_flags */
+	int		max_frame_size;
+	int		min_frame_size;
+	int		pause_frames;
+	struct mtx	core_mtx;	/* softc-wide lock (IGB_CORE_LOCK) */
+	int		igb_insert_vlan_header;
+        u16		num_queues;
+	u16		vf_ifp;  /* a VF interface */
+
+	/* VLAN event handler registrations */
+	eventhandler_tag vlan_attach;
+	eventhandler_tag vlan_detach;
+	u32		num_vlans;
+
+	/* Management and WOL features */
+	int		wol;
+	int		has_manage;
+
+	/*
+	** Shadow VFTA table, this is needed because
+	** the real vlan filter table gets cleared during
+	** a soft reset and the driver needs to be able
+	** to repopulate it.
+	*/
+	u32		shadow_vfta[IGB_VFTA_SIZE];
+
+	/* Info about the interface */
+	u8		link_active;
+	u16		link_speed;
+	u16		link_duplex;
+	u32		smartspeed;
+	u32		dma_coalesce;
+
+	/* Interface queues */
+	struct igb_queue	*queues;
+
+	/*
+	 * Transmit rings
+	 */
+	struct tx_ring		*tx_rings;
+        u16			num_tx_desc;	/* descriptors per tx ring */
+
+	/* Multicast array pointer */
+	u8			*mta;
+
+	/* 
+	 * Receive rings
+	 */
+	struct rx_ring		*rx_rings;
+	bool			rx_hdr_split;
+        u16			num_rx_desc;	/* descriptors per rx ring */
+	int			rx_process_limit;	/* rx budget per pass */
+	u32			rx_mbuf_sz;	/* cluster size for rx buffers */
+	u32			rx_mask;
+
+	/* Misc stats maintained by the driver */
+	unsigned long	dropped_pkts;
+	unsigned long	mbuf_defrag_failed;
+	unsigned long	mbuf_header_failed;
+	unsigned long	mbuf_packet_failed;
+	unsigned long	no_tx_map_avail;
+        unsigned long	no_tx_dma_setup;
+	unsigned long	watchdog_events;
+	unsigned long	rx_overruns;
+	unsigned long	device_control;
+	unsigned long	rx_control;
+	unsigned long	int_mask;
+	unsigned long	eint_mask;
+	unsigned long	packet_buf_alloc_rx;
+	unsigned long	packet_buf_alloc_tx;
+
+	boolean_t	in_detach;	/* detach in progress */
+
+#ifdef IGB_IEEE1588
+	/* IEEE 1588 precision time support */
+	struct cyclecounter	cycles;
+	struct nettimer		clock;
+	struct nettime_compare	compare;
+	struct hwtstamp_ctrl	hwtstamp;
+#endif
+
+	void 			*stats;	/* hardware statistics block */
+};
+
+/* ******************************************************************************
+ * vendor_info_array
+ *
+ * This array contains the list of Subvendor/Subdevice IDs on which the driver
+ * should load.
+ *
+ * ******************************************************************************/
+typedef struct _igb_vendor_info_t {
+	unsigned int vendor_id;		/* PCI vendor (IGB_VENDOR_ID) */
+	unsigned int device_id;		/* PCI device id */
+	unsigned int subvendor_id;	/* subsystem vendor, or PCI_ANY_ID */
+	unsigned int subdevice_id;	/* subsystem device, or PCI_ANY_ID */
+	unsigned int index;
+} igb_vendor_info_t;
+
+
+/* Per-descriptor software state for the transmit ring. */
+struct igb_tx_buffer {
+	int		next_eop;  /* Index of the desc to watch */
+        struct mbuf	*m_head;     /* mbuf chain being transmitted */
+        bus_dmamap_t    map;         /* bus_dma map for packet */
+};
+
+/* Per-descriptor software state for the receive ring (header split
+** keeps separate header and payload mbufs/maps). */
+struct igb_rx_buf {
+        struct mbuf	*m_head;	/* header mbuf */
+        struct mbuf	*m_pack;	/* packet (payload) mbuf */
+	bus_dmamap_t	hmap;	/* bus_dma map for header */
+	bus_dmamap_t	pmap;	/* bus_dma map for packet */
+};
+
+/*
+** Find the number of unrefreshed RX descriptors
+*/
+/*
+** Find the number of unrefreshed RX descriptors: the slots between
+** next_to_refresh and next_to_check, minus one guard slot, wrapping
+** around the ring when the indices have crossed.
+*/
+static inline u16
+igb_rx_unrefreshed(struct rx_ring *rxr)
+{
+	struct adapter	*adapter = rxr->adapter;
+	u16 pending;
+
+	if (rxr->next_to_check <= rxr->next_to_refresh)
+		pending = (adapter->num_rx_desc + rxr->next_to_check) -
+		    rxr->next_to_refresh - 1;
+	else
+		pending = rxr->next_to_check - rxr->next_to_refresh - 1;
+	return (pending);
+}
+
+/* Convenience wrappers around the softc/TX/RX mutexes (see mutex(9)).
+** TX/RX variants operate on a ring, the CORE variants on the softc. */
+#define	IGB_CORE_LOCK_INIT(_sc, _name) \
+	mtx_init(&(_sc)->core_mtx, _name, "IGB Core Lock", MTX_DEF)
+#define	IGB_CORE_LOCK_DESTROY(_sc)	mtx_destroy(&(_sc)->core_mtx)
+#define	IGB_CORE_LOCK(_sc)		mtx_lock(&(_sc)->core_mtx)
+#define	IGB_CORE_UNLOCK(_sc)		mtx_unlock(&(_sc)->core_mtx)
+#define	IGB_CORE_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->core_mtx, MA_OWNED)
+
+#define	IGB_TX_LOCK_DESTROY(_sc)	mtx_destroy(&(_sc)->tx_mtx)
+#define	IGB_TX_LOCK(_sc)		mtx_lock(&(_sc)->tx_mtx)
+#define	IGB_TX_UNLOCK(_sc)		mtx_unlock(&(_sc)->tx_mtx)
+#define	IGB_TX_TRYLOCK(_sc)		mtx_trylock(&(_sc)->tx_mtx)
+#define	IGB_TX_LOCK_ASSERT(_sc)		mtx_assert(&(_sc)->tx_mtx, MA_OWNED)
+
+#define	IGB_RX_LOCK_DESTROY(_sc)	mtx_destroy(&(_sc)->rx_mtx)
+#define	IGB_RX_LOCK(_sc)		mtx_lock(&(_sc)->rx_mtx)
+#define	IGB_RX_UNLOCK(_sc)		mtx_unlock(&(_sc)->rx_mtx)
+#define	IGB_RX_LOCK_ASSERT(_sc)		mtx_assert(&(_sc)->rx_mtx, MA_OWNED)
+
+/* Fold a fresh 32-bit VF register read into a 64-bit counter that
+** survives hardware rollover.  NOTE(review): 'last' and 'cur' are
+** evaluated multiple times -- callers must pass simple lvalues. */
+#define UPDATE_VF_REG(reg, last, cur)		\
+{						\
+	u32 new = E1000_READ_REG(hw, reg);	\
+	if (new < last)				\
+		cur += 0x100000000LL;		\
+	last = new;				\
+	cur &= 0xFFFFFFFF00000000LL;		\
+	cur |= new;				\
+}
+
+#if __FreeBSD_version < 800504
+/*
+** Compatibility shim for kernels older than 800504, which lack
+** drbr_needs_enqueue(): report whether the buf_ring (or ALTQ, when
+** enabled on the interface) still holds packets to be enqueued.
+*/
+static __inline int
+drbr_needs_enqueue(struct ifnet *ifp, struct buf_ring *br)
+{
+#ifdef ALTQ
+	if (ALTQ_IS_ENABLED(&ifp->if_snd))
+		return (1);
+#endif
+	return (!buf_ring_empty(br));
+}
+#endif
+
+#endif /* _IGB_H_DEFINED_ */
+
+
diff --git a/lib/librte_pmd_ixgbe/Makefile b/lib/librte_pmd_ixgbe/Makefile
new file mode 100644 (file)
index 0000000..1fa9def
--- /dev/null
@@ -0,0 +1,65 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_ixgbe.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+#
+# all source are stored in SRCS-y
+#
+# Shared base-driver code (imported FreeBSD driver, under ixgbe/)
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe/ixgbe_common.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe/ixgbe_82598.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe/ixgbe_82599.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe/ixgbe_x540.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe/ixgbe_phy.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe/ixgbe_api.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe/ixgbe_vf.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe/ixgbe_mbx.c
+# DPDK PMD glue (library root)
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_rxtx.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_fdir.c
+
+
+# this lib depends upon:
+DEPDIRS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += lib/librte_eal lib/librte_ether
+DEPDIRS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += lib/librte_mempool lib/librte_mbuf
+DEPDIRS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += lib/librte_net lib/librte_malloc
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_pmd_ixgbe/ixgbe/README b/lib/librte_pmd_ixgbe/ixgbe/README
new file mode 100644 (file)
index 0000000..d0e7bdb
--- /dev/null
@@ -0,0 +1,70 @@
+..
+  BSD LICENSE
+
+  Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+  All rights reserved.
+
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions 
+  are met:
+
+    * Redistributions of source code must retain the above copyright 
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in 
+      the documentation and/or other materials provided with the 
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived 
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ version: DPDK.L.1.2.3-3
+
+Intel® IXGBE driver
+===================
+
+This directory contains code from the Intel® Network Adapter Driver for PCI-E
+10 Gigabit Network Connections under FreeBSD, version 2.4.4, dated 10/25/2011.
+This code is available from
+`http://downloadmirror.intel.com/14688/eng/ixgbe-2.4.4.tar.gz`
+
+This driver is valid for the product(s) listed below
+
+* Intel® 10 Gigabit AF DA Dual Port Server Adapter
+* Intel® 10 Gigabit AT Server Adapter
+* Intel® 10 Gigabit AT2 Server Adapter
+* Intel® 10 Gigabit CX4 Dual Port Server Adapter
+* Intel® 10 Gigabit XF LR Server Adapter
+* Intel® 10 Gigabit XF SR Dual Port Server Adapter
+* Intel® 10 Gigabit XF SR Server Adapter
+* Intel® 82598 10 Gigabit Ethernet Controller
+* Intel® 82599 10 Gigabit Ethernet Controller
+* Intel® Ethernet Controller X540-AT2
+* Intel® Ethernet Server Adapter X520 Series
+* Intel® Ethernet Server Adapter X520-T2
+
+Updating driver
+===============
+
+The following modifications have been made to this code to integrate it with the
+Intel® DPDK:
+
+
+ixgbe_osdep.h
+-------------
+
+The OS dependency layer has been extensively modified to support the drivers in
+the Intel® DPDK environment. It is expected that these files will not need to be
+changed on updating the driver.
diff --git a/lib/librte_pmd_ixgbe/ixgbe/ixgbe.c b/lib/librte_pmd_ixgbe/ixgbe/ixgbe.c
new file mode 100644 (file)
index 0000000..1ec23c9
--- /dev/null
@@ -0,0 +1,5442 @@
+/******************************************************************************
+
+  Copyright (c) 2001-2010, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#ifdef HAVE_KERNEL_OPTION_HEADERS
+#include "opt_inet.h"
+#include "opt_inet6.h"
+#endif
+
+#include "ixgbe.h"
+
+/*********************************************************************
+ *  Set this to one to display debug statistics
+ *********************************************************************/
+int             ixgbe_display_debug_stats = 0;
+
+/*********************************************************************
+ *  Driver version
+ *********************************************************************/
+char ixgbe_driver_version[] = "2.4.4";
+
+/*********************************************************************
+ *  PCI Device ID Table
+ *
+ *  Used by probe to select devices to load on
+ *  Last field stores an index into ixgbe_strings
+ *  Last entry must be all 0s
+ *
+ *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
+ *********************************************************************/
+
+static ixgbe_vendor_info_t ixgbe_vendor_info_array[] =
+{
+       {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, 0, 0, 0},
+       {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, 0, 0, 0},
+       {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, 0, 0, 0},
+       {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, 0, 0, 0},
+       {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, 0, 0, 0},
+       {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, 0, 0, 0},
+       {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, 0, 0, 0},
+       {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, 0, 0, 0},
+       {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, 0, 0, 0},
+       {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, 0, 0, 0},
+       {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, 0, 0, 0},
+       {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, 0, 0, 0},
+       {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, 0, 0, 0},
+       {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, 0, 0, 0},
+       {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, 0, 0, 0},
+       {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, 0, 0, 0},
+       {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, 0, 0, 0},
+       {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, 0, 0, 0},
+       {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, 0, 0, 0},
+       {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, 0, 0, 0},
+       {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, 0, 0, 0},
+       {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, 0, 0, 0},
+       /* required last entry */
+       {0, 0, 0, 0, 0}
+};
+
+/*********************************************************************
+ *  Table of branding strings
+ *********************************************************************/
+
+static char    *ixgbe_strings[] = {
+       "Intel(R) PRO/10GbE PCI-Express Network Driver"
+};
+
+/*********************************************************************
+ *  Function prototypes
+ *********************************************************************/
+static int      ixgbe_probe(device_t);
+static int      ixgbe_attach(device_t);
+static int      ixgbe_detach(device_t);
+static int      ixgbe_shutdown(device_t);
+static void     ixgbe_start(struct ifnet *);
+static void     ixgbe_start_locked(struct tx_ring *, struct ifnet *);
+#if __FreeBSD_version >= 800000
+static int     ixgbe_mq_start(struct ifnet *, struct mbuf *);
+static int     ixgbe_mq_start_locked(struct ifnet *,
+                    struct tx_ring *, struct mbuf *);
+static void    ixgbe_qflush(struct ifnet *);
+#endif
+static int      ixgbe_ioctl(struct ifnet *, u_long, caddr_t);
+static void    ixgbe_init(void *);
+static void    ixgbe_init_locked(struct adapter *);
+static void     ixgbe_stop(void *);
+static void     ixgbe_media_status(struct ifnet *, struct ifmediareq *);
+static int      ixgbe_media_change(struct ifnet *);
+static void     ixgbe_identify_hardware(struct adapter *);
+static int      ixgbe_allocate_pci_resources(struct adapter *);
+static int      ixgbe_allocate_msix(struct adapter *);
+static int      ixgbe_allocate_legacy(struct adapter *);
+static int     ixgbe_allocate_queues(struct adapter *);
+static int     ixgbe_setup_msix(struct adapter *);
+static void    ixgbe_free_pci_resources(struct adapter *);
+static void    ixgbe_local_timer(void *);
+static int     ixgbe_setup_interface(device_t, struct adapter *);
+static void    ixgbe_config_link(struct adapter *);
+
+static int      ixgbe_allocate_transmit_buffers(struct tx_ring *);
+static int     ixgbe_setup_transmit_structures(struct adapter *);
+static void    ixgbe_setup_transmit_ring(struct tx_ring *);
+static void     ixgbe_initialize_transmit_units(struct adapter *);
+static void     ixgbe_free_transmit_structures(struct adapter *);
+static void     ixgbe_free_transmit_buffers(struct tx_ring *);
+
+static int      ixgbe_allocate_receive_buffers(struct rx_ring *);
+static int      ixgbe_setup_receive_structures(struct adapter *);
+static int     ixgbe_setup_receive_ring(struct rx_ring *);
+static void     ixgbe_initialize_receive_units(struct adapter *);
+static void     ixgbe_free_receive_structures(struct adapter *);
+static void     ixgbe_free_receive_buffers(struct rx_ring *);
+static void    ixgbe_setup_hw_rsc(struct rx_ring *);
+
+static void     ixgbe_enable_intr(struct adapter *);
+static void     ixgbe_disable_intr(struct adapter *);
+static void     ixgbe_update_stats_counters(struct adapter *);
+static bool    ixgbe_txeof(struct tx_ring *);
+static bool    ixgbe_rxeof(struct ix_queue *, int);
+static void    ixgbe_rx_checksum(u32, struct mbuf *, u32);
+static void     ixgbe_set_promisc(struct adapter *);
+static void     ixgbe_set_multi(struct adapter *);
+static void     ixgbe_update_link_status(struct adapter *);
+static void    ixgbe_refresh_mbufs(struct rx_ring *, int);
+static int      ixgbe_xmit(struct tx_ring *, struct mbuf **);
+static int     ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS);
+static int     ixgbe_set_advertise(SYSCTL_HANDLER_ARGS);
+static int     ixgbe_dma_malloc(struct adapter *, bus_size_t,
+                   struct ixgbe_dma_alloc *, int);
+static void     ixgbe_dma_free(struct adapter *, struct ixgbe_dma_alloc *);
+static void    ixgbe_add_rx_process_limit(struct adapter *, const char *,
+                   const char *, int *, int);
+static bool    ixgbe_tx_ctx_setup(struct tx_ring *, struct mbuf *);
+static bool    ixgbe_tso_setup(struct tx_ring *, struct mbuf *, u32 *);
+static void    ixgbe_set_ivar(struct adapter *, u8, u8, s8);
+static void    ixgbe_configure_ivars(struct adapter *);
+static u8 *    ixgbe_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
+
+static void    ixgbe_setup_vlan_hw_support(struct adapter *);
+static void    ixgbe_register_vlan(void *, struct ifnet *, u16);
+static void    ixgbe_unregister_vlan(void *, struct ifnet *, u16);
+
+static void     ixgbe_add_hw_stats(struct adapter *adapter);
+
+static __inline void ixgbe_rx_discard(struct rx_ring *, int);
+static __inline void ixgbe_rx_input(struct rx_ring *, struct ifnet *,
+                   struct mbuf *, u32);
+
+/* Support for pluggable optic modules */
+static bool    ixgbe_sfp_probe(struct adapter *);
+static void    ixgbe_setup_optics(struct adapter *);
+
+/* Legacy (single vector) interrupt handler */
+static void    ixgbe_legacy_irq(void *);
+
+/* The MSI/X Interrupt handlers */
+static void    ixgbe_msix_que(void *);
+static void    ixgbe_msix_link(void *);
+
+/* Deferred interrupt tasklets */
+static void    ixgbe_handle_que(void *, int);
+static void    ixgbe_handle_link(void *, int);
+static void    ixgbe_handle_msf(void *, int);
+static void    ixgbe_handle_mod(void *, int);
+
+#ifdef IXGBE_FDIR
+static void    ixgbe_atr(struct tx_ring *, struct mbuf *);
+static void    ixgbe_reinit_fdir(void *, int);
+#endif
+
+/*********************************************************************
+ *  FreeBSD Device Interface Entry Points
+ *********************************************************************/
+
+/* Newbus glue: method table, driver descriptor and module registration. */
+static device_method_t ixgbe_methods[] = {
+       /* Device interface */
+       DEVMETHOD(device_probe, ixgbe_probe),
+       DEVMETHOD(device_attach, ixgbe_attach),
+       DEVMETHOD(device_detach, ixgbe_detach),
+       DEVMETHOD(device_shutdown, ixgbe_shutdown),
+       {0, 0}          /* terminator */
+};
+
+static driver_t ixgbe_driver = {
+       "ix", ixgbe_methods, sizeof(struct adapter),
+};
+
+devclass_t ixgbe_devclass;
+DRIVER_MODULE(ixgbe, pci, ixgbe_driver, ixgbe_devclass, 0, 0);
+
+/* Declare load-order dependencies on the pci and ether modules. */
+MODULE_DEPEND(ixgbe, pci, 1, 1, 1);
+MODULE_DEPEND(ixgbe, ether, 1, 1, 1);
+
+/*
+** TUNEABLE PARAMETERS:
+** (each is settable from the loader via the named hw.ixgbe.* knob)
+*/
+
+/*
+** AIM: Adaptive Interrupt Moderation
+** which means that the interrupt rate
+** is varied over time based on the
+** traffic for that interrupt vector
+*/
+static int ixgbe_enable_aim = TRUE;
+TUNABLE_INT("hw.ixgbe.enable_aim", &ixgbe_enable_aim);
+
+/* Presumably the per-vector interrupt rate ceiling -- confirm against
+   the EITR programming in the interrupt setup code. */
+static int ixgbe_max_interrupt_rate = (8000000 / IXGBE_LOW_LATENCY);
+TUNABLE_INT("hw.ixgbe.max_interrupt_rate", &ixgbe_max_interrupt_rate);
+
+/* How many packets rxeof tries to clean at a time */
+static int ixgbe_rx_process_limit = 128;
+TUNABLE_INT("hw.ixgbe.rx_process_limit", &ixgbe_rx_process_limit);
+
+/*
+** Smart speed setting, default to on
+** this only works as a compile option
+** right now as it's applied during attach, set
+** this to 'ixgbe_smart_speed_off' to
+** disable.
+*/
+static int ixgbe_smart_speed = ixgbe_smart_speed_on;
+
+/*
+ * MSIX should be the default for best performance,
+ * but this allows it to be forced off for testing.
+ */
+static int ixgbe_enable_msix = 1;
+TUNABLE_INT("hw.ixgbe.enable_msix", &ixgbe_enable_msix);
+
+/*
+ * Header split: this causes the hardware to DMA
+ * the header into a separate mbuf from the payload,
+ * it can be a performance win in some workloads, but
+ * in others it actually hurts, it's off by default.
+ *
+ * NOTE(review): declared bool but registered with TUNABLE_INT,
+ * which assumes int-sized storage -- confirm this is benign.
+ */
+static bool ixgbe_header_split = FALSE;
+TUNABLE_INT("hw.ixgbe.hdr_split", &ixgbe_header_split);
+
+/*
+ * Number of Queues, can be set to 0,
+ * it then autoconfigures based on the
+ * number of cpus with a max of 8. This
+ * can be overridden manually here.
+ */
+static int ixgbe_num_queues = 0;
+TUNABLE_INT("hw.ixgbe.num_queues", &ixgbe_num_queues);
+
+/*
+** Number of TX descriptors per ring,
+** setting higher than RX as this seems
+** the better performing choice.
+*/
+static int ixgbe_txd = PERFORM_TXD;
+TUNABLE_INT("hw.ixgbe.txd", &ixgbe_txd);
+
+/* Number of RX descriptors per ring */
+static int ixgbe_rxd = PERFORM_RXD;
+TUNABLE_INT("hw.ixgbe.rxd", &ixgbe_rxd);
+
+/* Keep running tab on them for sanity check */
+static int ixgbe_total_ports;
+
+#ifdef IXGBE_FDIR
+/*
+** For Flow Director: this is the
+** number of TX packets we sample
+** for the filter pool, this means
+** every 20th packet will be probed.
+**
+** This feature can be disabled by 
+** setting this to 0.
+*/
+static int atr_sample_rate = 20;
+/* 
+** Flow Director actually 'steals'
+** part of the packet buffer as its
+** filter pool, this variable controls
+** how much it uses:
+**  0 = 64K, 1 = 128K, 2 = 256K
+*/
+static int fdir_pballoc = 1;
+#endif
+
+/*********************************************************************
+ *  Device identification routine
+ *
+ *  ixgbe_probe determines if the driver should be loaded on
+ *  adapter based on PCI vendor/device id of the adapter.
+ *
+ *  return BUS_PROBE_DEFAULT on success, positive on failure
+ *********************************************************************/
+
+static int
+ixgbe_probe(device_t dev)
+{
+       ixgbe_vendor_info_t *ent;
+
+       u16     pci_vendor_id = 0;
+       u16     pci_device_id = 0;
+       u16     pci_subvendor_id = 0;
+       u16     pci_subdevice_id = 0;
+       char    adapter_name[256];
+
+       INIT_DEBUGOUT("ixgbe_probe: begin");
+
+       /* Cheap reject: only Intel devices can match the ID table. */
+       pci_vendor_id = pci_get_vendor(dev);
+       if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
+               return (ENXIO);
+
+       pci_device_id = pci_get_device(dev);
+       pci_subvendor_id = pci_get_subvendor(dev);
+       pci_subdevice_id = pci_get_subdevice(dev);
+
+       /* Walk the table; a subvendor/subdevice of 0 acts as a wildcard. */
+       ent = ixgbe_vendor_info_array;
+       while (ent->vendor_id != 0) {
+               if ((pci_vendor_id == ent->vendor_id) &&
+                   (pci_device_id == ent->device_id) &&
+
+                   ((pci_subvendor_id == ent->subvendor_id) ||
+                    (ent->subvendor_id == 0)) &&
+
+                   ((pci_subdevice_id == ent->subdevice_id) ||
+                    (ent->subdevice_id == 0))) {
+                       sprintf(adapter_name, "%s, Version - %s",
+                               ixgbe_strings[ent->index],
+                               ixgbe_driver_version);
+                       device_set_desc_copy(dev, adapter_name);
+                       /* Counted for the nmbclusters sanity check in attach */
+                       ++ixgbe_total_ports;
+                       return (BUS_PROBE_DEFAULT);
+               }
+               ent++;
+       }
+       return (ENXIO);
+}
+
+/*********************************************************************
+ *  Device initialization routine
+ *
+ *  The attach entry point is called when the driver is being loaded.
+ *  This routine identifies the type of hardware, allocates all resources
+ *  and initializes the hardware.
+ *
+ *  return 0 on success, positive on failure
+ *********************************************************************/
+
+static int
+ixgbe_attach(device_t dev)
+{
+       struct adapter *adapter;
+       struct ixgbe_hw *hw;
+       int             error = 0;
+       u16             csum;
+       u32             ctrl_ext;
+
+       INIT_DEBUGOUT("ixgbe_attach: begin");
+
+       /* Honor hint.ixgbe.N.disabled from the loader/device hints. */
+       if (resource_disabled("ixgbe", device_get_unit(dev))) {
+               device_printf(dev, "Disabled by device hint\n");
+               return (ENXIO);
+       }
+
+       /* Allocate, clear, and link in our adapter structure */
+       adapter = device_get_softc(dev);
+       adapter->dev = adapter->osdep.dev = dev;
+       hw = &adapter->hw;
+
+       /* Core Lock Init*/
+       IXGBE_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
+
+       /* SYSCTL APIs */
+
+       SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+                       SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+                       OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
+                       adapter, 0, ixgbe_set_flowcntl, "I", "Flow Control");
+
+       SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
+                       SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+                       OID_AUTO, "enable_aim", CTLTYPE_INT|CTLFLAG_RW,
+                       &ixgbe_enable_aim, 1, "Interrupt Moderation");
+
+       /*
+       ** Allow a kind of speed control by forcing the autoneg
+       ** advertised speed list to only a certain value, this
+       ** supports 1G on 82599 devices, and 100Mb on x540.
+       */
+       SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+                       SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+                       OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW,
+                       adapter, 0, ixgbe_set_advertise, "I", "Link Speed");
+
+
+       /* Set up the timer callout */
+       callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
+
+       /* Determine hardware revision */
+       ixgbe_identify_hardware(adapter);
+
+       /* Do base PCI setup - map BAR0 */
+       if (ixgbe_allocate_pci_resources(adapter)) {
+               device_printf(dev, "Allocation of PCI resources failed\n");
+               error = ENXIO;
+               goto err_out;
+       }
+
+       /* Do descriptor calc and sanity checks */
+       if (((ixgbe_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
+           ixgbe_txd < MIN_TXD || ixgbe_txd > MAX_TXD) {
+               device_printf(dev, "TXD config issue, using default!\n");
+               adapter->num_tx_desc = DEFAULT_TXD;
+       } else
+               adapter->num_tx_desc = ixgbe_txd;
+
+       /*
+       ** With many RX rings it is easy to exceed the
+       ** system mbuf allocation. Tuning nmbclusters
+       ** can alleviate this.
+       */
+       if (nmbclusters > 0 ) {
+               int s;
+               s = (ixgbe_rxd * adapter->num_queues) * ixgbe_total_ports;
+               if (s > nmbclusters) {
+                       device_printf(dev, "RX Descriptors exceed "
+                           "system mbuf max, using default instead!\n");
+                       ixgbe_rxd = DEFAULT_RXD;
+               }
+       }
+
+       /*
+       ** Same sanity check for the RX ring. Fix: the original code
+       ** compared ixgbe_rxd against the TX bounds (MIN_TXD/MAX_TXD);
+       ** the RX bounds are the correct limits here.
+       */
+       if (((ixgbe_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
+           ixgbe_rxd < MIN_RXD || ixgbe_rxd > MAX_RXD) {
+               device_printf(dev, "RXD config issue, using default!\n");
+               adapter->num_rx_desc = DEFAULT_RXD;
+       } else
+               adapter->num_rx_desc = ixgbe_rxd;
+
+       /* Allocate our TX/RX Queues */
+       if (ixgbe_allocate_queues(adapter)) {
+               error = ENOMEM;
+               goto err_out;
+       }
+
+       /* Allocate multicast array memory. */
+       adapter->mta = malloc(sizeof(u8) * IXGBE_ETH_LENGTH_OF_ADDRESS *
+           MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
+       if (adapter->mta == NULL) {
+               device_printf(dev, "Can not allocate multicast setup array\n");
+               error = ENOMEM;
+               goto err_late;
+       }
+
+       /* Initialize the shared code */
+       error = ixgbe_init_shared_code(hw);
+       if (error == IXGBE_ERR_SFP_NOT_PRESENT) {
+               /*
+               ** No optics in this port, set up
+               ** so the timer routine will probe 
+               ** for later insertion.
+               */
+               adapter->sfp_probe = TRUE;
+               error = 0;
+       } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED) {
+               device_printf(dev,"Unsupported SFP+ module detected!\n");
+               error = EIO;
+               goto err_late;
+       } else if (error) {
+               device_printf(dev,"Unable to initialize the shared code\n");
+               error = EIO;
+               goto err_late;
+       }
+
+       /* Make sure we have a good EEPROM before we read from it */
+       if (ixgbe_validate_eeprom_checksum(&adapter->hw, &csum) < 0) {
+               device_printf(dev,"The EEPROM Checksum Is Not Valid\n");
+               error = EIO;
+               goto err_late;
+       }
+
+       /* Get Hardware Flow Control setting */
+       hw->fc.requested_mode = ixgbe_fc_full;
+       adapter->fc = hw->fc.requested_mode;
+       hw->fc.pause_time = IXGBE_FC_PAUSE;
+       hw->fc.low_water = IXGBE_FC_LO;
+       hw->fc.high_water[0] = IXGBE_FC_HI;
+       hw->fc.send_xon = TRUE;
+
+       error = ixgbe_init_hw(hw);
+       if (error == IXGBE_ERR_EEPROM_VERSION) {
+               device_printf(dev, "This device is a pre-production adapter/"
+                   "LOM.  Please be aware there may be issues associated "
+                   "with your hardware.\n If you are experiencing problems "
+                   "please contact your Intel or hardware representative "
+                   "who provided you with this hardware.\n");
+       } else if (error == IXGBE_ERR_SFP_NOT_SUPPORTED)
+               device_printf(dev,"Unsupported SFP+ Module\n");
+
+       if (error) {
+               error = EIO;
+               device_printf(dev,"Hardware Initialization Failure\n");
+               goto err_late;
+       }
+
+       /* Detect and set physical type */
+       ixgbe_setup_optics(adapter);
+
+       /* Prefer MSI-X when the bus granted more than one vector. */
+       if ((adapter->msix > 1) && (ixgbe_enable_msix))
+               error = ixgbe_allocate_msix(adapter);
+       else
+               error = ixgbe_allocate_legacy(adapter);
+       if (error)
+               goto err_late;
+
+       /* Setup OS specific network interface */
+       if (ixgbe_setup_interface(dev, adapter) != 0)
+               goto err_late;
+
+       /* Sysctl for limiting the amount of work done in the taskqueue */
+       ixgbe_add_rx_process_limit(adapter, "rx_processing_limit",
+           "max number of rx packets to process", &adapter->rx_process_limit,
+           ixgbe_rx_process_limit);
+
+       /* Initialize statistics */
+       ixgbe_update_stats_counters(adapter);
+
+       /* Register for VLAN events */
+       adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
+           ixgbe_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
+       adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
+           ixgbe_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
+
+       /* Print PCIE bus type/speed/width info */
+       ixgbe_get_bus_info(hw);
+       device_printf(dev,"PCI Express Bus: Speed %s %s\n",
+           ((hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0Gb/s":
+           (hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5Gb/s":"Unknown"),
+           (hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
+           (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
+           (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
+           ("Unknown"));
+
+       if ((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
+           (hw->bus.speed == ixgbe_bus_speed_2500)) {
+               device_printf(dev, "PCI-Express bandwidth available"
+                   " for this card\n     is not sufficient for"
+                   " optimal performance.\n");
+               device_printf(dev, "For optimal performance a x8 "
+                   "PCIE, or x4 PCIE 2 slot is required.\n");
+       }
+
+       /* let hardware know driver is loaded */
+       ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
+       ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
+       IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
+
+       ixgbe_add_hw_stats(adapter);
+
+       INIT_DEBUGOUT("ixgbe_attach: end");
+       return (0);
+err_late:
+       ixgbe_free_transmit_structures(adapter);
+       ixgbe_free_receive_structures(adapter);
+err_out:
+       if (adapter->ifp != NULL)
+               if_free(adapter->ifp);
+       ixgbe_free_pci_resources(adapter);
+       free(adapter->mta, M_DEVBUF);
+       return (error);
+
+}
+
+/*********************************************************************
+ *  Device removal routine
+ *
+ *  The detach entry point is called when the driver is being removed.
+ *  This routine stops the adapter and deallocates all the resources
+ *  that were allocated for driver operation.
+ *
+ *  return 0 on success, positive on failure
+ *********************************************************************/
+
+static int
+ixgbe_detach(device_t dev)
+{
+       struct adapter *adapter = device_get_softc(dev);
+       struct ix_queue *que = adapter->queues;
+       u32     ctrl_ext;
+
+       INIT_DEBUGOUT("ixgbe_detach: begin");
+
+       /* Make sure VLANS are not using driver */
+       if (adapter->ifp->if_vlantrunk != NULL) {
+               device_printf(dev,"Vlan in use, detach first\n");
+               return (EBUSY);
+       }
+
+       IXGBE_CORE_LOCK(adapter);
+       ixgbe_stop(adapter);
+       IXGBE_CORE_UNLOCK(adapter);
+
+       /* Drain and free each queue's taskqueue before teardown. */
+       for (int i = 0; i < adapter->num_queues; i++, que++) {
+               if (que->tq) {
+                       taskqueue_drain(que->tq, &que->que_task);
+                       taskqueue_free(que->tq);
+               }
+       }
+
+       /* Drain the Link queue */
+       if (adapter->tq) {
+               taskqueue_drain(adapter->tq, &adapter->link_task);
+               taskqueue_drain(adapter->tq, &adapter->mod_task);
+               taskqueue_drain(adapter->tq, &adapter->msf_task);
+#ifdef IXGBE_FDIR
+               taskqueue_drain(adapter->tq, &adapter->fdir_task);
+#endif
+               taskqueue_free(adapter->tq);
+       }
+
+       /* let hardware know driver is unloading */
+       ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT);
+       ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
+       IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, ctrl_ext);
+
+       /* Unregister VLAN events */
+       if (adapter->vlan_attach != NULL)
+               EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
+       if (adapter->vlan_detach != NULL)
+               EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
+
+       /* Detach from the network stack, then release all resources. */
+       ether_ifdetach(adapter->ifp);
+       callout_drain(&adapter->timer);
+       ixgbe_free_pci_resources(adapter);
+       bus_generic_detach(dev);
+       if_free(adapter->ifp);
+
+       ixgbe_free_transmit_structures(adapter);
+       ixgbe_free_receive_structures(adapter);
+       free(adapter->mta, M_DEVBUF);
+
+       IXGBE_CORE_LOCK_DESTROY(adapter);
+       return (0);
+}
+
+/*********************************************************************
+ *
+ *  Shutdown entry point
+ *
+ **********************************************************************/
+
+static int
+ixgbe_shutdown(device_t dev)
+{
+       struct adapter *adapter = device_get_softc(dev);
+       /* Quiesce the hardware under the core lock; nothing is freed here. */
+       IXGBE_CORE_LOCK(adapter);
+       ixgbe_stop(adapter);
+       IXGBE_CORE_UNLOCK(adapter);
+       return (0);
+}
+
+
+/*********************************************************************
+ *  Transmit entry point
+ *
+ *  ixgbe_start is called by the stack to initiate a transmit.
+ *  The driver will remain in this routine as long as there are
+ *  packets to transmit and transmit resources are available.
+ *  In case resources are not available stack is notified and
+ *  the packet is requeued.
+ **********************************************************************/
+
+static void
+ixgbe_start_locked(struct tx_ring *txr, struct ifnet * ifp)
+{
+       struct mbuf    *m_head;
+       struct adapter *adapter = txr->adapter;
+
+       IXGBE_TX_LOCK_ASSERT(txr);
+
+       /* Bail unless the interface is RUNNING and not OACTIVE. */
+       if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
+           IFF_DRV_RUNNING)
+               return;
+       if (!adapter->link_active)
+               return;
+
+       while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
+
+               IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
+               if (m_head == NULL)
+                       break;
+
+               /* On xmit failure, requeue the mbuf (if not consumed)
+                * and mark the interface busy until descriptors free up. */
+               if (ixgbe_xmit(txr, &m_head)) {
+                       if (m_head == NULL)
+                               break;
+                       ifp->if_drv_flags |= IFF_DRV_OACTIVE;
+                       IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
+                       break;
+               }
+               /* Send a copy of the frame to the BPF listener */
+               ETHER_BPF_MTAP(ifp, m_head);
+
+               /* Set watchdog on */
+               txr->watchdog_time = ticks;
+               txr->queue_status = IXGBE_QUEUE_WORKING;
+
+       }
+       return;
+}
+
+/*
+ * Legacy TX start - called by the stack, this
+ * always uses the first tx ring, and should
+ * not be used with multiqueue tx enabled.
+ */
+static void
+ixgbe_start(struct ifnet *ifp)
+{
+       struct adapter *adapter = ifp->if_softc;
+       struct tx_ring  *txr = adapter->tx_rings;   /* ring 0 only */
+
+       if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+               IXGBE_TX_LOCK(txr);
+               ixgbe_start_locked(txr, ifp);
+               IXGBE_TX_UNLOCK(txr);
+       }
+       return;
+}
+
+#if __FreeBSD_version >= 800000
+/*
+** Multiqueue Transmit driver
+**
+*/
+static int
+ixgbe_mq_start(struct ifnet *ifp, struct mbuf *m)
+{
+       struct adapter  *adapter = ifp->if_softc;
+       struct ix_queue *que;
+       struct tx_ring  *txr;
+       int             i = 0, err = 0;
+
+       /* Which queue to use: hash flowid across rings, default ring 0 */
+       if ((m->m_flags & M_FLOWID) != 0)
+               i = m->m_pkthdr.flowid % adapter->num_queues;
+
+       txr = &adapter->tx_rings[i];
+       que = &adapter->queues[i];
+
+       /* If the ring lock is contended, defer to the queue task
+        * instead of blocking the caller. */
+       if (IXGBE_TX_TRYLOCK(txr)) {
+               err = ixgbe_mq_start_locked(ifp, txr, m);
+               IXGBE_TX_UNLOCK(txr);
+       } else {
+               err = drbr_enqueue(ifp, txr->br, m);
+               taskqueue_enqueue(que->tq, &que->que_task);
+       }
+
+       return (err);
+}
+
+static int
+ixgbe_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m)
+{
+       struct adapter  *adapter = txr->adapter;
+        struct mbuf     *next;
+        int             enqueued, err = 0;
+
+       /* Not running or no link: buffer the mbuf and return. */
+       if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
+           IFF_DRV_RUNNING || adapter->link_active == 0) {
+               if (m != NULL)
+                       err = drbr_enqueue(ifp, txr->br, m);
+               return (err);
+       }
+
+       /* Pick the first frame to send: drain the buf_ring first to
+        * preserve ordering, otherwise send the new mbuf directly. */
+       enqueued = 0;
+       if (m == NULL) {
+               next = drbr_dequeue(ifp, txr->br);
+       } else if (drbr_needs_enqueue(ifp, txr->br)) {
+               if ((err = drbr_enqueue(ifp, txr->br, m)) != 0)
+                       return (err);
+               next = drbr_dequeue(ifp, txr->br);
+       } else
+               next = m;
+
+       /* Process the queue */
+       while (next != NULL) {
+               if ((err = ixgbe_xmit(txr, &next)) != 0) {
+                       if (next != NULL)
+                               err = drbr_enqueue(ifp, txr->br, next);
+                       break;
+               }
+               enqueued++;
+               drbr_stats_update(ifp, next->m_pkthdr.len, next->m_flags);
+               /* Send a copy of the frame to the BPF listener */
+               ETHER_BPF_MTAP(ifp, next);
+               if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
+                       break;
+               /* Reclaim descriptors when low; stop if still short. */
+               if (txr->tx_avail < IXGBE_TX_OP_THRESHOLD)
+                       ixgbe_txeof(txr);
+               if (txr->tx_avail < IXGBE_TX_OP_THRESHOLD) {
+                       ifp->if_drv_flags |= IFF_DRV_OACTIVE;
+                       break;
+               }
+               next = drbr_dequeue(ifp, txr->br);
+       }
+
+       if (enqueued > 0) {
+               /* Set watchdog on */
+               txr->queue_status = IXGBE_QUEUE_WORKING;
+               txr->watchdog_time = ticks;
+       }
+
+       return (err);
+}
+
+/*
+** Flush all ring buffers
+*/
+static void
+ixgbe_qflush(struct ifnet *ifp)
+{
+       struct adapter  *adapter = ifp->if_softc;
+       struct tx_ring  *txr = adapter->tx_rings;
+       struct mbuf     *m;
+
+       /* Free every buffered mbuf on every ring's buf_ring. */
+       for (int i = 0; i < adapter->num_queues; i++, txr++) {
+               IXGBE_TX_LOCK(txr);
+               while ((m = buf_ring_dequeue_sc(txr->br)) != NULL)
+                       m_freem(m);
+               IXGBE_TX_UNLOCK(txr);
+       }
+       if_qflush(ifp);
+}
+#endif /* __FreeBSD_version >= 800000 */
+
+/*********************************************************************
+ *  Ioctl entry point
+ *
+ *  ixgbe_ioctl is called when the user wants to configure the
+ *  interface (address, MTU, flags, multicast list, media,
+ *  capabilities); anything unhandled falls through to
+ *  ether_ioctl().
+ *
+ *  return 0 on success, positive on failure
+ **********************************************************************/
+
+static int
+ixgbe_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
+{
+       struct adapter  *adapter = ifp->if_softc;
+       struct ifreq    *ifr = (struct ifreq *) data;
+#if defined(INET) || defined(INET6)
+       struct ifaddr *ifa = (struct ifaddr *)data;
+       bool            avoid_reset = FALSE;
+#endif
+       int             error = 0;
+
+       switch (command) {
+
+        case SIOCSIFADDR:
+#ifdef INET
+               if (ifa->ifa_addr->sa_family == AF_INET)
+                       avoid_reset = TRUE;
+#endif
+#ifdef INET6
+               if (ifa->ifa_addr->sa_family == AF_INET6)
+                       avoid_reset = TRUE;
+#endif
+#if defined(INET) || defined(INET6)
+               /*
+               ** Calling init results in link renegotiation,
+               ** so we avoid doing it when possible.
+               */
+               if (avoid_reset) {
+                       ifp->if_flags |= IFF_UP;
+                       if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
+                               ixgbe_init(adapter);
+                       if (!(ifp->if_flags & IFF_NOARP))
+                               arp_ifinit(ifp, ifa);
+               } else
+                       error = ether_ioctl(ifp, command, data);
+               break;
+#endif
+       case SIOCSIFMTU:
+               IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
+               /*
+               ** NOTE(review): the upper bound omits ETHER_CRC_LEN while
+               ** max_frame_size below adds it, so max_frame_size can
+               ** exceed IXGBE_MAX_FRAME_SIZE by 4 bytes -- confirm intended.
+               */
+               if (ifr->ifr_mtu > IXGBE_MAX_FRAME_SIZE - ETHER_HDR_LEN) {
+                       error = EINVAL;
+               } else {
+                       IXGBE_CORE_LOCK(adapter);
+                       ifp->if_mtu = ifr->ifr_mtu;
+                       adapter->max_frame_size =
+                               ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+                       /* re-init so rings pick up the new mbuf pool size */
+                       ixgbe_init_locked(adapter);
+                       IXGBE_CORE_UNLOCK(adapter);
+               }
+               break;
+       case SIOCSIFFLAGS:
+               IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
+               IXGBE_CORE_LOCK(adapter);
+               if (ifp->if_flags & IFF_UP) {
+                       if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
+                               /* only PROMISC/ALLMULTI changes need action */
+                               if ((ifp->if_flags ^ adapter->if_flags) &
+                                   (IFF_PROMISC | IFF_ALLMULTI)) {
+                                       ixgbe_set_promisc(adapter);
+                                }
+                       } else
+                               ixgbe_init_locked(adapter);
+               } else
+                       if (ifp->if_drv_flags & IFF_DRV_RUNNING)
+                               ixgbe_stop(adapter);
+               /* remember flags so the XOR above works next time */
+               adapter->if_flags = ifp->if_flags;
+               IXGBE_CORE_UNLOCK(adapter);
+               break;
+       case SIOCADDMULTI:
+       case SIOCDELMULTI:
+               IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
+               if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+                       IXGBE_CORE_LOCK(adapter);
+                       /* quiesce interrupts while the MTA is rewritten */
+                       ixgbe_disable_intr(adapter);
+                       ixgbe_set_multi(adapter);
+                       ixgbe_enable_intr(adapter);
+                       IXGBE_CORE_UNLOCK(adapter);
+               }
+               break;
+       case SIOCSIFMEDIA:
+       case SIOCGIFMEDIA:
+               IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
+               error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
+               break;
+       case SIOCSIFCAP:
+       {
+               /* mask holds only the capability bits being toggled */
+               int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
+               IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
+               if (mask & IFCAP_HWCSUM)
+                       ifp->if_capenable ^= IFCAP_HWCSUM;
+               if (mask & IFCAP_TSO4)
+                       ifp->if_capenable ^= IFCAP_TSO4;
+               if (mask & IFCAP_LRO)
+                       ifp->if_capenable ^= IFCAP_LRO;
+               if (mask & IFCAP_VLAN_HWTAGGING)
+                       ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
+               if (mask & IFCAP_VLAN_HWFILTER)
+                       ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
+               if (mask & IFCAP_VLAN_HWTSO)
+                       ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
+               /* re-init so hardware offload state matches if_capenable */
+               if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+                       IXGBE_CORE_LOCK(adapter);
+                       ixgbe_init_locked(adapter);
+                       IXGBE_CORE_UNLOCK(adapter);
+               }
+               VLAN_CAPABILITIES(ifp);
+               break;
+       }
+
+       default:
+               IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
+               error = ether_ioctl(ifp, command, data);
+               break;
+       }
+
+       return (error);
+}
+
+/*********************************************************************
+ *  Init entry point
+ *
+ *  This routine is used in two ways. It is used by the stack as
+ *  init entry point in network interface structure. It is also used
+ *  by the driver as a hw/sw initialization routine to get to a
+ *  consistent state.
+ *
+ *  return 0 on success, positive on failure
+ **********************************************************************/
+#define IXGBE_MHADD_MFS_SHIFT 16
+
+static void
+ixgbe_init_locked(struct adapter *adapter)
+{
+       struct ifnet   *ifp = adapter->ifp;
+       device_t        dev = adapter->dev;
+       struct ixgbe_hw *hw = &adapter->hw;
+       u32             k, txdctl, mhadd, gpie;
+       u32             rxdctl, rxctrl;
+
+       /* caller must hold the core (softc) mutex */
+       mtx_assert(&adapter->core_mtx, MA_OWNED);
+       INIT_DEBUGOUT("ixgbe_init: begin");
+       hw->adapter_stopped = FALSE;
+       ixgbe_stop_adapter(hw);
+        callout_stop(&adapter->timer);
+
+        /* reprogram the RAR[0] in case user changed it. */
+        ixgbe_set_rar(hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
+
+       /* Get the latest mac address, User can use a LAA */
+       bcopy(IF_LLADDR(adapter->ifp), hw->mac.addr,
+             IXGBE_ETH_LENGTH_OF_ADDRESS);
+       ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
+       hw->addr_ctrl.rar_used_count = 1;
+
+       /* Set the various hardware offload abilities */
+       ifp->if_hwassist = 0;
+       if (ifp->if_capenable & IFCAP_TSO4)
+               ifp->if_hwassist |= CSUM_TSO;
+       if (ifp->if_capenable & IFCAP_TXCSUM) {
+               ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
+#if __FreeBSD_version >= 800000
+               /* SCTP checksum offload is not available on 82598 */
+               if (hw->mac.type != ixgbe_mac_82598EB)
+                       ifp->if_hwassist |= CSUM_SCTP;
+#endif
+       }
+
+       /* Prepare transmit descriptors and buffers */
+       if (ixgbe_setup_transmit_structures(adapter)) {
+               device_printf(dev,"Could not setup transmit structures\n");
+               ixgbe_stop(adapter);
+               return;
+       }
+
+       ixgbe_init_hw(hw);
+       ixgbe_initialize_transmit_units(adapter);
+
+       /* Setup Multicast table */
+       ixgbe_set_multi(adapter);
+
+       /*
+       ** Determine the correct mbuf pool
+       ** for doing jumbo/headersplit
+       */
+       if (adapter->max_frame_size <= 2048)
+               adapter->rx_mbuf_sz = MCLBYTES;
+       else if (adapter->max_frame_size <= 4096)
+               adapter->rx_mbuf_sz = MJUMPAGESIZE;
+       else if (adapter->max_frame_size <= 9216)
+               adapter->rx_mbuf_sz = MJUM9BYTES;
+       else
+               adapter->rx_mbuf_sz = MJUM16BYTES;
+
+       /* Prepare receive descriptors and buffers */
+       if (ixgbe_setup_receive_structures(adapter)) {
+               device_printf(dev,"Could not setup receive structures\n");
+               ixgbe_stop(adapter);
+               return;
+       }
+
+       /* Configure RX settings */
+       ixgbe_initialize_receive_units(adapter);
+
+       gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
+
+       /* Enable Fan Failure Interrupt */
+       gpie |= IXGBE_SDP1_GPIEN;
+
+       /* Add for Module detection */
+       if (hw->mac.type == ixgbe_mac_82599EB)
+               gpie |= IXGBE_SDP2_GPIEN;
+
+       /* Thermal Failure Detection */
+       if (hw->mac.type == ixgbe_mac_X540)
+               gpie |= IXGBE_SDP0_GPIEN;
+
+       if (adapter->msix > 1) {
+               /* Enable Enhanced MSIX mode */
+               gpie |= IXGBE_GPIE_MSIX_MODE;
+               gpie |= IXGBE_GPIE_EIAME | IXGBE_GPIE_PBA_SUPPORT |
+                   IXGBE_GPIE_OCD;
+       }
+       IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
+
+       /* Set MTU size */
+       if (ifp->if_mtu > ETHERMTU) {
+               /* jumbo frames: program max frame size into MHADD.MFS */
+               mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
+               mhadd &= ~IXGBE_MHADD_MFS_MASK;
+               mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
+               IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
+       }
+       
+       /* Now enable all the queues */
+
+       for (int i = 0; i < adapter->num_queues; i++) {
+               txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
+               txdctl |= IXGBE_TXDCTL_ENABLE;
+               /* Set WTHRESH to 8, burst writeback */
+               txdctl |= (8 << 16);
+               IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), txdctl);
+       }
+
+       for (int i = 0; i < adapter->num_queues; i++) {
+               rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
+               if (hw->mac.type == ixgbe_mac_82598EB) {
+                       /*
+                       ** PTHRESH = 21
+                       ** HTHRESH = 4
+                       ** WTHRESH = 8
+                       */
+                       rxdctl &= ~0x3FFFFF;
+                       rxdctl |= 0x080420;
+               }
+               rxdctl |= IXGBE_RXDCTL_ENABLE;
+               IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), rxdctl);
+               /* poll up to ~10ms for the ring-enable bit to latch */
+               for (k = 0; k < 10; k++) {
+                       if (IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)) &
+                           IXGBE_RXDCTL_ENABLE)
+                               break;
+                       else
+                               msec_delay(1);
+               }
+               /* order descriptor writes before the tail bump */
+               wmb();
+               IXGBE_WRITE_REG(hw, IXGBE_RDT(i), adapter->num_rx_desc - 1);
+       }
+
+       /* Set up VLAN support and filter */
+       ixgbe_setup_vlan_hw_support(adapter);
+
+       /* Enable Receive engine */
+       rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+       if (hw->mac.type == ixgbe_mac_82598EB)
+               rxctrl |= IXGBE_RXCTRL_DMBYPS;
+       rxctrl |= IXGBE_RXCTRL_RXEN;
+       ixgbe_enable_rx_dma(hw, rxctrl);
+
+       /* restart the 1Hz watchdog/stats timer */
+       callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
+
+       /* Set up MSI/X routing */
+       if (ixgbe_enable_msix)  {
+               ixgbe_configure_ivars(adapter);
+               /* Set up auto-mask */
+               if (hw->mac.type == ixgbe_mac_82598EB)
+                       IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
+               else {
+                       IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
+                       IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
+               }
+       } else {  /* Simple settings for Legacy/MSI */
+                ixgbe_set_ivar(adapter, 0, 0, 0);
+                ixgbe_set_ivar(adapter, 0, 0, 1);
+               IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
+       }
+
+#ifdef IXGBE_FDIR
+       /* Init Flow director */
+       if (hw->mac.type != ixgbe_mac_82598EB) {
+               u32 hdrm = 64 << fdir_pballoc;
+
+               hw->mac.ops.setup_rxpba(hw, 0, hdrm, PBA_STRATEGY_EQUAL);
+               ixgbe_init_fdir_signature_82599(&adapter->hw, fdir_pballoc);
+       }
+#endif
+
+       /*
+       ** Check on any SFP devices that
+       ** need to be kick-started
+       */
+       if (hw->phy.type == ixgbe_phy_none) {
+               int err = hw->phy.ops.identify(hw);
+               if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
+                       device_printf(dev,
+                           "Unsupported SFP+ module type was detected.\n");
+                       return;
+               }
+       }
+
+       /* Set moderation on the Link interrupt */
+       IXGBE_WRITE_REG(hw, IXGBE_EITR(adapter->linkvec), IXGBE_LINK_ITR);
+
+       /* Config/Enable Link */
+       ixgbe_config_link(adapter);
+
+       /* And now turn on interrupts */
+       ixgbe_enable_intr(adapter);
+
+       /* Now inform the stack we're ready */
+       ifp->if_drv_flags |= IFF_DRV_RUNNING;
+       ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+
+       return;
+}
+
+/*
+** Locked wrapper around ixgbe_init_locked(); installed as the
+** stack's if_init entry point (arg is the adapter softc).
+*/
+static void
+ixgbe_init(void *arg)
+{
+       struct adapter *sc = arg;
+
+       IXGBE_CORE_LOCK(sc);
+       ixgbe_init_locked(sc);
+       IXGBE_CORE_UNLOCK(sc);
+}
+
+
+/*
+**
+** MSIX Interrupt Handlers and Tasklets
+**
+*/
+
+/*
+** Enable (unmask) the interrupt for a single queue vector.
+** 82598 uses the single EIMS register; newer MACs split the
+** 64-bit queue mask across EIMS_EX(0) and EIMS_EX(1).
+**
+** Fix: the shift must be done in 64-bit arithmetic.  The old
+** (u64)(1 << vector) shifted a 32-bit int before widening,
+** which is undefined for vector >= 31 and always left the
+** high dword zero, so vectors 32-63 could never be enabled.
+*/
+static inline void
+ixgbe_enable_queue(struct adapter *adapter, u32 vector)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       u64     queue = ((u64)1 << vector);
+       u32     mask;
+
+       if (hw->mac.type == ixgbe_mac_82598EB) {
+                mask = (IXGBE_EIMS_RTX_QUEUE & queue);
+                IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
+       } else {
+                mask = (queue & 0xFFFFFFFF);
+                if (mask)
+                        IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
+                mask = (queue >> 32);
+                if (mask)
+                        IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
+       }
+}
+
+/*
+** Disable (mask) the interrupt for a single queue vector via
+** EIMC (82598) or EIMC_EX(0)/EIMC_EX(1) (newer MACs).
+**
+** Fix: compute the queue bit in 64-bit arithmetic.  The old
+** (u64)(1 << vector) shifted a 32-bit int before widening --
+** undefined behavior for vector >= 31 and the high dword was
+** always zero, so vectors 32-63 could never be masked.
+*/
+static inline void
+ixgbe_disable_queue(struct adapter *adapter, u32 vector)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       u64     queue = ((u64)1 << vector);
+       u32     mask;
+
+       if (hw->mac.type == ixgbe_mac_82598EB) {
+                mask = (IXGBE_EIMS_RTX_QUEUE & queue);
+                IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
+       } else {
+                mask = (queue & 0xFFFFFFFF);
+                if (mask)
+                        IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
+                mask = (queue >> 32);
+                if (mask)
+                        IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
+       }
+}
+
+/*
+** Software-trigger (rearm) interrupts for the queues in the
+** given 64-bit bitmap by writing EICS; 82598 has one register,
+** newer MACs split the map across EICS_EX(0)/EICS_EX(1).
+*/
+static inline void
+ixgbe_rearm_queues(struct adapter *adapter, u64 queues)
+{
+       u32 mask;
+
+       if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+               mask = (IXGBE_EIMS_RTX_QUEUE & queues);
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
+       } else {
+               mask = (queues & 0xFFFFFFFF);
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
+               mask = (queues >> 32);
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
+       }
+}
+
+
+/*
+** Deferred taskqueue handler for a queue pair: services RX,
+** reclaims TX descriptors, restarts transmission if the stack
+** has frames queued, and finally re-enables the queue's
+** interrupt (or reschedules itself if RX work remains).
+** 'pending' is the standard taskqueue argument and is unused.
+*/
+static void
+ixgbe_handle_que(void *context, int pending)
+{
+       struct ix_queue *que = context;
+       struct adapter  *adapter = que->adapter;
+       struct tx_ring  *txr = que->txr;
+       struct ifnet    *ifp = adapter->ifp;
+       bool            more;
+
+       if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+               more = ixgbe_rxeof(que, adapter->rx_process_limit);
+               IXGBE_TX_LOCK(txr);
+               ixgbe_txeof(txr);
+#if __FreeBSD_version >= 800000
+               if (!drbr_empty(ifp, txr->br))
+                       ixgbe_mq_start_locked(ifp, txr, NULL);
+#else
+               if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
+                       ixgbe_start_locked(txr, ifp);
+#endif
+               IXGBE_TX_UNLOCK(txr);
+               /* more RX work: requeue ourselves, leave intr masked */
+               if (more) {
+                       taskqueue_enqueue(que->tq, &que->que_task);
+                       return;
+               }
+       }
+
+       /* Reenable this interrupt */
+       ixgbe_enable_queue(adapter, que->msix);
+       return;
+}
+
+
+/*********************************************************************
+ *
+ *  Legacy Interrupt Service routine
+ *
+ *  Single-vector (INTx/MSI) handler: reads and dispatches all
+ *  causes (RX/TX, fan failure, link) itself, then re-enables
+ *  interrupts.
+ *
+ **********************************************************************/
+
+static void
+ixgbe_legacy_irq(void *arg)
+{
+       struct ix_queue *que = arg;
+       struct adapter  *adapter = que->adapter;
+       struct ixgbe_hw *hw = &adapter->hw;
+       struct          tx_ring *txr = adapter->tx_rings;
+       bool            more_tx, more_rx;
+       u32             reg_eicr, loop = MAX_LOOP;
+
+
+       reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
+
+       ++que->irqs;
+       /* no cause bits set: shared-interrupt spurious fire */
+       if (reg_eicr == 0) {
+               ixgbe_enable_intr(adapter);
+               return;
+       }
+
+       more_rx = ixgbe_rxeof(que, adapter->rx_process_limit);
+
+       IXGBE_TX_LOCK(txr);
+       /* bounded TX clean loop (at most MAX_LOOP passes) */
+       do {
+               more_tx = ixgbe_txeof(txr);
+       } while (loop-- && more_tx);
+       IXGBE_TX_UNLOCK(txr);
+
+       /* leftover work is deferred to the que taskqueue */
+       if (more_rx || more_tx)
+               taskqueue_enqueue(que->tq, &que->que_task);
+
+       /* Check for fan failure */
+       if ((hw->phy.media_type == ixgbe_media_type_copper) &&
+           (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
+                device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
+                   "REPLACE IMMEDIATELY!!\n");
+               /*
+               ** NOTE(review): this writes EIMS (unmask) with the SDP1
+               ** bit, while the MSIX path writes EICR to clear the same
+               ** cause -- confirm which register is intended here.
+               */
+               IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1);
+       }
+
+       /* Link status change */
+       if (reg_eicr & IXGBE_EICR_LSC)
+               taskqueue_enqueue(adapter->tq, &adapter->link_task);
+
+       ixgbe_enable_intr(adapter);
+       return;
+}
+
+
+/*********************************************************************
+ *
+ *  MSIX Queue Interrupt Service routine
+ *
+ *  Per-queue vector handler: cleans RX and TX, optionally runs
+ *  Adaptive Interrupt Moderation (AIM) to retune EITR from the
+ *  average packet size seen in the last interval, then either
+ *  defers remaining work to the taskqueue or re-enables the
+ *  queue interrupt.
+ *
+ **********************************************************************/
+void
+ixgbe_msix_que(void *arg)
+{
+       struct ix_queue *que = arg;
+       struct adapter  *adapter = que->adapter;
+       struct tx_ring  *txr = que->txr;
+       struct rx_ring  *rxr = que->rxr;
+       bool            more_tx, more_rx;
+       u32             newitr = 0;
+
+       ++que->irqs;
+
+       more_rx = ixgbe_rxeof(que, adapter->rx_process_limit);
+
+       IXGBE_TX_LOCK(txr);
+       more_tx = ixgbe_txeof(txr);
+       /*
+       ** Make certain that if the stack 
+       ** has anything queued the task gets
+       ** scheduled to handle it.
+       */
+#if __FreeBSD_version < 800000
+       if (!IFQ_DRV_IS_EMPTY(&adapter->ifp->if_snd))
+#else
+       if (!drbr_empty(adapter->ifp, txr->br))
+#endif
+               more_tx = 1;
+       IXGBE_TX_UNLOCK(txr);
+
+       /* Do AIM now? */
+
+       if (ixgbe_enable_aim == FALSE)
+               goto no_calc;
+       /*
+       ** Do Adaptive Interrupt Moderation:
+        **  - Write out last calculated setting
+       **  - Calculate based on average size over
+       **    the last interval.
+       */
+        if (que->eitr_setting)
+                IXGBE_WRITE_REG(&adapter->hw,
+                    IXGBE_EITR(que->msix), que->eitr_setting);
+        que->eitr_setting = 0;
+
+        /* Idle, do nothing */
+        if ((txr->bytes == 0) && (rxr->bytes == 0))
+                goto no_calc;
+                                
+       /* newitr = average bytes/packet, larger of TX and RX */
+       if ((txr->bytes) && (txr->packets))
+                       newitr = txr->bytes/txr->packets;
+       if ((rxr->bytes) && (rxr->packets))
+               newitr = max(newitr,
+                   (rxr->bytes / rxr->packets));
+       newitr += 24; /* account for hardware frame, crc */
+
+       /* set an upper boundary */
+       newitr = min(newitr, 3000);
+
+       /* Be nice to the mid range */
+       if ((newitr > 300) && (newitr < 1200))
+               newitr = (newitr / 3);
+       else
+               newitr = (newitr / 2);
+
+       /* 82598 EITR wants the value mirrored in the high word */
+        if (adapter->hw.mac.type == ixgbe_mac_82598EB)
+                newitr |= newitr << 16;
+        else
+                newitr |= IXGBE_EITR_CNT_WDIS;
+                 
+        /* save for next interrupt */
+        que->eitr_setting = newitr;
+
+        /* Reset state */
+        txr->bytes = 0;
+        txr->packets = 0;
+        rxr->bytes = 0;
+        rxr->packets = 0;
+
+no_calc:
+       if (more_tx || more_rx)
+               taskqueue_enqueue(que->tq, &que->que_task);
+       else /* Reenable this interrupt */
+               ixgbe_enable_queue(adapter, que->msix);
+       return;
+}
+
+
+/*
+** MSIX "other"/link vector handler: dispatches link change,
+** flow-director reinit, ECC, SFP module/MSF events, fan failure
+** and over-temp causes, then re-enables the OTHER interrupt.
+*/
+static void
+ixgbe_msix_link(void *arg)
+{
+       struct adapter  *adapter = arg;
+       struct ixgbe_hw *hw = &adapter->hw;
+       u32             reg_eicr;
+
+       ++adapter->link_irq;
+
+       /* First get the cause */
+       reg_eicr = IXGBE_READ_REG(hw, IXGBE_EICS);
+       /* Clear interrupt with write */
+       IXGBE_WRITE_REG(hw, IXGBE_EICR, reg_eicr);
+
+       /* Link status change */
+       if (reg_eicr & IXGBE_EICR_LSC)
+               taskqueue_enqueue(adapter->tq, &adapter->link_task);
+
+       /* causes below only exist on 82599/X540 class MACs */
+       if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
+#ifdef IXGBE_FDIR
+               if (reg_eicr & IXGBE_EICR_FLOW_DIR) {
+                       /* This is probably overkill :) */
+                       if (!atomic_cmpset_int(&adapter->fdir_reinit, 0, 1))
+                               return;
+                       /* Clear the interrupt */
+                       IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_FLOW_DIR);
+                       /* Turn off the interface */
+                       adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
+                       taskqueue_enqueue(adapter->tq, &adapter->fdir_task);
+               } else
+#endif
+               if (reg_eicr & IXGBE_EICR_ECC) {
+                       device_printf(adapter->dev, "\nCRITICAL: ECC ERROR!! "
+                           "Please Reboot!!\n");
+                       IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
+               } else
+
+               if (reg_eicr & IXGBE_EICR_GPI_SDP1) {
+                       /* Clear the interrupt */
+                       IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
+                       taskqueue_enqueue(adapter->tq, &adapter->msf_task);
+               } else if (reg_eicr & IXGBE_EICR_GPI_SDP2) {
+                       /* Clear the interrupt */
+                       IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
+                       taskqueue_enqueue(adapter->tq, &adapter->mod_task);
+               }
+        } 
+
+       /* Check for fan failure */
+       if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
+           (reg_eicr & IXGBE_EICR_GPI_SDP1)) {
+                device_printf(adapter->dev, "\nCRITICAL: FAN FAILURE!! "
+                   "REPLACE IMMEDIATELY!!\n");
+               IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
+       }
+
+       /* Check for over temp condition */
+       if ((hw->mac.type == ixgbe_mac_X540) &&
+           (reg_eicr & IXGBE_EICR_GPI_SDP0)) {
+                device_printf(adapter->dev, "\nCRITICAL: OVER TEMP!! "
+                   "PHY IS SHUT DOWN!!\n");
+                device_printf(adapter->dev, "System shutdown required\n");
+               IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP0);
+       }
+
+       /* unmask the link/other vector again */
+       IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
+       return;
+}
+
+/*********************************************************************
+ *
+ *  Media Ioctl callback
+ *
+ *  This routine is called whenever the user queries the status of
+ *  the interface using ifconfig.  Refreshes link state under the
+ *  core lock, then reports validity/activity and the negotiated
+ *  speed through 'ifmr'.
+ *
+ **********************************************************************/
+static void
+ixgbe_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
+{
+       struct adapter *adapter = ifp->if_softc;
+
+       INIT_DEBUGOUT("ixgbe_media_status: begin");
+       IXGBE_CORE_LOCK(adapter);
+       ixgbe_update_link_status(adapter);
+
+       ifmr->ifm_status = IFM_AVALID;
+       ifmr->ifm_active = IFM_ETHER;
+
+       /* no link: report valid-but-inactive media */
+       if (!adapter->link_active) {
+               IXGBE_CORE_UNLOCK(adapter);
+               return;
+       }
+
+       ifmr->ifm_status |= IFM_ACTIVE;
+
+       switch (adapter->link_speed) {
+               case IXGBE_LINK_SPEED_100_FULL:
+                       ifmr->ifm_active |= IFM_100_TX | IFM_FDX;
+                       break;
+               case IXGBE_LINK_SPEED_1GB_FULL:
+                       ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
+                       break;
+               case IXGBE_LINK_SPEED_10GB_FULL:
+                       /* 10G media subtype depends on the fitted optics */
+                       ifmr->ifm_active |= adapter->optics | IFM_FDX;
+                       break;
+       }
+
+       IXGBE_CORE_UNLOCK(adapter);
+
+       return;
+}
+
+/*********************************************************************
+ *
+ *  Media Ioctl callback
+ *
+ *  This routine is called when the user changes speed/duplex using
+ *  media/mediopt option with ifconfig.
+ *
+ **********************************************************************/
+/*
+** Media-change callback (ifconfig media/mediaopt).  Only
+** Ethernet autoselect is accepted; it programs the PHY to
+** advertise every supported full-duplex speed.
+** Returns 0 on success, EINVAL for unsupported media.
+*/
+static int
+ixgbe_media_change(struct ifnet * ifp)
+{
+       struct adapter *adapter = ifp->if_softc;
+       struct ifmedia *ifm = &adapter->media;
+
+       INIT_DEBUGOUT("ixgbe_media_change: begin");
+
+       if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
+               return (EINVAL);
+
+       if (IFM_SUBTYPE(ifm->ifm_media) != IFM_AUTO) {
+               device_printf(adapter->dev, "Only auto media type\n");
+               return (EINVAL);
+       }
+
+       /* Advertise all speeds the hardware can autonegotiate. */
+       adapter->hw.phy.autoneg_advertised =
+           IXGBE_LINK_SPEED_100_FULL |
+           IXGBE_LINK_SPEED_1GB_FULL |
+           IXGBE_LINK_SPEED_10GB_FULL;
+
+       return (0);
+}
+
+/*********************************************************************
+ *
+ *  This routine maps the mbufs to tx descriptors, allowing the
+ *  TX engine to transmit the packets. 
+ *     - return 0 on success, positive on failure
+ *
+ **********************************************************************/
+
+static int
+ixgbe_xmit(struct tx_ring *txr, struct mbuf **m_headp)
+{
+       struct adapter  *adapter = txr->adapter;
+       u32             olinfo_status = 0, cmd_type_len;
+       u32             paylen = 0;
+       int             i, j, error, nsegs;
+       int             first, last = 0;
+       struct mbuf     *m_head;
+       /* VLA on the kernel stack, sized by the per-adapter segment limit */
+       bus_dma_segment_t segs[adapter->num_segs];
+       bus_dmamap_t    map;
+       struct ixgbe_tx_buf *txbuf;
+       union ixgbe_adv_tx_desc *txd = NULL;
+
+       m_head = *m_headp;
+
+       /* Basic descriptor defines */
+        cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
+           IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);
+
+       if (m_head->m_flags & M_VLANTAG)
+               cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
+
+        /*
+         * Important to capture the first descriptor
+         * used because it will contain the index of
+         * the one we tell the hardware to report back
+         */
+        first = txr->next_avail_desc;
+       txbuf = &txr->tx_buffers[first];
+       map = txbuf->map;
+
+       /*
+        * Map the packet for DMA.
+        */
+       error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
+           *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
+
+       /* EFBIG: too many segments -- defrag once and retry the load */
+       if (error == EFBIG) {
+               struct mbuf *m;
+
+               m = m_defrag(*m_headp, M_DONTWAIT);
+               if (m == NULL) {
+                       adapter->mbuf_defrag_failed++;
+                       m_freem(*m_headp);
+                       *m_headp = NULL;
+                       return (ENOBUFS);
+               }
+               *m_headp = m;
+
+               /* Try it again */
+               error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
+                   *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
+
+               /* ENOMEM is transient: keep the mbuf for a later retry */
+               if (error == ENOMEM) {
+                       adapter->no_tx_dma_setup++;
+                       return (error);
+               } else if (error != 0) {
+                       adapter->no_tx_dma_setup++;
+                       m_freem(*m_headp);
+                       *m_headp = NULL;
+                       return (error);
+               }
+       } else if (error == ENOMEM) {
+               adapter->no_tx_dma_setup++;
+               return (error);
+       } else if (error != 0) {
+               adapter->no_tx_dma_setup++;
+               m_freem(*m_headp);
+               *m_headp = NULL;
+               return (error);
+       }
+
+       /* Make certain there are enough descriptors */
+       if (nsegs > txr->tx_avail - 2) {
+               txr->no_desc_avail++;
+               error = ENOBUFS;
+               goto xmit_fail;
+       }
+       m_head = *m_headp;
+
+       /*
+       ** Set up the appropriate offload context
+       ** this becomes the first descriptor of 
+       ** a packet.
+       */
+       if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
+               if (ixgbe_tso_setup(txr, m_head, &paylen)) {
+                       cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
+                       olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
+                       olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
+                       olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
+                       ++adapter->tso_tx;
+               } else
+                       return (ENXIO);
+       } else if (ixgbe_tx_ctx_setup(txr, m_head))
+               olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
+
+#ifdef IXGBE_IEEE1588
+        /* This is changing soon to an mtag detection */
+        /* NOTE(review): placeholder pseudo-code -- will not compile
+           if IXGBE_IEEE1588 is ever defined */
+        if (we detect this mbuf has a TSTAMP mtag)
+                cmd_type_len |= IXGBE_ADVTXD_MAC_TSTAMP;
+#endif
+
+#ifdef IXGBE_FDIR
+       /* Do the flow director magic */
+       if ((txr->atr_sample) && (!adapter->fdir_reinit)) {
+               ++txr->atr_count;
+               if (txr->atr_count >= atr_sample_rate) {
+                       ixgbe_atr(txr, m_head);
+                       txr->atr_count = 0;
+               }
+       }
+#endif
+        /* Record payload length (TSO path set it above) */
+       if (paylen == 0)
+               olinfo_status |= m_head->m_pkthdr.len <<
+                   IXGBE_ADVTXD_PAYLEN_SHIFT;
+
+       /* Fill one advanced data descriptor per DMA segment */
+       i = txr->next_avail_desc;
+       for (j = 0; j < nsegs; j++) {
+               bus_size_t seglen;
+               bus_addr_t segaddr;
+
+               txbuf = &txr->tx_buffers[i];
+               txd = &txr->tx_base[i];
+               seglen = segs[j].ds_len;
+               segaddr = htole64(segs[j].ds_addr);
+
+               txd->read.buffer_addr = segaddr;
+               txd->read.cmd_type_len = htole32(txr->txd_cmd |
+                   cmd_type_len |seglen);
+               txd->read.olinfo_status = htole32(olinfo_status);
+               last = i; /* descriptor that will get completion IRQ */
+
+               if (++i == adapter->num_tx_desc)
+                       i = 0;
+
+               txbuf->m_head = NULL;
+               txbuf->eop_index = -1;
+       }
+
+       /* mark the final descriptor: end-of-packet, report status */
+       txd->read.cmd_type_len |=
+           htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
+       txr->tx_avail -= nsegs;
+       txr->next_avail_desc = i;
+
+       txbuf->m_head = m_head;
+       /* Swap the dma map between the first and last descriptor */
+       txr->tx_buffers[first].map = txbuf->map;
+       txbuf->map = map;
+       bus_dmamap_sync(txr->txtag, map, BUS_DMASYNC_PREWRITE);
+
+        /* Set the index of the descriptor that will be marked done */
+        txbuf = &txr->tx_buffers[first];
+       txbuf->eop_index = last;
+
+        bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
+            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+       /*
+        * Advance the Transmit Descriptor Tail (Tdt), this tells the
+        * hardware that this frame is available to transmit.
+        */
+       ++txr->total_packets;
+       IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDT(txr->me), i);
+
+       return (0);
+
+xmit_fail:
+       bus_dmamap_unload(txr->txtag, txbuf->map);
+       return (error);
+
+}
+
+static void
+ixgbe_set_promisc(struct adapter *adapter)
+{
+       u_int32_t       reg_rctl;
+       struct ifnet   *ifp = adapter->ifp;
+
+       /*
+        * Start from the current filter control register with both
+        * unicast-promiscuous (UPE) and multicast-promiscuous (MPE)
+        * bits cleared, and write that baseline first.
+        */
+       reg_rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
+       reg_rctl &= (~IXGBE_FCTRL_UPE);
+       reg_rctl &= (~IXGBE_FCTRL_MPE);
+       IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
+
+       /* Then re-enable whichever bits the interface flags ask for. */
+       if (ifp->if_flags & IFF_PROMISC) {
+               reg_rctl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
+       } else if (ifp->if_flags & IFF_ALLMULTI) {
+               /* All-multicast: accept all multicast, but not all unicast */
+               reg_rctl |= IXGBE_FCTRL_MPE;
+               reg_rctl &= ~IXGBE_FCTRL_UPE;
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_rctl);
+       }
+       return;
+}
+
+
+/*********************************************************************
+ *  Multicast Update
+ *
+ *  This routine is called whenever multicast address list is updated.
+ *
+ **********************************************************************/
+#define IXGBE_RAR_ENTRIES 16
+
+static void
+ixgbe_set_multi(struct adapter *adapter)
+{
+       u32     fctrl;
+       u8      *mta;
+       u8      *update_ptr;
+       struct  ifmultiaddr *ifma;
+       int     mcnt = 0;
+       struct ifnet   *ifp = adapter->ifp;
+
+       IOCTL_DEBUGOUT("ixgbe_set_multi: begin");
+
+       mta = adapter->mta;
+       bzero(mta, sizeof(u8) * IXGBE_ETH_LENGTH_OF_ADDRESS *
+           MAX_NUM_MULTICAST_ADDRESSES);
+
+       fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
+       fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
+       if (ifp->if_flags & IFF_PROMISC)
+               fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
+       else if (ifp->if_flags & IFF_ALLMULTI) {
+               fctrl |= IXGBE_FCTRL_MPE;
+               fctrl &= ~IXGBE_FCTRL_UPE;
+       } else
+               fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
+       
+       IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
+
+#if __FreeBSD_version < 800000
+       IF_ADDR_LOCK(ifp);
+#else
+       if_maddr_rlock(ifp);
+#endif
+       TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
+               if (ifma->ifma_addr->sa_family != AF_LINK)
+                       continue;
+               bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
+                   &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
+                   IXGBE_ETH_LENGTH_OF_ADDRESS);
+               mcnt++;
+       }
+#if __FreeBSD_version < 800000
+       IF_ADDR_UNLOCK(ifp);
+#else
+       if_maddr_runlock(ifp);
+#endif
+
+       update_ptr = mta;
+       ixgbe_update_mc_addr_list(&adapter->hw,
+           update_ptr, mcnt, ixgbe_mc_array_itr, TRUE);
+
+       return;
+}
+
+/*
+ * This is an iterator function now needed by the multicast
+ * shared code. It simply feeds the shared code routine the
+ * addresses in the array of ixgbe_set_multi() one by one.
+ */
+static u8 *
+ixgbe_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
+{
+       u8 *addr;
+
+       /* Return the current entry, advance the cursor one address */
+       addr = *update_ptr;
+       *vmdq = 0;
+       *update_ptr = addr + IXGBE_ETH_LENGTH_OF_ADDRESS;
+
+       return (addr);
+}
+
+
+/*********************************************************************
+ *  Timer routine
+ *
+ *  This routine checks for link status, updates statistics,
+ *  and runs the watchdog check.
+ *
+ **********************************************************************/
+
+static void
+ixgbe_local_timer(void *arg)
+{
+       struct adapter *adapter = arg;
+       device_t        dev = adapter->dev;
+       struct tx_ring *txr = adapter->tx_rings;
+
+       /* Caller (the callout) must hold the core lock */
+       mtx_assert(&adapter->core_mtx, MA_OWNED);
+
+       /* Check for pluggable optics */
+       if (adapter->sfp_probe)
+               if (!ixgbe_sfp_probe(adapter))
+                       goto out; /* Nothing to do */
+
+       ixgbe_update_link_status(adapter);
+       ixgbe_update_stats_counters(adapter);
+
+       /*
+        * If the interface has been paused
+        * then don't do the watchdog check
+        */
+       if (IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)
+               goto out;
+
+       /*
+       ** Check status on the TX queues for a hang
+       */
+        for (int i = 0; i < adapter->num_queues; i++, txr++)
+               if (txr->queue_status == IXGBE_QUEUE_HUNG)
+                       goto hung;
+
+out:
+       /* Rearm queue interrupts and run again in one second (hz ticks) */
+       ixgbe_rearm_queues(adapter, adapter->que_mask);
+       callout_reset(&adapter->timer, hz, ixgbe_local_timer, adapter);
+       return;
+
+hung:
+       /* Log diagnostic state of the stuck ring, then reinitialize */
+       device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
+       device_printf(dev,"Queue(%d) tdh = %d, hw tdt = %d\n", txr->me,
+           IXGBE_READ_REG(&adapter->hw, IXGBE_TDH(txr->me)),
+           IXGBE_READ_REG(&adapter->hw, IXGBE_TDT(txr->me)));
+       device_printf(dev,"TX(%d) desc avail = %d,"
+           "Next TX to Clean = %d\n",
+           txr->me, txr->tx_avail, txr->next_to_clean);
+       adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
+       adapter->watchdog_events++;
+       ixgbe_init_locked(adapter);
+}
+
+/*
+** Note: this routine updates the OS on the link state
+**     the real check of the hardware only happens with
+**     a link interrupt.
+*/
+static void
+ixgbe_update_link_status(struct adapter *adapter)
+{
+       struct ifnet    *ifp = adapter->ifp;
+       device_t        dev = adapter->dev;
+       struct tx_ring  *txr;
+       int             i;
+
+       if (adapter->link_up) {
+               /* Report only on the down -> up edge */
+               if (adapter->link_active == FALSE) {
+                       if (bootverbose)
+                               device_printf(dev,"Link is up %d Gbps %s \n",
+                                   ((adapter->link_speed == 128)? 10:1),
+                                   "Full Duplex");
+                       adapter->link_active = TRUE;
+                       if_link_state_change(ifp, LINK_STATE_UP);
+               }
+               return;
+       }
+
+       /* Link down: report once and idle every TX queue */
+       if (adapter->link_active == TRUE) {
+               if (bootverbose)
+                       device_printf(dev,"Link is Down\n");
+               if_link_state_change(ifp, LINK_STATE_DOWN);
+               adapter->link_active = FALSE;
+               txr = adapter->tx_rings;
+               for (i = 0; i < adapter->num_queues; i++, txr++)
+                       txr->queue_status = IXGBE_QUEUE_IDLE;
+       }
+}
+
+
+/*********************************************************************
+ *
+ *  This routine disables all traffic on the adapter by issuing a
+ *  global reset on the MAC and deallocates TX/RX buffers.
+ *
+ **********************************************************************/
+
+static void
+ixgbe_stop(void *arg)
+{
+       struct ifnet   *ifp;
+       struct adapter *adapter = arg;
+       struct ixgbe_hw *hw = &adapter->hw;
+       ifp = adapter->ifp;
+
+       /* Caller must hold the core lock */
+       mtx_assert(&adapter->core_mtx, MA_OWNED);
+
+       INIT_DEBUGOUT("ixgbe_stop: begin\n");
+       ixgbe_disable_intr(adapter);
+
+       /* Tell the stack that the interface is no longer active */
+       ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
+
+       ixgbe_reset_hw(hw);
+       /*
+        * Clear the stopped flag before calling ixgbe_stop_adapter()
+        * so the shared code runs its full stop sequence --
+        * NOTE(review): presumably stop_adapter is a no-op once
+        * adapter_stopped is set; confirm against the shared code.
+        */
+       hw->adapter_stopped = FALSE;
+       ixgbe_stop_adapter(hw);
+       /* Turn off the laser */
+       if (hw->phy.multispeed_fiber)
+               ixgbe_disable_tx_laser(hw);
+       callout_stop(&adapter->timer);
+
+       /* reprogram the RAR[0] in case user changed it. */
+       ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV);
+
+       return;
+}
+
+
+/*********************************************************************
+ *
+ *  Determine hardware revision.
+ *
+ **********************************************************************/
+static void
+ixgbe_identify_hardware(struct adapter *adapter)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       device_t        dev = adapter->dev;
+
+       /* Cache the PCI identity of this board */
+       hw->vendor_id = pci_get_vendor(dev);
+       hw->device_id = pci_get_device(dev);
+       hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
+       hw->subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2);
+       hw->subsystem_device_id = pci_read_config(dev, PCIR_SUBDEV_0, 2);
+
+       /* Must run before choosing num_segs below */
+       ixgbe_set_mac_type(hw);
+
+       if (hw->mac.type == ixgbe_mac_82598EB) {
+               adapter->num_segs = IXGBE_82598_SCATTER;
+       } else {
+               /* 82599 and VF parts */
+               hw->phy.smart_speed = ixgbe_smart_speed;
+               adapter->num_segs = IXGBE_82599_SCATTER;
+       }
+}
+
+/*********************************************************************
+ *
+ *  Determine optic type
+ *
+ **********************************************************************/
+static void
+ixgbe_setup_optics(struct adapter *adapter)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       int             phy_layer;
+
+       phy_layer = ixgbe_get_supported_physical_layer(hw);
+
+       /* Pick the media type by descending priority of supported layers */
+       if (phy_layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
+               adapter->optics = IFM_10G_T;
+       else if (phy_layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
+               adapter->optics = IFM_1000_T;
+       else if (phy_layer & (IXGBE_PHYSICAL_LAYER_10GBASE_LR |
+           IXGBE_PHYSICAL_LAYER_10GBASE_LRM))
+               adapter->optics = IFM_10G_LR;
+       else if (phy_layer & IXGBE_PHYSICAL_LAYER_10GBASE_SR)
+               adapter->optics = IFM_10G_SR;
+       else if (phy_layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU)
+               adapter->optics = IFM_10G_TWINAX;
+       else if (phy_layer & (IXGBE_PHYSICAL_LAYER_10GBASE_KX4 |
+           IXGBE_PHYSICAL_LAYER_10GBASE_CX4))
+               adapter->optics = IFM_10G_CX4;
+       else
+               /* Nothing recognized: fall back to autoselect */
+               adapter->optics = IFM_ETHER | IFM_AUTO;
+}
+
+/*********************************************************************
+ *
+ *  Setup the Legacy or MSI Interrupt handler
+ *
+ **********************************************************************/
+static int
+ixgbe_allocate_legacy(struct adapter *adapter)
+{
+       device_t dev = adapter->dev;
+       struct          ix_queue *que = adapter->queues;
+       int error, rid = 0;
+
+       /* MSI RID at 1 */
+       if (adapter->msix == 1)
+               rid = 1;
+
+       /* We allocate a single interrupt resource */
+       adapter->res = bus_alloc_resource_any(dev,
+            SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
+       if (adapter->res == NULL) {
+               device_printf(dev, "Unable to allocate bus resource: "
+                   "interrupt\n");
+               return (ENXIO);
+       }
+
+       /*
+        * Try allocating a fast interrupt and the associated deferred
+        * processing contexts.
+        */
+       TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
+       que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
+            taskqueue_thread_enqueue, &que->tq);
+       taskqueue_start_threads(&que->tq, 1, PI_NET, "%s ixq",
+            device_get_nameunit(adapter->dev));
+
+       /* Tasklets for Link, SFP and Multispeed Fiber */
+       TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
+       TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
+       TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
+#ifdef IXGBE_FDIR
+       TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
+#endif
+       adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
+           taskqueue_thread_enqueue, &adapter->tq);
+       taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
+           device_get_nameunit(adapter->dev));
+
+       /* Hook the IRQ; on failure tear down both taskqueues created above */
+       if ((error = bus_setup_intr(dev, adapter->res,
+            INTR_TYPE_NET | INTR_MPSAFE, NULL, ixgbe_legacy_irq,
+            que, &adapter->tag)) != 0) {
+               device_printf(dev, "Failed to register fast interrupt "
+                   "handler: %d\n", error);
+               taskqueue_free(que->tq);
+               taskqueue_free(adapter->tq);
+               que->tq = NULL;
+               adapter->tq = NULL;
+               return (error);
+       }
+       /* For simplicity in the handlers */
+       adapter->que_mask = IXGBE_EIMS_ENABLE_MASK;
+
+       return (0);
+}
+
+
+/*********************************************************************
+ *
+ *  Setup MSIX Interrupt resources and handlers 
+ *
+ **********************************************************************/
+static int
+ixgbe_allocate_msix(struct adapter *adapter)
+{
+       device_t        dev = adapter->dev;
+       struct          ix_queue *que = adapter->queues;
+       int             error, rid, vector = 0;
+
+       for (int i = 0; i < adapter->num_queues; i++, vector++, que++) {
+               rid = vector + 1;
+               que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
+                   RF_SHAREABLE | RF_ACTIVE);
+               if (que->res == NULL) {
+                       device_printf(dev,"Unable to allocate"
+                           " bus resource: que interrupt [%d]\n", vector);
+                       return (ENXIO);
+               }
+               /* Set the handler function */
+               error = bus_setup_intr(dev, que->res,
+                   INTR_TYPE_NET | INTR_MPSAFE, NULL,
+                   ixgbe_msix_que, que, &que->tag);
+               if (error) {
+                       que->res = NULL;
+                       device_printf(dev, "Failed to register QUE handler");
+                       return (error);
+               }
+#if __FreeBSD_version >= 800504
+               bus_describe_intr(dev, que->res, que->tag, "que %d", i);
+#endif
+               que->msix = vector;
+               adapter->que_mask |= (u64)(1 << que->msix);
+               /*
+               ** Bind the msix vector, and thus the
+               ** ring to the corresponding cpu.
+               */
+               if (adapter->num_queues > 1)
+                       bus_bind_intr(dev, que->res, i);
+
+               TASK_INIT(&que->que_task, 0, ixgbe_handle_que, que);
+               que->tq = taskqueue_create_fast("ixgbe_que", M_NOWAIT,
+                   taskqueue_thread_enqueue, &que->tq);
+               taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
+                   device_get_nameunit(adapter->dev));
+       }
+
+       /* and Link */
+       rid = vector + 1;
+       adapter->res = bus_alloc_resource_any(dev,
+           SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
+       if (!adapter->res) {
+               device_printf(dev,"Unable to allocate"
+           " bus resource: Link interrupt [%d]\n", rid);
+               return (ENXIO);
+       }
+       /* Set the link handler function */
+       error = bus_setup_intr(dev, adapter->res,
+           INTR_TYPE_NET | INTR_MPSAFE, NULL,
+           ixgbe_msix_link, adapter, &adapter->tag);
+       if (error) {
+               adapter->res = NULL;
+               device_printf(dev, "Failed to register LINK handler");
+               return (error);
+       }
+#if __FreeBSD_version >= 800504
+       bus_describe_intr(dev, adapter->res, adapter->tag, "link");
+#endif
+       adapter->linkvec = vector;
+       /* Tasklets for Link, SFP and Multispeed Fiber */
+       TASK_INIT(&adapter->link_task, 0, ixgbe_handle_link, adapter);
+       TASK_INIT(&adapter->mod_task, 0, ixgbe_handle_mod, adapter);
+       TASK_INIT(&adapter->msf_task, 0, ixgbe_handle_msf, adapter);
+#ifdef IXGBE_FDIR
+       TASK_INIT(&adapter->fdir_task, 0, ixgbe_reinit_fdir, adapter);
+#endif
+       adapter->tq = taskqueue_create_fast("ixgbe_link", M_NOWAIT,
+           taskqueue_thread_enqueue, &adapter->tq);
+       taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s linkq",
+           device_get_nameunit(adapter->dev));
+
+       return (0);
+}
+
+/*
+ * Setup Either MSI/X or MSI
+ */
+static int
+ixgbe_setup_msix(struct adapter *adapter)
+{
+       device_t dev = adapter->dev;
+       int rid, want, queues, msgs;
+
+       /* Override by tuneable */
+       if (ixgbe_enable_msix == 0)
+               goto msi;
+
+       /* First try MSI/X: map the MSIX table BAR (82598 location) */
+       rid = PCIR_BAR(MSIX_82598_BAR);
+       adapter->msix_mem = bus_alloc_resource_any(dev,
+           SYS_RES_MEMORY, &rid, RF_ACTIVE);
+       if (!adapter->msix_mem) {
+               rid += 4;       /* 82599 maps in higher BAR */
+               adapter->msix_mem = bus_alloc_resource_any(dev,
+                   SYS_RES_MEMORY, &rid, RF_ACTIVE);
+       }
+       if (!adapter->msix_mem) {
+               /* May not be enabled */
+               device_printf(adapter->dev,
+                   "Unable to map MSIX table \n");
+               goto msi;
+       }
+
+       msgs = pci_msix_count(dev);
+       if (msgs == 0) { /* system has msix disabled */
+               bus_release_resource(dev, SYS_RES_MEMORY,
+                   rid, adapter->msix_mem);
+               adapter->msix_mem = NULL;
+               goto msi;
+       }
+
+       /* Figure out a reasonable auto config value */
+       queues = (mp_ncpus > (msgs-1)) ? (msgs-1) : mp_ncpus;
+
+       if (ixgbe_num_queues != 0)
+               queues = ixgbe_num_queues;
+       /* Set max queues to 8 when autoconfiguring */
+       else if ((ixgbe_num_queues == 0) && (queues > 8))
+               queues = 8;
+
+       /*
+       ** Want one vector (RX/TX pair) per queue
+       ** plus an additional for Link.
+       */
+       want = queues + 1;
+       if (msgs >= want)
+               msgs = want;
+       else {
+               device_printf(adapter->dev,
+                   "MSIX Configuration Problem, "
+                   "%d vectors but %d queues wanted!\n",
+                   msgs, want);
+               return (0); /* Will go to Legacy setup */
+       }
+       if ((msgs) && pci_alloc_msix(dev, &msgs) == 0) {
+               device_printf(adapter->dev,
+                   "Using MSIX interrupts with %d vectors\n", msgs);
+               adapter->num_queues = queues;
+               return (msgs);
+       }
+msi:
+       /* MSI/X unavailable: fall back to a single MSI, else legacy INTx */
+       msgs = pci_msi_count(dev);
+       if (msgs == 1 && pci_alloc_msi(dev, &msgs) == 0)
+               device_printf(adapter->dev,"Using an MSI interrupt\n");
+       else
+               device_printf(adapter->dev,"Using a Legacy interrupt\n");
+       return (msgs);
+}
+
+
+static int
+ixgbe_allocate_pci_resources(struct adapter *adapter)
+{
+       int             rid;
+       device_t        dev = adapter->dev;
+
+       /* Map the device register BAR */
+       rid = PCIR_BAR(0);
+       adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
+           &rid, RF_ACTIVE);
+
+       if (!(adapter->pci_mem)) {
+               device_printf(dev,"Unable to allocate bus resource: memory\n");
+               return (ENXIO);
+       }
+
+       /* Publish the bus-space handles the shared code uses for MMIO */
+       adapter->osdep.mem_bus_space_tag =
+               rman_get_bustag(adapter->pci_mem);
+       adapter->osdep.mem_bus_space_handle =
+               rman_get_bushandle(adapter->pci_mem);
+       adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle;
+
+       /* Legacy defaults */
+       adapter->num_queues = 1;
+       adapter->hw.back = &adapter->osdep;
+
+       /*
+       ** Now setup MSI or MSI/X, should
+       ** return us the number of supported
+       ** vectors. (Will be 1 for MSI)
+       */
+       adapter->msix = ixgbe_setup_msix(adapter);
+       return (0);
+}
+
+static void
+ixgbe_free_pci_resources(struct adapter * adapter)
+{
+       struct          ix_queue *que = adapter->queues;
+       device_t        dev = adapter->dev;
+       int             rid, memrid;
+
+       /* MSIX table BAR differs between 82598 and 82599 */
+       if (adapter->hw.mac.type == ixgbe_mac_82598EB)
+               memrid = PCIR_BAR(MSIX_82598_BAR);
+       else
+               memrid = PCIR_BAR(MSIX_82599_BAR);
+
+       /*
+       ** There is a slight possibility of a failure mode
+       ** in attach that will result in entering this function
+       ** before interrupt resources have been initialized, and
+       ** in that case we do not want to execute the loops below
+       ** We can detect this reliably by the state of the adapter
+       ** res pointer.
+       */
+       if (adapter->res == NULL)
+               goto mem;
+
+       /*
+       **  Release all msix queue resources:
+       */
+       for (int i = 0; i < adapter->num_queues; i++, que++) {
+               rid = que->msix + 1;
+               if (que->tag != NULL) {
+                       bus_teardown_intr(dev, que->res, que->tag);
+                       que->tag = NULL;
+               }
+               if (que->res != NULL)
+                       bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
+       }
+
+
+       /* Clean the Legacy or Link interrupt last */
+       if (adapter->linkvec) /* we are doing MSIX */
+               rid = adapter->linkvec + 1;
+       else
+               /* MSI used rid 1, legacy INTx used rid 0 (see allocate_legacy) */
+               (adapter->msix != 0) ? (rid = 1):(rid = 0);
+
+       if (adapter->tag != NULL) {
+               bus_teardown_intr(dev, adapter->res, adapter->tag);
+               adapter->tag = NULL;
+       }
+       if (adapter->res != NULL)
+               bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);
+
+mem:
+       /* Release MSI/MSIX vectors, then the mapped BARs */
+       if (adapter->msix)
+               pci_release_msi(dev);
+
+       if (adapter->msix_mem != NULL)
+               bus_release_resource(dev, SYS_RES_MEMORY,
+                   memrid, adapter->msix_mem);
+
+       if (adapter->pci_mem != NULL)
+               bus_release_resource(dev, SYS_RES_MEMORY,
+                   PCIR_BAR(0), adapter->pci_mem);
+
+       return;
+}
+
+/*********************************************************************
+ *
+ *  Setup networking device structure and register an interface.
+ *
+ **********************************************************************/
+static int
+ixgbe_setup_interface(device_t dev, struct adapter *adapter)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       struct ifnet   *ifp;
+
+       INIT_DEBUGOUT("ixgbe_setup_interface: begin");
+
+       ifp = adapter->ifp = if_alloc(IFT_ETHER);
+       if (ifp == NULL) {
+               device_printf(dev, "can not allocate ifnet structure\n");
+               return (-1);
+       }
+       /* Wire up the standard ifnet entry points */
+       if_initname(ifp, device_get_name(dev), device_get_unit(dev));
+       ifp->if_mtu = ETHERMTU;
+       ifp->if_baudrate = 1000000000;
+       ifp->if_init = ixgbe_init;
+       ifp->if_softc = adapter;
+       ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
+       ifp->if_ioctl = ixgbe_ioctl;
+       ifp->if_start = ixgbe_start;
+#if __FreeBSD_version >= 800000
+       /* Multiqueue transmit entry points (8.x and later) */
+       ifp->if_transmit = ixgbe_mq_start;
+       ifp->if_qflush = ixgbe_qflush;
+#endif
+       ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 2;
+
+       ether_ifattach(ifp, adapter->hw.mac.addr);
+
+       adapter->max_frame_size =
+           ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+
+       /*
+        * Tell the upper layer(s) we support long frames.
+        */
+       ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
+
+       ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4 | IFCAP_VLAN_HWCSUM;
+       ifp->if_capabilities |= IFCAP_JUMBO_MTU;
+       ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
+                            |  IFCAP_VLAN_HWTSO
+                            |  IFCAP_VLAN_MTU;
+       ifp->if_capenable = ifp->if_capabilities;
+
+       /* Don't enable LRO by default */
+       /* (added after capenable was copied, so advertised but off) */
+       ifp->if_capabilities |= IFCAP_LRO;
+
+       /*
+       ** Don't turn this on by default, if vlans are
+       ** created on another pseudo device (eg. lagg)
+       ** then vlan events are not passed thru, breaking
+       ** operation, but with HW FILTER off it works. If
+       ** using vlans directly on the ixgbe driver you can
+       ** enable this and get full hardware tag filtering.
+       */
+       ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
+
+       /*
+        * Specify the media types supported by this adapter and register
+        * callbacks to update media and link information
+        */
+       ifmedia_init(&adapter->media, IFM_IMASK, ixgbe_media_change,
+                    ixgbe_media_status);
+       ifmedia_add(&adapter->media, IFM_ETHER | adapter->optics, 0, NULL);
+       ifmedia_set(&adapter->media, IFM_ETHER | adapter->optics);
+       if (hw->device_id == IXGBE_DEV_ID_82598AT) {
+               ifmedia_add(&adapter->media,
+                   IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
+               ifmedia_add(&adapter->media,
+                   IFM_ETHER | IFM_1000_T, 0, NULL);
+       }
+       ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
+       ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
+
+       return (0);
+}
+
+static void
+ixgbe_config_link(struct adapter *adapter)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       u32     autoneg, err = 0;
+       bool    sfp, negotiate;
+
+       sfp = ixgbe_is_sfp(hw);
+
+       if (sfp) { 
+               /* SFP media: defer bring-up to the module/MSF tasklets */
+               if (hw->phy.multispeed_fiber) {
+                       hw->mac.ops.setup_sfp(hw);
+                       ixgbe_enable_tx_laser(hw);
+                       taskqueue_enqueue(adapter->tq, &adapter->msf_task);
+               } else
+                       taskqueue_enqueue(adapter->tq, &adapter->mod_task);
+       } else {
+               /* Copper/backplane: negotiate and set up the link directly */
+               if (hw->mac.ops.check_link)
+                       err = ixgbe_check_link(hw, &autoneg,
+                           &adapter->link_up, FALSE);
+               if (err)
+                       goto out;
+               autoneg = hw->phy.autoneg_advertised;
+               /* No advertised speeds configured: query what the HW can do */
+               if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
+                       err  = hw->mac.ops.get_link_capabilities(hw,
+                           &autoneg, &negotiate);
+               if (err)
+                       goto out;
+               if (hw->mac.ops.setup_link)
+                       err = hw->mac.ops.setup_link(hw, autoneg,
+                           negotiate, adapter->link_up);
+       }
+out:
+       return;
+}
+
+/********************************************************************
+ * Manage DMA'able memory.
+ *******************************************************************/
+static void
+ixgbe_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error)
+{
+       bus_addr_t *paddr = (bus_addr_t *)arg;
+
+       if (error != 0)
+               return;
+       /* Single-segment tag: record the one bus address */
+       *paddr = segs->ds_addr;
+}
+
+static int
+ixgbe_dma_malloc(struct adapter *adapter, bus_size_t size,
+               struct ixgbe_dma_alloc *dma, int mapflags)
+{
+       device_t dev = adapter->dev;
+       int             r;
+
+       /* Single contiguous segment, DBA_ALIGN-aligned */
+       r = bus_dma_tag_create(bus_get_dma_tag(adapter->dev),   /* parent */
+                              DBA_ALIGN, 0,    /* alignment, bounds */
+                              BUS_SPACE_MAXADDR,       /* lowaddr */
+                              BUS_SPACE_MAXADDR,       /* highaddr */
+                              NULL, NULL,      /* filter, filterarg */
+                              size,    /* maxsize */
+                              1,       /* nsegments */
+                              size,    /* maxsegsize */
+                              BUS_DMA_ALLOCNOW,        /* flags */
+                              NULL,    /* lockfunc */
+                              NULL,    /* lockfuncarg */
+                              &dma->dma_tag);
+       if (r != 0) {
+               device_printf(dev,"ixgbe_dma_malloc: bus_dma_tag_create failed; "
+                      "error %u\n", r);
+               goto fail_0;
+       }
+       r = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
+                            BUS_DMA_NOWAIT, &dma->dma_map);
+       if (r != 0) {
+               device_printf(dev,"ixgbe_dma_malloc: bus_dmamem_alloc failed; "
+                      "error %u\n", r);
+               goto fail_1;
+       }
+       /* Load the map; callback stores the bus address in dma_paddr */
+       r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
+                           size,
+                           ixgbe_dmamap_cb,
+                           &dma->dma_paddr,
+                           mapflags | BUS_DMA_NOWAIT);
+       if (r != 0) {
+               device_printf(dev,"ixgbe_dma_malloc: bus_dmamap_load failed; "
+                      "error %u\n", r);
+               goto fail_2;
+       }
+       dma->dma_size = size;
+       return (0);
+       /* Unwind in reverse order of construction */
+fail_2:
+       bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
+fail_1:
+       bus_dma_tag_destroy(dma->dma_tag);
+fail_0:
+       dma->dma_map = NULL;
+       dma->dma_tag = NULL;
+       return (r);
+}
+
+static void
+ixgbe_dma_free(struct adapter *adapter, struct ixgbe_dma_alloc *dma)
+{
+       /* Sync for CPU, then tear down in reverse order of ixgbe_dma_malloc */
+       bus_dmamap_sync(dma->dma_tag, dma->dma_map,
+           BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+       bus_dmamap_unload(dma->dma_tag, dma->dma_map);
+       bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
+       bus_dma_tag_destroy(dma->dma_tag);
+}
+
+
+/*********************************************************************
+ *
+ *  Allocate memory for the transmit and receive rings, and then
+ *  the descriptors associated with each, called only once at attach.
+ *
+ **********************************************************************/
+static int
+ixgbe_allocate_queues(struct adapter *adapter)
+{
+       device_t        dev = adapter->dev;
+       struct ix_queue *que;
+       struct tx_ring  *txr;
+       struct rx_ring  *rxr;
+       int rsize, tsize, error = IXGBE_SUCCESS;
+       int txconf = 0, rxconf = 0;
+
+        /* First allocate the top level queue structs */
+        if (!(adapter->queues =
+            (struct ix_queue *) malloc(sizeof(struct ix_queue) *
+            adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
+                device_printf(dev, "Unable to allocate queue memory\n");
+                error = ENOMEM;
+                goto fail;
+        }
+
+       /* First allocate the TX ring struct memory */
+       if (!(adapter->tx_rings =
+           (struct tx_ring *) malloc(sizeof(struct tx_ring) *
+           adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
+               device_printf(dev, "Unable to allocate TX ring memory\n");
+               error = ENOMEM;
+               goto tx_fail;
+       }
+
+       /* Next allocate the RX */
+       if (!(adapter->rx_rings =
+           (struct rx_ring *) malloc(sizeof(struct rx_ring) *
+           adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
+               device_printf(dev, "Unable to allocate RX ring memory\n");
+               error = ENOMEM;
+               goto rx_fail;
+       }
+
+       /* For the ring itself */
+       tsize = roundup2(adapter->num_tx_desc *
+           sizeof(union ixgbe_adv_tx_desc), DBA_ALIGN);
+
+       /*
+        * Now set up the TX queues, txconf is needed to handle the
+        * possibility that things fail midcourse and we need to
+        * undo memory gracefully
+        */ 
+       for (int i = 0; i < adapter->num_queues; i++, txconf++) {
+               /* Set up some basics */
+               txr = &adapter->tx_rings[i];
+               txr->adapter = adapter;
+               txr->me = i;
+
+               /* Initialize the TX side lock */
+               snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
+                   device_get_nameunit(dev), txr->me);
+               mtx_init(&txr->tx_mtx, txr->mtx_name, NULL, MTX_DEF);
+
+               if (ixgbe_dma_malloc(adapter, tsize,
+                       &txr->txdma, BUS_DMA_NOWAIT)) {
+                       device_printf(dev,
+                           "Unable to allocate TX Descriptor memory\n");
+                       error = ENOMEM;
+                       goto err_tx_desc;
+               }
+               txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
+               bzero((void *)txr->tx_base, tsize);
+
+               /* Now allocate transmit buffers for the ring */
+               if (ixgbe_allocate_transmit_buffers(txr)) {
+                       device_printf(dev,
+                           "Critical Failure setting up transmit buffers\n");
+                       error = ENOMEM;
+                       goto err_tx_desc;
+               }
+#if __FreeBSD_version >= 800000
+               /* Allocate a buf ring */
+               txr->br = buf_ring_alloc(IXGBE_BR_SIZE, M_DEVBUF,
+                   M_WAITOK, &txr->tx_mtx);
+               if (txr->br == NULL) {
+                       device_printf(dev,
+                           "Critical Failure setting up buf ring\n");
+                       error = ENOMEM;
+                       goto err_tx_desc;
+               }
+#endif
+       }
+
+       /*
+        * Next the RX queues...
+        */ 
+       rsize = roundup2(adapter->num_rx_desc *
+           sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
+       for (int i = 0; i < adapter->num_queues; i++, rxconf++) {
+               rxr = &adapter->rx_rings[i];
+               /* Set up some basics */
+               rxr->adapter = adapter;
+               rxr->me = i;
+
+               /* Initialize the RX side lock */
+               snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
+                   device_get_nameunit(dev), rxr->me);
+               mtx_init(&rxr->rx_mtx, rxr->mtx_name, NULL, MTX_DEF);
+
+               if (ixgbe_dma_malloc(adapter, rsize,
+                       &rxr->rxdma, BUS_DMA_NOWAIT)) {
+                       device_printf(dev,
+                           "Unable to allocate RxDescriptor memory\n");
+                       error = ENOMEM;
+                       goto err_rx_desc;
+               }
+               rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
+               bzero((void *)rxr->rx_base, rsize);
+
+               /* Allocate receive buffers for the ring*/
+               if (ixgbe_allocate_receive_buffers(rxr)) {
+                       device_printf(dev,
+                           "Critical Failure setting up receive buffers\n");
+                       error = ENOMEM;
+                       goto err_rx_desc;
+               }
+       }
+
+       /*
+       ** Finally set up the queue holding structs
+       */
+       for (int i = 0; i < adapter->num_queues; i++) {
+               que = &adapter->queues[i];
+               que->adapter = adapter;
+               que->txr = &adapter->tx_rings[i];
+               que->rxr = &adapter->rx_rings[i];
+       }
+
+       return (0);
+
+err_rx_desc:
+       for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
+               ixgbe_dma_free(adapter, &rxr->rxdma);
+err_tx_desc:
+       for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
+               ixgbe_dma_free(adapter, &txr->txdma);
+       free(adapter->rx_rings, M_DEVBUF);
+rx_fail:
+       free(adapter->tx_rings, M_DEVBUF);
+tx_fail:
+       free(adapter->queues, M_DEVBUF);
+fail:
+       return (error);
+}
+
+/*********************************************************************
+ *
+ *  Allocate memory for tx_buffer structures. The tx_buffer stores all
+ *  the information needed to transmit a packet on the wire. This is
+ *  called only once at attach, setup is done every reset.
+ *
+ **********************************************************************/
+static int
+ixgbe_allocate_transmit_buffers(struct tx_ring *txr)
+{
+       struct adapter *adapter = txr->adapter;
+       device_t dev = adapter->dev;
+       struct ixgbe_tx_buf *txbuf;
+       int error, i;
+
+       /*
+        * Setup DMA descriptor areas.
+        * Use the device's DMA tag as the parent — consistent with the
+        * receive side (ixgbe_allocate_receive_buffers) — so any
+        * bus-level DMA restrictions are inherited by the TX tag.
+        */
+       if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
+                              1, 0,            /* alignment, bounds */
+                              BUS_SPACE_MAXADDR,       /* lowaddr */
+                              BUS_SPACE_MAXADDR,       /* highaddr */
+                              NULL, NULL,              /* filter, filterarg */
+                              IXGBE_TSO_SIZE,          /* maxsize */
+                              adapter->num_segs,       /* nsegments */
+                              PAGE_SIZE,               /* maxsegsize */
+                              0,                       /* flags */
+                              NULL,                    /* lockfunc */
+                              NULL,                    /* lockfuncarg */
+                              &txr->txtag))) {
+               device_printf(dev,"Unable to allocate TX DMA tag\n");
+               goto fail;
+       }
+
+       /* One tx_buffer per descriptor; M_ZERO leaves all maps NULL */
+       if (!(txr->tx_buffers =
+           (struct ixgbe_tx_buf *) malloc(sizeof(struct ixgbe_tx_buf) *
+           adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
+               device_printf(dev, "Unable to allocate tx_buffer memory\n");
+               error = ENOMEM;
+               goto fail;
+       }
+
+        /* Create the descriptor buffer dma maps */
+       txbuf = txr->tx_buffers;
+       for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
+               error = bus_dmamap_create(txr->txtag, 0, &txbuf->map);
+               if (error != 0) {
+                       device_printf(dev, "Unable to create TX DMA map\n");
+                       goto fail;
+               }
+       }
+
+       return 0;
+fail:
+       /* We free all, it handles case where we are in the middle */
+       ixgbe_free_transmit_structures(adapter);
+       return (error);
+}
+
+/*********************************************************************
+ *
+ *  Initialize a transmit ring.
+ *
+ **********************************************************************/
+static void
+ixgbe_setup_transmit_ring(struct tx_ring *txr)
+{
+       struct adapter *adapter = txr->adapter;
+       struct ixgbe_tx_buf *txbuf;
+       int i;
+
+       /*
+        * Everything below runs under the TX lock: this routine is
+        * also used on the reset path while the interface is live.
+        */
+       /* Clear the old ring contents */
+       IXGBE_TX_LOCK(txr);
+       bzero((void *)txr->tx_base,
+             (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc);
+       /* Reset indices */
+       txr->next_avail_desc = 0;
+       txr->next_to_clean = 0;
+
+       /* Free any existing tx buffers. */
+        txbuf = txr->tx_buffers;
+       for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
+               if (txbuf->m_head != NULL) {
+                       /* Sync before unload so device writes are visible */
+                       bus_dmamap_sync(txr->txtag, txbuf->map,
+                           BUS_DMASYNC_POSTWRITE);
+                       bus_dmamap_unload(txr->txtag, txbuf->map);
+                       m_freem(txbuf->m_head);
+                       txbuf->m_head = NULL;
+               }
+               /* Clear the EOP index (-1 means "no packet pending") */
+               txbuf->eop_index = -1;
+        }
+
+#ifdef IXGBE_FDIR
+       /* Set the rate at which we sample packets */
+       if (adapter->hw.mac.type != ixgbe_mac_82598EB)
+               txr->atr_sample = atr_sample_rate;
+#endif
+
+       /* Set number of descriptors available */
+       txr->tx_avail = adapter->num_tx_desc;
+
+       /* Push the cleared ring out to the hardware-visible DMA area */
+       bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
+           BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+       IXGBE_TX_UNLOCK(txr);
+}
+
+/*********************************************************************
+ *
+ *  Initialize all transmit rings.
+ *
+ **********************************************************************/
+static int
+ixgbe_setup_transmit_structures(struct adapter *adapter)
+{
+       /* (Re)initialize every transmit ring on this adapter. */
+       for (int i = 0; i < adapter->num_queues; i++)
+               ixgbe_setup_transmit_ring(&adapter->tx_rings[i]);
+
+       return (0);
+}
+
+/*********************************************************************
+ *
+ *  Enable transmit unit.
+ *
+ **********************************************************************/
+static void
+ixgbe_initialize_transmit_units(struct adapter *adapter)
+{
+       struct tx_ring  *txr = adapter->tx_rings;
+       struct ixgbe_hw *hw = &adapter->hw;
+
+       /* Setup the Base and Length of the Tx Descriptor Ring */
+
+       for (int i = 0; i < adapter->num_queues; i++, txr++) {
+               u64     tdba = txr->txdma.dma_paddr;
+               u32     txctrl;
+
+               /* Program the ring's physical base address, low then high */
+               IXGBE_WRITE_REG(hw, IXGBE_TDBAL(i),
+                      (tdba & 0x00000000ffffffffULL));
+               IXGBE_WRITE_REG(hw, IXGBE_TDBAH(i), (tdba >> 32));
+               IXGBE_WRITE_REG(hw, IXGBE_TDLEN(i),
+                   adapter->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
+
+               /* Setup the HW Tx Head and Tail descriptor pointers */
+               IXGBE_WRITE_REG(hw, IXGBE_TDH(i), 0);
+               IXGBE_WRITE_REG(hw, IXGBE_TDT(i), 0);
+
+               /* Setup Transmit Descriptor Cmd Settings */
+               txr->txd_cmd = IXGBE_TXD_CMD_IFCS;
+               txr->queue_status = IXGBE_QUEUE_IDLE;
+
+               /*
+                * Disable Head Writeback.  The TXCTRL register lives at a
+                * different offset on 82599/X540, hence the per-MAC switch
+                * for both the read and the write below.
+                */
+               switch (hw->mac.type) {
+               case ixgbe_mac_82598EB:
+                       txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
+                       break;
+               case ixgbe_mac_82599EB:
+               case ixgbe_mac_X540:
+               default:
+                       txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
+                       break;
+                }
+               txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
+               switch (hw->mac.type) {
+               case ixgbe_mac_82598EB:
+                       IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), txctrl);
+                       break;
+               case ixgbe_mac_82599EB:
+               case ixgbe_mac_X540:
+               default:
+                       IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), txctrl);
+                       break;
+               }
+
+       }
+
+       if (hw->mac.type != ixgbe_mac_82598EB) {
+               u32 dmatxctl, rttdcs;
+               /* Global TX DMA enable (82599/X540-class parts only) */
+               dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
+               dmatxctl |= IXGBE_DMATXCTL_TE;
+               IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
+               /* Disable arbiter to set MTQC */
+               rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
+               rttdcs |= IXGBE_RTTDCS_ARBDIS;
+               IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
+               IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
+               rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
+               IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
+       }
+
+       return;
+}
+
+/*********************************************************************
+ *
+ *  Free all transmit rings.
+ *
+ **********************************************************************/
+static void
+ixgbe_free_transmit_structures(struct adapter *adapter)
+{
+       /* Tear down each ring under its lock, then destroy the lock */
+       for (int i = 0; i < adapter->num_queues; i++) {
+               struct tx_ring *txr = &adapter->tx_rings[i];
+
+               IXGBE_TX_LOCK(txr);
+               ixgbe_free_transmit_buffers(txr);
+               ixgbe_dma_free(adapter, &txr->txdma);
+               IXGBE_TX_UNLOCK(txr);
+               IXGBE_TX_LOCK_DESTROY(txr);
+       }
+       /* Finally release the ring array itself */
+       free(adapter->tx_rings, M_DEVBUF);
+}
+
+/*********************************************************************
+ *
+ *  Free transmit ring related data structures.
+ *
+ **********************************************************************/
+static void
+ixgbe_free_transmit_buffers(struct tx_ring *txr)
+{
+       struct adapter *adapter = txr->adapter;
+       struct ixgbe_tx_buf *tx_buffer;
+       int             i;
+
+       INIT_DEBUGOUT("free_transmit_ring: begin");
+
+       /* Nothing to do if buffer setup never got this far */
+       if (txr->tx_buffers == NULL)
+               return;
+
+       /*
+        * Release every buffer.  An attached mbuf implies a loaded map:
+        * sync and unload before freeing it.  A map without an mbuf may
+        * still be loaded from a partially-completed setup, so it is
+        * unloaded defensively before being destroyed.
+        */
+       tx_buffer = txr->tx_buffers;
+       for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
+               if (tx_buffer->m_head != NULL) {
+                       bus_dmamap_sync(txr->txtag, tx_buffer->map,
+                           BUS_DMASYNC_POSTWRITE);
+                       bus_dmamap_unload(txr->txtag,
+                           tx_buffer->map);
+                       m_freem(tx_buffer->m_head);
+                       tx_buffer->m_head = NULL;
+                       if (tx_buffer->map != NULL) {
+                               bus_dmamap_destroy(txr->txtag,
+                                   tx_buffer->map);
+                               tx_buffer->map = NULL;
+                       }
+               } else if (tx_buffer->map != NULL) {
+                       bus_dmamap_unload(txr->txtag,
+                           tx_buffer->map);
+                       bus_dmamap_destroy(txr->txtag,
+                           tx_buffer->map);
+                       tx_buffer->map = NULL;
+               }
+       }
+#if __FreeBSD_version >= 800000
+       if (txr->br != NULL)
+               buf_ring_free(txr->br, M_DEVBUF);
+#endif
+       /* NULL out freed resources so a second call is harmless */
+       if (txr->tx_buffers != NULL) {
+               free(txr->tx_buffers, M_DEVBUF);
+               txr->tx_buffers = NULL;
+       }
+       if (txr->txtag != NULL) {
+               bus_dma_tag_destroy(txr->txtag);
+               txr->txtag = NULL;
+       }
+       return;
+}
+
+/*********************************************************************
+ *
+ *  Advanced Context Descriptor setup for VLAN or CSUM
+ *
+ **********************************************************************/
+
+static boolean_t
+ixgbe_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
+{
+       struct adapter *adapter = txr->adapter;
+       struct ixgbe_adv_tx_context_desc *TXD;
+       struct ixgbe_tx_buf        *tx_buffer;
+       u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
+       struct ether_vlan_header *eh;
+       struct ip *ip;
+       struct ip6_hdr *ip6;
+       int  ehdrlen, ip_hlen = 0;
+       u16     etype;
+       u8      ipproto = 0;
+       bool    offload = TRUE;
+       int ctxd = txr->next_avail_desc;
+       u16 vtag = 0;
+
+
+       /* No checksum offload requested on this packet */
+       if ((mp->m_pkthdr.csum_flags & CSUM_OFFLOAD) == 0)
+               offload = FALSE;
+
+       /* The context descriptor occupies the next free ring slot */
+       tx_buffer = &txr->tx_buffers[ctxd];
+       TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
+
+       /*
+       ** In advanced descriptors the vlan tag must 
+       ** be placed into the descriptor itself.
+       */
+       if (mp->m_flags & M_VLANTAG) {
+               vtag = htole16(mp->m_pkthdr.ether_vtag);
+               vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
+       } else if (offload == FALSE)
+               /* Neither VLAN nor csum: no context descriptor needed */
+               return FALSE;
+
+       /*
+        * Determine where frame payload starts.
+        * Jump over vlan headers if already present,
+        * helpful for QinQ too.
+        */
+       eh = mtod(mp, struct ether_vlan_header *);
+       if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
+               etype = ntohs(eh->evl_proto);
+               ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
+       } else {
+               etype = ntohs(eh->evl_encap_proto);
+               ehdrlen = ETHER_HDR_LEN;
+       }
+
+       /* Set the ether header length */
+       vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
+
+       /* Extract the L3 header length and protocol for the descriptor */
+       switch (etype) {
+               case ETHERTYPE_IP:
+                       ip = (struct ip *)(mp->m_data + ehdrlen);
+                       ip_hlen = ip->ip_hl << 2;
+                       ipproto = ip->ip_p;
+                       type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
+                       break;
+               case ETHERTYPE_IPV6:
+                       ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
+                       ip_hlen = sizeof(struct ip6_hdr);
+                       ipproto = ip6->ip6_nxt;
+                       type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
+                       break;
+               default:
+                       offload = FALSE;
+                       break;
+       }
+
+       vlan_macip_lens |= ip_hlen;
+       type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
+
+       /* Select the L4 checksum type only if the stack requested it */
+       switch (ipproto) {
+               case IPPROTO_TCP:
+                       if (mp->m_pkthdr.csum_flags & CSUM_TCP)
+                               type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
+                       break;
+
+               case IPPROTO_UDP:
+                       if (mp->m_pkthdr.csum_flags & CSUM_UDP)
+                               type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
+                       break;
+
+#if __FreeBSD_version >= 800000
+               case IPPROTO_SCTP:
+                       if (mp->m_pkthdr.csum_flags & CSUM_SCTP)
+                               type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
+                       break;
+#endif
+               default:
+                       offload = FALSE;
+                       break;
+       }
+
+       /* Now copy bits into descriptor */
+       TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
+       TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
+       TXD->seqnum_seed = htole32(0);
+       TXD->mss_l4len_idx = htole32(0);
+
+       /* Context slot carries no mbuf and is not an end-of-packet */
+       tx_buffer->m_head = NULL;
+       tx_buffer->eop_index = -1;
+
+       /* We've consumed the first desc, adjust counters */
+       if (++ctxd == adapter->num_tx_desc)
+               ctxd = 0;
+       txr->next_avail_desc = ctxd;
+       --txr->tx_avail;
+
+        return (offload);
+}
+
+/**********************************************************************
+ *
+ *  Setup work for hardware segmentation offload (TSO) on
+ *  adapters using advanced tx descriptors
+ *
+ **********************************************************************/
+static boolean_t
+ixgbe_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *paylen)
+{
+       struct adapter *adapter = txr->adapter;
+       struct ixgbe_adv_tx_context_desc *TXD;
+       struct ixgbe_tx_buf        *tx_buffer;
+       u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
+       u32 mss_l4len_idx = 0;
+       u16 vtag = 0;
+       int ctxd, ehdrlen,  hdrlen, ip_hlen, tcp_hlen;
+       struct ether_vlan_header *eh;
+       struct ip *ip;
+       struct tcphdr *th;
+
+
+       /*
+        * Determine where frame payload starts.
+        * Jump over vlan headers if already present
+        */
+       eh = mtod(mp, struct ether_vlan_header *);
+       if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) 
+               ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
+       else
+               ehdrlen = ETHER_HDR_LEN;
+
+        /* Ensure we have at least the IP+TCP header in the first mbuf. */
+        if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr))
+               return FALSE;
+
+       ctxd = txr->next_avail_desc;
+       tx_buffer = &txr->tx_buffers[ctxd];
+       TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
+
+       /* Only IPv4/TCP segmentation is handled here */
+       ip = (struct ip *)(mp->m_data + ehdrlen);
+       if (ip->ip_p != IPPROTO_TCP)
+               return FALSE;   /* 0 */
+       /*
+        * Prime the headers for hardware TSO: zero the IP checksum and
+        * seed the TCP checksum with the pseudo-header sum (the device
+        * completes both per generated segment).  Note this mutates the
+        * packet in place.
+        */
+       ip->ip_sum = 0;
+       ip_hlen = ip->ip_hl << 2;
+       th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
+       th->th_sum = in_pseudo(ip->ip_src.s_addr,
+           ip->ip_dst.s_addr, htons(IPPROTO_TCP));
+       tcp_hlen = th->th_off << 2;
+       hdrlen = ehdrlen + ip_hlen + tcp_hlen;
+
+       /* This is used in the transmit desc in encap */
+       *paylen = mp->m_pkthdr.len - hdrlen;
+
+       /* VLAN MACLEN IPLEN */
+       if (mp->m_flags & M_VLANTAG) {
+               vtag = htole16(mp->m_pkthdr.ether_vtag);
+                vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
+       }
+
+       vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
+       vlan_macip_lens |= ip_hlen;
+       TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
+
+       /* ADV DTYPE TUCMD */
+       type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
+       type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
+       type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
+       TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
+
+
+       /* MSS L4LEN IDX */
+       mss_l4len_idx |= (mp->m_pkthdr.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT);
+       mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
+       TXD->mss_l4len_idx = htole32(mss_l4len_idx);
+
+       /* Context slot carries no mbuf and is not an end-of-packet */
+       TXD->seqnum_seed = htole32(0);
+       tx_buffer->m_head = NULL;
+       tx_buffer->eop_index = -1;
+
+       /* Consume the context descriptor slot */
+       if (++ctxd == adapter->num_tx_desc)
+               ctxd = 0;
+
+       txr->tx_avail--;
+       txr->next_avail_desc = ctxd;
+       return TRUE;
+}
+
+#ifdef IXGBE_FDIR
+/*
+** This routine parses packet headers so that Flow
+** Director can make a hashed filter table entry 
+** allowing traffic flows to be identified and kept
+** on the same cpu.  This would be a performance
+** hit, but we only do it at IXGBE_FDIR_RATE of
+** packets.
+*/
+static void
+ixgbe_atr(struct tx_ring *txr, struct mbuf *mp)
+{
+       struct adapter                  *adapter = txr->adapter;
+       struct ix_queue                 *que;
+       struct ip                       *ip;
+       struct tcphdr                   *th;
+       struct udphdr                   *uh;
+       struct ether_vlan_header        *eh;
+       union ixgbe_atr_hash_dword      input = {.dword = 0}; 
+       union ixgbe_atr_hash_dword      common = {.dword = 0}; 
+       int                             ehdrlen, ip_hlen;
+       u16                             etype;
+
+       /* Locate the L3 header, skipping a VLAN header if present */
+       eh = mtod(mp, struct ether_vlan_header *);
+       if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
+               ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
+               etype = eh->evl_proto;
+       } else {
+               ehdrlen = ETHER_HDR_LEN;
+               etype = eh->evl_encap_proto;
+       }
+
+       /* Only handling IPv4 */
+       if (etype != htons(ETHERTYPE_IP))
+               return;
+
+       ip = (struct ip *)(mp->m_data + ehdrlen);
+       ip_hlen = ip->ip_hl << 2;
+
+       /* check if we're UDP or TCP */
+       switch (ip->ip_p) {
+       case IPPROTO_TCP:
+               th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
+               /* src and dst are inverted */
+               common.port.dst ^= th->th_sport;
+               common.port.src ^= th->th_dport;
+               input.formatted.flow_type ^= IXGBE_ATR_FLOW_TYPE_TCPV4;
+               break;
+       case IPPROTO_UDP:
+               uh = (struct udphdr *)((caddr_t)ip + ip_hlen);
+               /* src and dst are inverted */
+               common.port.dst ^= uh->uh_sport;
+               common.port.src ^= uh->uh_dport;
+               input.formatted.flow_type ^= IXGBE_ATR_FLOW_TYPE_UDPV4;
+               break;
+       default:
+               /* Other protocols are not hashed by Flow Director */
+               return;
+       }
+
+       /* Fold VLAN tag (or ethertype when untagged) and addresses in */
+       input.formatted.vlan_id = htobe16(mp->m_pkthdr.ether_vtag);
+       if (mp->m_pkthdr.ether_vtag)
+               common.flex_bytes ^= htons(ETHERTYPE_VLAN);
+       else
+               common.flex_bytes ^= etype;
+       common.ip ^= ip->ip_src.s_addr ^ ip->ip_dst.s_addr;
+
+       que = &adapter->queues[txr->me];
+       /*
+       ** This assumes the Rx queue and Tx
+       ** queue are bound to the same CPU
+       */
+       ixgbe_fdir_add_signature_filter_82599(&adapter->hw,
+           input, common, que->msix);
+}
+#endif /* IXGBE_FDIR */
+
+/**********************************************************************
+ *
+ *  Examine each tx_buffer in the used queue. If the hardware is done
+ *  processing the packet then free associated resources. The
+ *  tx_buffer is put back on the free queue.
+ *
+ **********************************************************************/
+static boolean_t
+ixgbe_txeof(struct tx_ring *txr)
+{
+       struct adapter  *adapter = txr->adapter;
+       struct ifnet    *ifp = adapter->ifp;
+       u32     first, last, done, processed;
+       struct ixgbe_tx_buf *tx_buffer;
+       struct ixgbe_legacy_tx_desc *tx_desc, *eop_desc;
+
+       /* Caller must hold the TX lock */
+       mtx_assert(&txr->tx_mtx, MA_OWNED);
+
+       /* Fast path: ring is completely free, nothing to clean */
+       if (txr->tx_avail == adapter->num_tx_desc) {
+               txr->queue_status = IXGBE_QUEUE_IDLE;
+               return FALSE;
+       }
+
+       processed = 0;
+       first = txr->next_to_clean;
+       tx_buffer = &txr->tx_buffers[first];
+       /* For cleanup we just use legacy struct */
+       tx_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
+       /* NOTE: eop_index of -1 compares correctly against u32 'last' */
+       last = tx_buffer->eop_index;
+       if (last == -1)
+               return FALSE;
+       eop_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
+
+       /*
+       ** Get the index of the first descriptor
+       ** BEYOND the EOP and call that 'done'.
+       ** I do this so the comparison in the
+       ** inner while loop below can be simple
+       */
+       if (++last == adapter->num_tx_desc) last = 0;
+       done = last;
+
+       /* Make device writebacks visible before reading status bits */
+        bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
+            BUS_DMASYNC_POSTREAD);
+       /*
+       ** Only the EOP descriptor of a packet now has the DD
+       ** bit set, this is what we look for...
+       */
+       while (eop_desc->upper.fields.status & IXGBE_TXD_STAT_DD) {
+               /* We clean the range of the packet */
+               while (first != done) {
+                       tx_desc->upper.data = 0;
+                       tx_desc->lower.data = 0;
+                       tx_desc->buffer_addr = 0;
+                       ++txr->tx_avail;
+                       ++processed;
+
+                       if (tx_buffer->m_head) {
+                               txr->bytes +=
+                                   tx_buffer->m_head->m_pkthdr.len;
+                               bus_dmamap_sync(txr->txtag,
+                                   tx_buffer->map,
+                                   BUS_DMASYNC_POSTWRITE);
+                               bus_dmamap_unload(txr->txtag,
+                                   tx_buffer->map);
+                               m_freem(tx_buffer->m_head);
+                               tx_buffer->m_head = NULL;
+                               tx_buffer->map = NULL;
+                       }
+                       tx_buffer->eop_index = -1;
+                       /* Progress made: reset watchdog reference time */
+                       txr->watchdog_time = ticks;
+
+                       if (++first == adapter->num_tx_desc)
+                               first = 0;
+
+                       tx_buffer = &txr->tx_buffers[first];
+                       tx_desc =
+                           (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
+               }
+               ++txr->packets;
+               ++ifp->if_opackets;
+               /* See if there is more work now */
+               last = tx_buffer->eop_index;
+               if (last != -1) {
+                       eop_desc =
+                           (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
+                       /* Get next done point */
+                       if (++last == adapter->num_tx_desc) last = 0;
+                       done = last;
+               } else
+                       break;
+       }
+       /* Hand the cleared descriptors back to the hardware */
+       bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
+           BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+
+       txr->next_to_clean = first;
+
+       /*
+       ** Watchdog calculation, we know there's
+       ** work outstanding or the first return
+       ** would have been taken, so none processed
+       ** for too long indicates a hang.
+       */
+       if ((!processed) && ((ticks - txr->watchdog_time) > IXGBE_WATCHDOG))
+               txr->queue_status = IXGBE_QUEUE_HUNG;
+
+       /*
+        * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack that
+        * it is OK to send packets. If there are no pending descriptors,
+        * clear the timeout. Otherwise, if some descriptors have been freed,
+        * restart the timeout.
+        */
+       if (txr->tx_avail > IXGBE_TX_CLEANUP_THRESHOLD) {
+               ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+               if (txr->tx_avail == adapter->num_tx_desc) {
+                       txr->queue_status = IXGBE_QUEUE_IDLE;
+                       return FALSE;
+               }
+       }
+
+       return TRUE;
+}
+
+/*********************************************************************
+ *
+ *  Refresh mbuf buffers for RX descriptor rings
+ *   - now keeps its own state so discards due to resource
+ *     exhaustion are unnecessary, if an mbuf cannot be obtained
+ *     it just returns, keeping its placeholder, thus it can simply
+ *     be recalled to try again.
+ *
+ **********************************************************************/
+static void
+ixgbe_refresh_mbufs(struct rx_ring *rxr, int limit)
+{
+       struct adapter          *adapter = rxr->adapter;
+       bus_dma_segment_t       hseg[1];
+       bus_dma_segment_t       pseg[1];
+       struct ixgbe_rx_buf     *rxbuf;
+       struct mbuf             *mh, *mp;
+       int                     i, j, nsegs, error;
+       bool                    refreshed = FALSE;
+
+       i = j = rxr->next_to_refresh;
+       /* Control the loop with one beyond */
+       if (++j == adapter->num_rx_desc)
+               j = 0;
+
+       while (j != limit) {
+               rxbuf = &rxr->rx_buffers[i];
+               if (rxr->hdr_split == FALSE)
+                       goto no_split;
+
+               /* Header mbuf: reuse the placeholder if one remains */
+               if (rxbuf->m_head == NULL) {
+                       mh = m_gethdr(M_DONTWAIT, MT_DATA);
+                       if (mh == NULL)
+                               goto update;
+               } else
+                       mh = rxbuf->m_head;
+
+               /*
+                * Reset lengths (a reused mbuf may have been trimmed);
+                * the duplicate m_len assignment from the original code
+                * was redundant and has been dropped.
+                */
+               mh->m_pkthdr.len = mh->m_len = MHLEN;
+               mh->m_flags |= M_PKTHDR;
+               /* Get the memory mapping */
+               error = bus_dmamap_load_mbuf_sg(rxr->htag,
+                   rxbuf->hmap, mh, hseg, &nsegs, BUS_DMA_NOWAIT);
+               if (error != 0) {
+                       printf("Refresh mbufs: hdr dmamap load"
+                           " failure - %d\n", error);
+                       m_free(mh);
+                       rxbuf->m_head = NULL;
+                       goto update;
+               }
+               rxbuf->m_head = mh;
+               bus_dmamap_sync(rxr->htag, rxbuf->hmap,
+                   BUS_DMASYNC_PREREAD);
+               rxr->rx_base[i].read.hdr_addr =
+                   htole64(hseg[0].ds_addr);
+
+no_split:
+               /* Payload mbuf: cluster-sized, reused if still present */
+               if (rxbuf->m_pack == NULL) {
+                       mp = m_getjcl(M_DONTWAIT, MT_DATA,
+                           M_PKTHDR, adapter->rx_mbuf_sz);
+                       if (mp == NULL)
+                               goto update;
+               } else
+                       mp = rxbuf->m_pack;
+
+               mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
+               /* Get the memory mapping */
+               error = bus_dmamap_load_mbuf_sg(rxr->ptag,
+                   rxbuf->pmap, mp, pseg, &nsegs, BUS_DMA_NOWAIT);
+               if (error != 0) {
+                       printf("Refresh mbufs: payload dmamap load"
+                           " failure - %d\n", error);
+                       m_free(mp);
+                       rxbuf->m_pack = NULL;
+                       goto update;
+               }
+               rxbuf->m_pack = mp;
+               bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
+                   BUS_DMASYNC_PREREAD);
+               rxr->rx_base[i].read.pkt_addr =
+                   htole64(pseg[0].ds_addr);
+
+               refreshed = TRUE;
+               /* Next is precalculated */
+               i = j;
+               rxr->next_to_refresh = i;
+               if (++j == adapter->num_rx_desc)
+                       j = 0;
+       }
+update:
+       if (refreshed) /* Update hardware tail index */
+               IXGBE_WRITE_REG(&adapter->hw,
+                   IXGBE_RDT(rxr->me), rxr->next_to_refresh);
+       return;
+}
+
+/*********************************************************************
+ *
+ *  Allocate memory for rx_buffer structures. Since we use one
+ *  rx_buffer per received packet, the maximum number of rx_buffer's
+ *  that we'll need is equal to the number of receive descriptors
+ *  that we've allocated.
+ *
+ **********************************************************************/
+static int
+ixgbe_allocate_receive_buffers(struct rx_ring *rxr)
+{
+       struct  adapter         *adapter = rxr->adapter;
+       device_t                dev = adapter->dev;
+       struct ixgbe_rx_buf     *rxbuf;
+       int                     i, bsize, error;
+
+       /* One ixgbe_rx_buf per receive descriptor, zero-initialized */
+       bsize = sizeof(struct ixgbe_rx_buf) * adapter->num_rx_desc;
+       if (!(rxr->rx_buffers =
+           (struct ixgbe_rx_buf *) malloc(bsize,
+           M_DEVBUF, M_NOWAIT | M_ZERO))) {
+               device_printf(dev, "Unable to allocate rx_buffer memory\n");
+               error = ENOMEM;
+               goto fail;
+       }
+
+       /* Tag for the small header mbufs (header-split path), 1 segment */
+       if ((error = bus_dma_tag_create(bus_get_dma_tag(dev),   /* parent */
+                                  1, 0,        /* alignment, bounds */
+                                  BUS_SPACE_MAXADDR,   /* lowaddr */
+                                  BUS_SPACE_MAXADDR,   /* highaddr */
+                                  NULL, NULL,          /* filter, filterarg */
+                                  MSIZE,               /* maxsize */
+                                  1,                   /* nsegments */
+                                  MSIZE,               /* maxsegsize */
+                                  0,                   /* flags */
+                                  NULL,                /* lockfunc */
+                                  NULL,                /* lockfuncarg */
+                                  &rxr->htag))) {
+               device_printf(dev, "Unable to create RX DMA tag\n");
+               goto fail;
+       }
+
+       /* Tag for the payload clusters, up to a 16K jumbo cluster */
+       if ((error = bus_dma_tag_create(bus_get_dma_tag(dev),   /* parent */
+                                  1, 0,        /* alignment, bounds */
+                                  BUS_SPACE_MAXADDR,   /* lowaddr */
+                                  BUS_SPACE_MAXADDR,   /* highaddr */
+                                  NULL, NULL,          /* filter, filterarg */
+                                  MJUM16BYTES,         /* maxsize */
+                                  1,                   /* nsegments */
+                                  MJUM16BYTES,         /* maxsegsize */
+                                  0,                   /* flags */
+                                  NULL,                /* lockfunc */
+                                  NULL,                /* lockfuncarg */
+                                  &rxr->ptag))) {
+               device_printf(dev, "Unable to create RX DMA tag\n");
+               goto fail;
+       }
+
+       /*
+       ** Create the per-descriptor header/payload dmamaps.
+       ** Note: the previous loop increment also advanced 'rxbuf'
+       ** (i++, rxbuf++); incrementing the pointer before its first
+       ** assignment is undefined behavior and the value was
+       ** overwritten at the top of every pass, so it is removed.
+       */
+       for (i = 0; i < adapter->num_rx_desc; i++) {
+               rxbuf = &rxr->rx_buffers[i];
+               error = bus_dmamap_create(rxr->htag,
+                   BUS_DMA_NOWAIT, &rxbuf->hmap);
+               if (error) {
+                       device_printf(dev, "Unable to create RX head map\n");
+                       goto fail;
+               }
+               error = bus_dmamap_create(rxr->ptag,
+                   BUS_DMA_NOWAIT, &rxbuf->pmap);
+               if (error) {
+                       device_printf(dev, "Unable to create RX pkt map\n");
+                       goto fail;
+               }
+       }
+
+       return (0);
+
+fail:
+       /* Frees all, but can handle partial completion */
+       ixgbe_free_receive_structures(adapter);
+       return (error);
+}
+
+/*
+** Extract the RSC (hardware LRO) merged-descriptor count
+** field from an advanced RX descriptor writeback.
+*/
+static inline u32
+ixgbe_rsc_count(union ixgbe_adv_rx_desc *rx)
+{
+       u32 lo_dword;
+
+       lo_dword = le32toh(rx->wb.lower.lo_dword.data);
+       return ((lo_dword & IXGBE_RXDADV_RSCCNT_MASK) >>
+           IXGBE_RXDADV_RSCCNT_SHIFT);
+}
+
+/*********************************************************************
+ *
+ *  Initialize Hardware RSC (LRO) feature on 82599
+ *  for an RX ring, this is toggled by the LRO capability
+ *  even though it is transparent to the stack.
+ *
+ **********************************************************************/
+static void
+ixgbe_setup_hw_rsc(struct rx_ring *rxr)
+{
+       struct  adapter         *adapter = rxr->adapter;
+       struct  ixgbe_hw        *hw = &adapter->hw;
+       u32                     rscctrl, rdrxctl;
+
+       /* Global RDRXCTL: clear RSCFRSTSIZE, enable CRC strip and
+       ** the RSC ACK coalescing control bit, then write back. */
+       rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
+       rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
+       rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
+       rdrxctl |= IXGBE_RDRXCTL_RSCACKC;
+       IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
+
+       /* Per-queue RSCCTL: enable RSC on this ring */
+       rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(rxr->me));
+       rscctrl |= IXGBE_RSCCTL_RSCEN;
+
+       /*
+       ** Limit the total number of descriptors that
+       ** can be combined, so it does not exceed 64K
+       */
+       if (adapter->rx_mbuf_sz == MCLBYTES)
+               rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
+       else if (adapter->rx_mbuf_sz == MJUMPAGESIZE)
+               rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
+       else if (adapter->rx_mbuf_sz == MJUM9BYTES)
+               rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
+       else  /* Using 16K cluster */
+               rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
+
+       IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(rxr->me), rscctrl);
+
+       /* Enable TCP header recognition */
+       IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0),
+           (IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0)) |
+           IXGBE_PSRTYPE_TCPHDR));
+
+       /* Disable RSC for ACK packets */
+       IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
+           (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
+
+       /* Mark the ring so the RX cleanup path knows descriptors
+       ** may be hardware-merged (see ixgbe_rsc_count usage). */
+       rxr->hw_rsc = TRUE;
+}
+
+
+static void
+ixgbe_free_receive_ring(struct rx_ring *rxr)
+{
+       struct  adapter         *adapter = rxr->adapter;
+       struct ixgbe_rx_buf       *rxbuf;
+       int j;
+
+       /*
+       ** Release both the header and payload mbufs for every
+       ** descriptor slot: sync, unload the dmamap, then free.
+       */
+       for (j = 0; j < adapter->num_rx_desc; j++) {
+               rxbuf = &rxr->rx_buffers[j];
+               if (rxbuf->m_head != NULL) {
+                       bus_dmamap_sync(rxr->htag, rxbuf->hmap,
+                           BUS_DMASYNC_POSTREAD);
+                       bus_dmamap_unload(rxr->htag, rxbuf->hmap);
+                       /* Restore pkthdr so m_freem frees the whole chain */
+                       rxbuf->m_head->m_flags |= M_PKTHDR;
+                       m_freem(rxbuf->m_head);
+                       rxbuf->m_head = NULL;
+               }
+               if (rxbuf->m_pack != NULL) {
+                       bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
+                           BUS_DMASYNC_POSTREAD);
+                       bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
+                       rxbuf->m_pack->m_flags |= M_PKTHDR;
+                       m_freem(rxbuf->m_pack);
+                       rxbuf->m_pack = NULL;
+               }
+       }
+}
+
+
+/*********************************************************************
+ *
+ *  Initialize a receive ring and its buffers.
+ *
+ **********************************************************************/
+static int
+ixgbe_setup_receive_ring(struct rx_ring *rxr)
+{
+       struct  adapter         *adapter;
+       struct ifnet            *ifp;
+       device_t                dev;
+       struct ixgbe_rx_buf     *rxbuf;
+       bus_dma_segment_t       pseg[1], hseg[1];
+       struct lro_ctrl         *lro = &rxr->lro;
+       int                     rsize, nsegs, error = 0;
+
+       adapter = rxr->adapter;
+       ifp = adapter->ifp;
+       dev = adapter->dev;
+
+       /* Clear the ring contents */
+       /* The RX lock is held for the entire (re)initialization and
+       ** released on both the success and failure paths below. */
+       IXGBE_RX_LOCK(rxr);
+       rsize = roundup2(adapter->num_rx_desc *
+           sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
+       bzero((void *)rxr->rx_base, rsize);
+
+       /* Free current RX buffer structs and their mbufs */
+       ixgbe_free_receive_ring(rxr);
+
+       /* Configure header split? */
+       if (ixgbe_header_split)
+               rxr->hdr_split = TRUE;
+
+       /* Now replenish the mbufs */
+       for (int j = 0; j != adapter->num_rx_desc; ++j) {
+               struct mbuf     *mh, *mp;
+
+               rxbuf = &rxr->rx_buffers[j];
+               /*
+               ** Don't allocate mbufs if not
+               ** doing header split, its wasteful
+               */ 
+               if (rxr->hdr_split == FALSE)
+                       goto skip_head;
+
+               /* First the header */
+               rxbuf->m_head = m_gethdr(M_NOWAIT, MT_DATA);
+               if (rxbuf->m_head == NULL) {
+                       error = ENOBUFS;
+                       goto fail;
+               }
+               /* Offset the data so the IP header lands aligned */
+               m_adj(rxbuf->m_head, ETHER_ALIGN);
+               mh = rxbuf->m_head;
+               mh->m_len = mh->m_pkthdr.len = MHLEN;
+               mh->m_flags |= M_PKTHDR;
+               /* Get the memory mapping */
+               error = bus_dmamap_load_mbuf_sg(rxr->htag,
+                   rxbuf->hmap, rxbuf->m_head, hseg,
+                   &nsegs, BUS_DMA_NOWAIT);
+               if (error != 0) /* Nothing elegant to do here */
+                       goto fail;
+               bus_dmamap_sync(rxr->htag,
+                   rxbuf->hmap, BUS_DMASYNC_PREREAD);
+               /* Update descriptor */
+               rxr->rx_base[j].read.hdr_addr = htole64(hseg[0].ds_addr);
+
+skip_head:
+               /* Now the payload cluster */
+               rxbuf->m_pack = m_getjcl(M_NOWAIT, MT_DATA,
+                   M_PKTHDR, adapter->rx_mbuf_sz);
+               if (rxbuf->m_pack == NULL) {
+                       error = ENOBUFS;
+                        goto fail;
+               }
+               mp = rxbuf->m_pack;
+               mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
+               /* Get the memory mapping */
+               error = bus_dmamap_load_mbuf_sg(rxr->ptag,
+                   rxbuf->pmap, mp, pseg,
+                   &nsegs, BUS_DMA_NOWAIT);
+               if (error != 0)
+                        goto fail;
+               bus_dmamap_sync(rxr->ptag,
+                   rxbuf->pmap, BUS_DMASYNC_PREREAD);
+               /* Update descriptor */
+               rxr->rx_base[j].read.pkt_addr = htole64(pseg[0].ds_addr);
+       }
+
+
+       /* Setup our descriptor indices */
+       rxr->next_to_check = 0;
+       rxr->next_to_refresh = 0;
+       rxr->lro_enabled = FALSE;
+       rxr->rx_split_packets = 0;
+       rxr->rx_bytes = 0;
+       rxr->discard = FALSE;
+
+       bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
+           BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+
+       /*
+       ** Now set up the LRO interface:
+       ** 82598 uses software LRO, the
+       ** 82599 and X540 use a hardware assist.
+       */
+       if ((adapter->hw.mac.type != ixgbe_mac_82598EB) &&
+           (ifp->if_capenable & IFCAP_RXCSUM) &&
+           (ifp->if_capenable & IFCAP_LRO))
+               ixgbe_setup_hw_rsc(rxr);
+       else if (ifp->if_capenable & IFCAP_LRO) {
+               int err = tcp_lro_init(lro);
+               if (err) {
+                       device_printf(dev, "LRO Initialization failed!\n");
+                       goto fail;
+               }
+               INIT_DEBUGOUT("RX Soft LRO Initialized\n");
+               rxr->lro_enabled = TRUE;
+               lro->ifp = adapter->ifp;
+       }
+
+       IXGBE_RX_UNLOCK(rxr);
+       return (0);
+
+fail:
+       /* Undo any partially replenished slots; lock still held here */
+       ixgbe_free_receive_ring(rxr);
+       IXGBE_RX_UNLOCK(rxr);
+       return (error);
+}
+
+/*********************************************************************
+ *
+ *  Initialize all receive rings.
+ *
+ **********************************************************************/
+static int
+ixgbe_setup_receive_structures(struct adapter *adapter)
+{
+       struct rx_ring *rxr;
+       int i, j;
+
+       /* Bring up each ring in turn; stop at the first failure. */
+       for (j = 0; j < adapter->num_queues; j++) {
+               rxr = &adapter->rx_rings[j];
+               if (ixgbe_setup_receive_ring(rxr) != 0)
+                       goto fail;
+       }
+
+       return (0);
+fail:
+       /*
+        * Ring 'j' failed and has already cleaned up after itself;
+        * release the buffers of the rings that completed before it.
+        */
+       for (i = 0; i < j; ++i)
+               ixgbe_free_receive_ring(&adapter->rx_rings[i]);
+
+       return (ENOBUFS);
+}
+
+/*********************************************************************
+ *
+ *  Setup receive registers and features.
+ *
+ **********************************************************************/
+#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
+
+static void
+ixgbe_initialize_receive_units(struct adapter *adapter)
+{
+       struct  rx_ring *rxr = adapter->rx_rings;
+       struct ixgbe_hw *hw = &adapter->hw;
+       struct ifnet   *ifp = adapter->ifp;
+       u32             bufsz, rxctrl, fctrl, srrctl, rxcsum;
+       u32             reta, mrqc = 0, hlreg, random[10];
+
+
+       /*
+        * Make sure receives are disabled while
+        * setting up the descriptor ring
+        */
+       rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+       IXGBE_WRITE_REG(hw, IXGBE_RXCTRL,
+           rxctrl & ~IXGBE_RXCTRL_RXEN);
+
+       /* Enable broadcasts */
+       fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+       fctrl |= IXGBE_FCTRL_BAM;
+       fctrl |= IXGBE_FCTRL_DPF;
+       fctrl |= IXGBE_FCTRL_PMCF;
+       IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
+
+       /* Set for Jumbo Frames? */
+       hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+       if (ifp->if_mtu > ETHERMTU)
+               hlreg |= IXGBE_HLREG0_JUMBOEN;
+       else
+               hlreg &= ~IXGBE_HLREG0_JUMBOEN;
+       IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
+
+       /* Buffer size field of SRRCTL, in SRRCTL's units */
+       bufsz = adapter->rx_mbuf_sz  >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+
+       /* Per-queue descriptor base/length and buffer sizing */
+       for (int i = 0; i < adapter->num_queues; i++, rxr++) {
+               u64 rdba = rxr->rxdma.dma_paddr;
+
+               /* Setup the Base and Length of the Rx Descriptor Ring */
+               IXGBE_WRITE_REG(hw, IXGBE_RDBAL(i),
+                              (rdba & 0x00000000ffffffffULL));
+               IXGBE_WRITE_REG(hw, IXGBE_RDBAH(i), (rdba >> 32));
+               IXGBE_WRITE_REG(hw, IXGBE_RDLEN(i),
+                   adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
+
+               /* Set up the SRRCTL register */
+               srrctl = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
+               srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
+               srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
+               srrctl |= bufsz;
+               if (rxr->hdr_split) {
+                       /* Use a standard mbuf for the header */
+                       srrctl |= ((IXGBE_RX_HDR <<
+                           IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT)
+                           & IXGBE_SRRCTL_BSIZEHDR_MASK);
+                       srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
+               } else
+                       srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
+               IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
+
+               /* Setup the HW Rx Head and Tail Descriptor Pointers */
+               IXGBE_WRITE_REG(hw, IXGBE_RDH(i), 0);
+               IXGBE_WRITE_REG(hw, IXGBE_RDT(i), 0);
+       }
+
+       if (adapter->hw.mac.type != ixgbe_mac_82598EB) {
+               u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
+                             IXGBE_PSRTYPE_UDPHDR |
+                             IXGBE_PSRTYPE_IPV4HDR |
+                             IXGBE_PSRTYPE_IPV6HDR;
+               IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
+       }
+
+       rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
+
+       /* Setup RSS */
+       if (adapter->num_queues > 1) {
+               int i, j;
+               reta = 0;
+
+               /* set up random bits */
+               /* Random RSS hash key material, written to RSSRK below */
+               arc4rand(&random, sizeof(random), 0);
+
+               /* Set up the redirection table */
+               /* 128 one-byte entries, packed 4 per 32-bit RETA write */
+               for (i = 0, j = 0; i < 128; i++, j++) {
+                       if (j == adapter->num_queues) j = 0;
+                       reta = (reta << 8) | (j * 0x11);
+                       if ((i & 3) == 3)
+                               IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
+               }
+
+               /* Now fill our hash function seeds */
+               for (int i = 0; i < 10; i++)
+                       IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random[i]);
+
+               /* Perform hash on these packet types */
+               mrqc = IXGBE_MRQC_RSSEN
+                    | IXGBE_MRQC_RSS_FIELD_IPV4
+                    | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
+                    | IXGBE_MRQC_RSS_FIELD_IPV4_UDP
+                    | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
+                    | IXGBE_MRQC_RSS_FIELD_IPV6_EX
+                    | IXGBE_MRQC_RSS_FIELD_IPV6
+                    | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
+                    | IXGBE_MRQC_RSS_FIELD_IPV6_UDP
+                    | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
+               IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
+
+               /* RSS and RX IPP Checksum are mutually exclusive */
+               rxcsum |= IXGBE_RXCSUM_PCSD;
+       }
+
+       if (ifp->if_capenable & IFCAP_RXCSUM)
+               rxcsum |= IXGBE_RXCSUM_PCSD;
+
+       if (!(rxcsum & IXGBE_RXCSUM_PCSD))
+               rxcsum |= IXGBE_RXCSUM_IPPCSE;
+
+       IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
+
+       return;
+}
+
+/*********************************************************************
+ *
+ *  Free all receive rings.
+ *
+ **********************************************************************/
+static void
+ixgbe_free_receive_structures(struct adapter *adapter)
+{
+       struct rx_ring *rxr;
+
+       /*
+       ** Per-ring teardown: buffer structs and dmamaps first,
+       ** then the software LRO state, and finally the descriptor
+       ** ring DMA memory itself.
+       */
+       for (int i = 0; i < adapter->num_queues; i++) {
+               rxr = &adapter->rx_rings[i];
+               ixgbe_free_receive_buffers(rxr);
+               /* Free LRO memory */
+               tcp_lro_free(&rxr->lro);
+               /* Free the ring memory as well */
+               ixgbe_dma_free(adapter, &rxr->rxdma);
+       }
+
+       free(adapter->rx_rings, M_DEVBUF);
+}
+
+
+/*********************************************************************
+ *
+ *  Free receive ring data structures
+ *
+ **********************************************************************/
+static void
+ixgbe_free_receive_buffers(struct rx_ring *rxr)
+{
+       struct adapter          *adapter = rxr->adapter;
+       struct ixgbe_rx_buf     *rxbuf;
+
+       INIT_DEBUGOUT("free_receive_structures: begin");
+
+       /* Cleanup any existing buffers */
+       if (rxr->rx_buffers != NULL) {
+               /* Free mbufs and destroy the dmamaps for every slot */
+               for (int i = 0; i < adapter->num_rx_desc; i++) {
+                       rxbuf = &rxr->rx_buffers[i];
+                       if (rxbuf->m_head != NULL) {
+                               bus_dmamap_sync(rxr->htag, rxbuf->hmap,
+                                   BUS_DMASYNC_POSTREAD);
+                               bus_dmamap_unload(rxr->htag, rxbuf->hmap);
+                               rxbuf->m_head->m_flags |= M_PKTHDR;
+                               m_freem(rxbuf->m_head);
+                       }
+                       if (rxbuf->m_pack != NULL) {
+                               bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
+                                   BUS_DMASYNC_POSTREAD);
+                               bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
+                               rxbuf->m_pack->m_flags |= M_PKTHDR;
+                               m_freem(rxbuf->m_pack);
+                       }
+                       rxbuf->m_head = NULL;
+                       rxbuf->m_pack = NULL;
+                       if (rxbuf->hmap != NULL) {
+                               bus_dmamap_destroy(rxr->htag, rxbuf->hmap);
+                               rxbuf->hmap = NULL;
+                       }
+                       if (rxbuf->pmap != NULL) {
+                               bus_dmamap_destroy(rxr->ptag, rxbuf->pmap);
+                               rxbuf->pmap = NULL;
+                       }
+               }
+               /* Already inside the rx_buffers != NULL guard, so the
+               ** previous redundant re-check has been removed. */
+               free(rxr->rx_buffers, M_DEVBUF);
+               rxr->rx_buffers = NULL;
+       }
+
+       /* Tear down the DMA tags last, after all maps are destroyed */
+       if (rxr->htag != NULL) {
+               bus_dma_tag_destroy(rxr->htag);
+               rxr->htag = NULL;
+       }
+       if (rxr->ptag != NULL) {
+               bus_dma_tag_destroy(rxr->ptag);
+               rxr->ptag = NULL;
+       }
+
+       return;
+}
+
+static __inline void
+ixgbe_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u32 ptype)
+{
+                 
+        /*
+         * ATM LRO is only for IPv4/TCP packets and TCP checksum of the packet
+         * should be computed by hardware. Also it should not have VLAN tag in
+         * ethernet header.
+         */
+        if (rxr->lro_enabled &&
+            (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
+            (ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
+            (ptype & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
+            (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP) &&
+            (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
+            (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
+                /*
+                 * Send to the stack if:
+                 **  - LRO not enabled, or
+                 **  - no LRO resources, or
+                 **  - lro enqueue fails
+                 */
+                if (rxr->lro.lro_cnt != 0)
+                        if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
+                                return;
+        }
+       /* The RX ring lock is dropped across the stack input call and
+       ** re-taken afterwards; the caller (ixgbe_rxeof) re-reads
+       ** next_to_check after this returns for exactly that reason. */
+       IXGBE_RX_UNLOCK(rxr);
+        (*ifp->if_input)(ifp, m);
+       IXGBE_RX_LOCK(rxr);
+}
+
+static __inline void
+ixgbe_rx_discard(struct rx_ring *rxr, int i)
+{
+       struct ixgbe_rx_buf     *rbuf = &rxr->rx_buffers[i];
+
+       /* Free any partially assembled chain headed at this slot */
+       if (rbuf->fmp != NULL) {
+               rbuf->fmp->m_flags |= M_PKTHDR;
+               m_freem(rbuf->fmp);
+               rbuf->fmp = NULL;
+       }
+
+       /*
+       ** With advanced descriptors the writeback clobbers the
+       ** buffer addresses, so it is easier to just free the
+       ** existing mbufs and let the normal refresh path allocate
+       ** and map replacements.
+       */
+       if (rbuf->m_head != NULL) {
+               m_free(rbuf->m_head);
+               rbuf->m_head = NULL;
+       }
+       if (rbuf->m_pack != NULL) {
+               m_free(rbuf->m_pack);
+               rbuf->m_pack = NULL;
+       }
+}
+
+
+/*********************************************************************
+ *
+ *  This routine executes in interrupt context. It replenishes
+ *  the mbufs in the descriptor and sends data which has been
+ *  dma'ed into host memory to upper layer.
+ *
+ *  We loop at most count times if count is > 0, or until done if
+ *  count < 0.
+ *
+ *  Return TRUE for more work, FALSE for all clean.
+ *********************************************************************/
+static bool
+ixgbe_rxeof(struct ix_queue *que, int count)
+{
+       struct adapter          *adapter = que->adapter;
+       struct rx_ring          *rxr = que->rxr;
+       struct ifnet            *ifp = adapter->ifp;
+       struct lro_ctrl         *lro = &rxr->lro;
+       struct lro_entry        *queued;
+       int                     i, nextp, processed = 0;
+       u32                     staterr = 0;
+       union ixgbe_adv_rx_desc *cur;
+       struct ixgbe_rx_buf     *rbuf, *nbuf;
+
+       IXGBE_RX_LOCK(rxr);
+
+       /* Walk descriptors from next_to_check until the budget is
+       ** spent or the hardware has no more completed descriptors. */
+       for (i = rxr->next_to_check; count != 0;) {
+               struct mbuf     *sendmp, *mh, *mp;
+               u32             rsc, ptype;
+               u16             hlen, plen, hdr, vtag;
+               bool            eop;
+               /* Sync the ring. */
+               bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
+                   BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+
+               cur = &rxr->rx_base[i];
+               staterr = le32toh(cur->wb.upper.status_error);
+
+               /* DD clear means the hardware has not finished this one */
+               if ((staterr & IXGBE_RXD_STAT_DD) == 0)
+                       break;
+               if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
+                       break;
+
+               count--;
+               sendmp = NULL;
+               nbuf = NULL;
+               rsc = 0;
+               cur->wb.upper.status_error = 0;
+               rbuf = &rxr->rx_buffers[i];
+               mh = rbuf->m_head;
+               mp = rbuf->m_pack;
+
+               /* Pull the writeback fields we need, host byte order */
+               plen = le16toh(cur->wb.upper.length);
+               ptype = le32toh(cur->wb.lower.lo_dword.data) &
+                   IXGBE_RXDADV_PKTTYPE_MASK;
+               hdr = le16toh(cur->wb.lower.lo_dword.hs_rss.hdr_info);
+               vtag = le16toh(cur->wb.upper.vlan);
+               eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);
+
+               /* Make sure bad packets are discarded */
+               if (((staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) ||
+                   (rxr->discard)) {
+                       ifp->if_ierrors++;
+                       rxr->rx_discarded++;
+                       /* 'discard' stays set so the rest of a bad
+                       ** multi-descriptor frame is dropped too */
+                       if (eop)
+                               rxr->discard = FALSE;
+                       else
+                               rxr->discard = TRUE;
+                       ixgbe_rx_discard(rxr, i);
+                       goto next_desc;
+               }
+
+               /*
+               ** On 82599 which supports a hardware
+               ** LRO (called HW RSC), packets need
+               ** not be fragmented across sequential
+               ** descriptors, rather the next descriptor
+               ** is indicated in bits of the descriptor.
+               ** This also means that we might proceses
+               ** more than one packet at a time, something
+               ** that has never been true before, it
+               ** required eliminating global chain pointers
+               ** in favor of what we are doing here.  -jfv
+               */
+               if (!eop) {
+                       /*
+                       ** Figure out the next descriptor
+                       ** of this frame.
+                       */
+                       if (rxr->hw_rsc == TRUE) {
+                               rsc = ixgbe_rsc_count(cur);
+                               rxr->rsc_num += (rsc - 1);
+                       }
+                       if (rsc) { /* Get hardware index */
+                               nextp = ((staterr &
+                                   IXGBE_RXDADV_NEXTP_MASK) >>
+                                   IXGBE_RXDADV_NEXTP_SHIFT);
+                       } else { /* Just sequential */
+                               nextp = i + 1;
+                               if (nextp == adapter->num_rx_desc)
+                                       nextp = 0;
+                       }
+                       nbuf = &rxr->rx_buffers[nextp];
+                       prefetch(nbuf);
+               }
+               /*
+               ** The header mbuf is ONLY used when header 
+               ** split is enabled, otherwise we get normal 
+               ** behavior, ie, both header and payload
+               ** are DMA'd into the payload buffer.
+               **
+               ** Rather than using the fmp/lmp global pointers
+               ** we now keep the head of a packet chain in the
+               ** buffer struct and pass this along from one
+               ** descriptor to the next, until we get EOP.
+               */
+               if (rxr->hdr_split && (rbuf->fmp == NULL)) {
+                       /* This must be an initial descriptor */
+                       hlen = (hdr & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
+                           IXGBE_RXDADV_HDRBUFLEN_SHIFT;
+                       if (hlen > IXGBE_RX_HDR)
+                               hlen = IXGBE_RX_HDR;
+                       mh->m_len = hlen;
+                       mh->m_flags |= M_PKTHDR;
+                       mh->m_next = NULL;
+                       mh->m_pkthdr.len = mh->m_len;
+                       /* Null buf pointer so it is refreshed */
+                       rbuf->m_head = NULL;
+                       /*
+                       ** Check the payload length, this
+                       ** could be zero if its a small
+                       ** packet.
+                       */
+                       if (plen > 0) {
+                               mp->m_len = plen;
+                               mp->m_next = NULL;
+                               mp->m_flags &= ~M_PKTHDR;
+                               mh->m_next = mp;
+                               mh->m_pkthdr.len += mp->m_len;
+                               /* Null buf pointer so it is refreshed */
+                               rbuf->m_pack = NULL;
+                               rxr->rx_split_packets++;
+                       }
+                       /*
+                       ** Now create the forward
+                       ** chain so when complete 
+                       ** we wont have to.
+                       */
+                        if (eop == 0) {
+                               /* stash the chain head */
+                                nbuf->fmp = mh;
+                               /* Make forward chain */
+                                if (plen)
+                                        mp->m_next = nbuf->m_pack;
+                                else
+                                        mh->m_next = nbuf->m_pack;
+                        } else {
+                               /* Singlet, prepare to send */
+                                sendmp = mh;
+                                if ((adapter->num_vlans) &&
+                                 (staterr & IXGBE_RXD_STAT_VP)) {
+                                        sendmp->m_pkthdr.ether_vtag = vtag;
+                                        sendmp->m_flags |= M_VLANTAG;
+                                }
+                        }
+               } else {
+                       /*
+                       ** Either no header split, or a
+                       ** secondary piece of a fragmented
+                       ** split packet.
+                       */
+                       mp->m_len = plen;
+                       /*
+                       ** See if there is a stored head
+                       ** that determines what we are
+                       */
+                       sendmp = rbuf->fmp;
+                       rbuf->m_pack = rbuf->fmp = NULL;
+
+                       if (sendmp != NULL) /* secondary frag */
+                               sendmp->m_pkthdr.len += mp->m_len;
+                       else {
+                               /* first desc of a non-ps chain */
+                               sendmp = mp;
+                               sendmp->m_flags |= M_PKTHDR;
+                               sendmp->m_pkthdr.len = mp->m_len;
+                               if (staterr & IXGBE_RXD_STAT_VP) {
+                                       sendmp->m_pkthdr.ether_vtag = vtag;
+                                       sendmp->m_flags |= M_VLANTAG;
+                               }
+                        }
+                       /* Pass the head pointer on */
+                       if (eop == 0) {
+                               nbuf->fmp = sendmp;
+                               sendmp = NULL;
+                               mp->m_next = nbuf->m_pack;
+                       }
+               }
+               ++processed;
+               /* Sending this frame? */
+               if (eop) {
+                       sendmp->m_pkthdr.rcvif = ifp;
+                       ifp->if_ipackets++;
+                       rxr->rx_packets++;
+                       /* capture data for AIM */
+                       rxr->bytes += sendmp->m_pkthdr.len;
+                       rxr->rx_bytes += sendmp->m_pkthdr.len;
+                       if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
+                               ixgbe_rx_checksum(staterr, sendmp, ptype);
+#if __FreeBSD_version >= 800000
+                       sendmp->m_pkthdr.flowid = que->msix;
+                       sendmp->m_flags |= M_FLOWID;
+#endif
+               }
+next_desc:
+               bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
+                   BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+
+               /* Advance our pointers to the next descriptor. */
+               if (++i == adapter->num_rx_desc)
+                       i = 0;
+
+               /* Now send to the stack or do LRO */
+               /* next_to_check is saved/reloaded around this call
+               ** because ixgbe_rx_input drops the RX lock while the
+               ** mbuf is handed to the stack. */
+               if (sendmp != NULL) {
+                       rxr->next_to_check = i;
+                       ixgbe_rx_input(rxr, ifp, sendmp, ptype);
+                       i = rxr->next_to_check;
+               }
+
+               /* Every 8 descriptors we go to refresh mbufs */
+               if (processed == 8) {
+                       ixgbe_refresh_mbufs(rxr, i);
+                       processed = 0;
+               }
+       }
+
+       /* Refresh any remaining buf structs */
+       if (ixgbe_rx_unrefreshed(rxr))
+               ixgbe_refresh_mbufs(rxr, i);
+
+       rxr->next_to_check = i;
+
+       /*
+        * Flush any outstanding LRO work
+        */
+       while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
+               SLIST_REMOVE_HEAD(&lro->lro_active, next);
+               tcp_lro_flush(lro, queued);
+       }
+
+       IXGBE_RX_UNLOCK(rxr);
+
+       /*
+       ** We still have cleaning to do?
+       ** Schedule another interrupt if so.
+       */
+       if ((staterr & IXGBE_RXD_STAT_DD) != 0) {
+               ixgbe_rearm_queues(adapter, (u64)(1 << que->msix));
+               return (TRUE);
+       }
+
+       return (FALSE);
+}
+
+
+/*********************************************************************
+ *
+ *  Verify that the hardware indicated that the checksum is valid.
+ *  Inform the stack about the status of checksum so that stack
+ *  doesn't spend time verifying the checksum.
+ *
+ *********************************************************************/
+static void
+ixgbe_rx_checksum(u32 staterr, struct mbuf * mp, u32 ptype)
+{
+       /* Low 16 bits of the descriptor word are the status flags */
+       u16     status = (u16) staterr;
+       /* Bits 24-31 of the descriptor word are the error flags */
+       u8      errors = (u8) (staterr >> 24);
+       bool    sctp = FALSE;
+
+       /*
+       ** Packet type indicates SCTP only when no ETQF filter
+       ** matched (the ETQF bit redefines the ptype field).
+       */
+       if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
+           (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)
+               sctp = TRUE;
+
+       if (status & IXGBE_RXD_STAT_IPCS) {
+               if (!(errors & IXGBE_RXD_ERR_IPE)) {
+                       /* IP Checksum Good */
+                       mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
+                       mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
+
+               } else
+                       mp->m_pkthdr.csum_flags = 0;
+       }
+       if (status & IXGBE_RXD_STAT_L4CS) {
+               u16 type = (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
+#if __FreeBSD_version >= 800000
+               if (sctp)
+                       type = CSUM_SCTP_VALID;
+#endif
+               if (!(errors & IXGBE_RXD_ERR_TCPE)) {
+                       mp->m_pkthdr.csum_flags |= type;
+                       /* SCTP CRC has no pseudo-header value to report */
+                       if (!sctp)
+                               mp->m_pkthdr.csum_data = htons(0xffff);
+               } 
+       }
+       return;
+}
+
+
+/*
+** This routine is run via an vlan config EVENT,
+** it enables us to use the HW Filter table since
+** we can get the vlan id. This just creates the
+** entry in the soft version of the VFTA, init will
+** repopulate the real table.
+*/
+static void
+ixgbe_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
+{
+       struct adapter  *adapter = ifp->if_softc;
+       u16             index, bit;
+
+       if (ifp->if_softc !=  arg)   /* Not our event */
+               return;
+
+       if ((vtag == 0) || (vtag > 4095))       /* Invalid */
+               return;
+
+       IXGBE_CORE_LOCK(adapter);
+       /*
+       ** The VFTA is an array of 32-bit words: vtag bits 5-11
+       ** select the word, bits 0-4 the bit within the word.
+       */
+       index = (vtag >> 5) & 0x7F;
+       bit = vtag & 0x1F;
+       adapter->shadow_vfta[index] |= (1 << bit);
+       ++adapter->num_vlans;
+       /* Re-init pushes the shadow table into hardware */
+       ixgbe_init_locked(adapter);
+       IXGBE_CORE_UNLOCK(adapter);
+}
+
+/*
+** This routine is run via an vlan
+** unconfig EVENT, remove our entry
+** in the soft vfta.
+*/
+static void
+ixgbe_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
+{
+       struct adapter  *adapter = ifp->if_softc;
+       u16             index, bit;
+
+       if (ifp->if_softc !=  arg)      /* Not our event */
+               return;
+
+       if ((vtag == 0) || (vtag > 4095))       /* Invalid */
+               return;
+
+       IXGBE_CORE_LOCK(adapter);
+       /* Same word/bit decomposition as ixgbe_register_vlan() */
+       index = (vtag >> 5) & 0x7F;
+       bit = vtag & 0x1F;
+       adapter->shadow_vfta[index] &= ~(1 << bit);
+       --adapter->num_vlans;
+       /* Re-init to load the changes */
+       ixgbe_init_locked(adapter);
+       IXGBE_CORE_UNLOCK(adapter);
+}
+
+/*
+** Program VLAN hardware state (VFTA filter table, stripping and
+** filtering enables) from the driver's soft copy after a reset.
+*/
+static void
+ixgbe_setup_vlan_hw_support(struct adapter *adapter)
+{
+       struct ifnet    *ifp = adapter->ifp;
+       struct ixgbe_hw *hw = &adapter->hw;
+       u32             ctrl;
+
+
+       /*
+       ** We get here thru init_locked, meaning
+       ** a soft reset, this has already cleared
+       ** the VFTA and other state, so if there
+       ** have been no vlan's registered do nothing.
+       */
+       if (adapter->num_vlans == 0)
+               return;
+
+       /*
+       ** A soft reset zero's out the VFTA, so
+       ** we need to repopulate it now.
+       */
+       for (int i = 0; i < IXGBE_VFTA_SIZE; i++)
+               if (adapter->shadow_vfta[i] != 0)
+                       IXGBE_WRITE_REG(hw, IXGBE_VFTA(i),
+                           adapter->shadow_vfta[i]);
+
+       ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+       /* Enable the Filter Table if enabled */
+       if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) {
+               ctrl &= ~IXGBE_VLNCTRL_CFIEN;
+               ctrl |= IXGBE_VLNCTRL_VFE;
+       }
+       /* On 82598 VLAN stripping is a global (VLNCTRL) setting */
+       if (hw->mac.type == ixgbe_mac_82598EB)
+               ctrl |= IXGBE_VLNCTRL_VME;
+       IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
+
+       /* On 82599 the VLAN enable is per/queue in RXDCTL */
+       if (hw->mac.type != ixgbe_mac_82598EB)
+               for (int i = 0; i < adapter->num_queues; i++) {
+                       ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
+                               ctrl |= IXGBE_RXDCTL_VME;
+                       IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), ctrl);
+               }
+}
+
+/*
+** Enable device interrupts: misc causes via EIMS, auto-clear for
+** MSIX, then each queue vector individually.
+*/
+static void
+ixgbe_enable_intr(struct adapter *adapter)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       struct ix_queue *que = adapter->queues;
+       /* Start with everything except the per-queue RTX bits */
+       u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
+
+
+       /* Enable Fan Failure detection */
+       if (hw->device_id == IXGBE_DEV_ID_82598AT)
+                   mask |= IXGBE_EIMS_GPI_SDP1;
+       else {
+                   mask |= IXGBE_EIMS_ECC;
+                   mask |= IXGBE_EIMS_GPI_SDP0;
+                   mask |= IXGBE_EIMS_GPI_SDP1;
+                   mask |= IXGBE_EIMS_GPI_SDP2;
+#ifdef IXGBE_FDIR
+                   mask |= IXGBE_EIMS_FLOW_DIR;
+#endif
+       }
+
+       IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
+
+       /* With RSS we use auto clear */
+       if (adapter->msix_mem) {
+               mask = IXGBE_EIMS_ENABLE_MASK;
+               /* Don't autoclear Link */
+               mask &= ~IXGBE_EIMS_OTHER;
+               mask &= ~IXGBE_EIMS_LSC;
+               IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
+       }
+
+       /*
+       ** Now enable all queues, this is done separately to
+       ** allow for handling the extended (beyond 32) MSIX
+       ** vectors that can be used by 82599
+       */
+        for (int i = 0; i < adapter->num_queues; i++, que++)
+                ixgbe_enable_queue(adapter, que->msix);
+
+       IXGBE_WRITE_FLUSH(hw);
+
+       return;
+}
+
+/*
+** Mask off all device interrupts and disable MSIX auto-clear.
+*/
+static void
+ixgbe_disable_intr(struct adapter *adapter)
+{
+       if (adapter->msix_mem)
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 0);
+       if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+               /* 82598: a single EIMC covers every cause */
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
+       } else {
+               /*
+               ** Newer MACs: the low 16 EIMC bits are queue causes
+               ** handled through the extended EIMC_EX registers.
+               */
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
+       }
+       IXGBE_WRITE_FLUSH(&adapter->hw);
+       return;
+}
+
+/*
+** Shared-code callback: read a 16-bit value from PCI config
+** space (final argument 2 = width in bytes).
+*/
+u16
+ixgbe_read_pci_cfg(struct ixgbe_hw *hw, u32 reg)
+{
+       u16 value;
+
+       value = pci_read_config(((struct ixgbe_osdep *)hw->back)->dev,
+           reg, 2);
+
+       return (value);
+}
+
+/*
+** Shared-code callback: write a 16-bit value to PCI config
+** space (final argument 2 = width in bytes).
+*/
+void
+ixgbe_write_pci_cfg(struct ixgbe_hw *hw, u32 reg, u16 value)
+{
+       pci_write_config(((struct ixgbe_osdep *)hw->back)->dev,
+           reg, value, 2);
+
+       return;
+}
+
+/*
+** Setup the correct IVAR register for a particular MSIX interrupt
+**   (yes this is all very magic and confusing :)
+**  - entry is the register array entry
+**  - vector is the MSIX vector for this queue
+**  - type is RX/TX/MISC (0/1/-1)
+*/
+static void
+ixgbe_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       u32 ivar, index;
+
+       /* The valid bit must be set in every IVAR field */
+       vector |= IXGBE_IVAR_ALLOC_VAL;
+
+       switch (hw->mac.type) {
+
+       case ixgbe_mac_82598EB:
+               if (type == -1)
+                       entry = IXGBE_IVAR_OTHER_CAUSES_INDEX;
+               else
+                       entry += (type * 64);
+               /* Four 8-bit fields per 32-bit IVAR register */
+               index = (entry >> 2) & 0x1F;
+               ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(index));
+               ivar &= ~(0xFF << (8 * (entry & 0x3)));
+               ivar |= (vector << (8 * (entry & 0x3)));
+               IXGBE_WRITE_REG(&adapter->hw, IXGBE_IVAR(index), ivar);
+               break;
+
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
+               if (type == -1) { /* MISC IVAR */
+                       index = (entry & 1) * 8;
+                       ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
+                       ivar &= ~(0xFF << index);
+                       ivar |= (vector << index);
+                       IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, ivar);
+               } else {        /* RX/TX IVARS */
+                       index = (16 * (entry & 1)) + (8 * type);
+                       ivar = IXGBE_READ_REG(hw, IXGBE_IVAR(entry >> 1));
+                       ivar &= ~(0xFF << index);
+                       ivar |= (vector << index);
+                       IXGBE_WRITE_REG(hw, IXGBE_IVAR(entry >> 1), ivar);
+               }
+               /*
+               ** NOTE(review): no break here -- falls through into the
+               ** empty default. Harmless today, but an explicit break
+               ** would guard against future cases being added below.
+               */
+
+       default:
+               break;
+       }
+}
+
+/*
+** Map every queue's RX (type 0) and TX (type 1) causes, plus the
+** link cause (type -1), to their MSIX vectors and seed EITR.
+*/
+static void
+ixgbe_configure_ivars(struct adapter *adapter)
+{
+       struct  ix_queue *que = adapter->queues;
+       u32 newitr;
+
+       /*
+       ** Derive the EITR interval from the tunable rate; the low
+       ** 3 bits are reserved, hence the 0x0FF8 mask. (Presumably
+       ** the units match the hardware ITR granularity -- confirm
+       ** against the datasheet.) 0 disables throttling.
+       */
+       if (ixgbe_max_interrupt_rate > 0)
+               newitr = (8000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
+       else
+               newitr = 0;
+
+        for (int i = 0; i < adapter->num_queues; i++, que++) {
+               /* First the RX queue entry */
+                ixgbe_set_ivar(adapter, i, que->msix, 0);
+               /* ... and the TX */
+               ixgbe_set_ivar(adapter, i, que->msix, 1);
+               /* Set an Initial EITR value */
+                IXGBE_WRITE_REG(&adapter->hw,
+                    IXGBE_EITR(que->msix), newitr);
+       }
+
+       /* For the Link interrupt */
+        ixgbe_set_ivar(adapter, 1, adapter->linkvec, -1);
+}
+
+/*
+** ixgbe_sfp_probe - called in the local timer to
+** determine if a port had optics inserted.
+** Returns TRUE once a supported module is found and the
+** optics type has been set up; FALSE otherwise.
+*/  
+static bool ixgbe_sfp_probe(struct adapter *adapter)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       device_t        dev = adapter->dev;
+       bool            result = FALSE;
+
+       /* Only probe NL-PHY ports that currently report no SFP */
+       if ((hw->phy.type == ixgbe_phy_nl) &&
+           (hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
+               s32 ret = hw->phy.ops.identify_sfp(hw);
+               if (ret)
+                        goto out;
+               ret = hw->phy.ops.reset(hw);
+               if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
+                       device_printf(dev,"Unsupported SFP+ module detected!");
+                       printf(" Reload driver with supported module.\n");
+                       /* Stop probing until the driver is reloaded */
+                       adapter->sfp_probe = FALSE;
+                        goto out;
+               } else
+                       device_printf(dev,"SFP+ module detected!\n");
+               /* We now have supported optics */
+               adapter->sfp_probe = FALSE;
+               /* Set the optics type so system reports correctly */
+               ixgbe_setup_optics(adapter);
+               result = TRUE;
+       }
+out:
+       return (result);
+}
+
+/*
+** Tasklet handler for MSIX Link interrupts
+**  - do outside interrupt since it might sleep
+*/
+static void
+ixgbe_handle_link(void *context, int pending)
+{
+       struct adapter  *adapter = context;
+
+       /* Query current speed/state, then propagate it to the ifnet */
+       ixgbe_check_link(&adapter->hw,
+           &adapter->link_speed, &adapter->link_up, 0);
+               ixgbe_update_link_status(adapter);
+}
+
+/*
+** Tasklet for handling SFP module interrupts
+** Identifies and initializes a newly inserted module, then
+** kicks the MSF task to negotiate link.
+*/
+static void
+ixgbe_handle_mod(void *context, int pending)
+{
+       struct adapter  *adapter = context;
+       struct ixgbe_hw *hw = &adapter->hw;
+       device_t        dev = adapter->dev;
+       u32 err;
+
+       err = hw->phy.ops.identify_sfp(hw);
+       if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
+               device_printf(dev,
+                   "Unsupported SFP+ module type was detected.\n");
+               return;
+       }
+       err = hw->mac.ops.setup_sfp(hw);
+       if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
+               device_printf(dev,
+                   "Setup failure - unsupported SFP+ module type.\n");
+               return;
+       }
+       /* Module is up; let the MSF task set up the link */
+       taskqueue_enqueue(adapter->tq, &adapter->msf_task);
+       return;
+}
+
+
+/*
+** Tasklet for handling MSF (multispeed fiber) interrupts
+** Re-runs link setup with either the advertised speeds or, if
+** none are configured, whatever the hardware reports it can do.
+*/
+static void
+ixgbe_handle_msf(void *context, int pending)
+{
+       struct adapter  *adapter = context;
+       struct ixgbe_hw *hw = &adapter->hw;
+       u32 autoneg;
+       bool negotiate;
+
+       autoneg = hw->phy.autoneg_advertised;
+       /* Nothing advertised: fall back to hardware capabilities */
+       if ((!autoneg) && (hw->mac.ops.get_link_capabilities))
+               hw->mac.ops.get_link_capabilities(hw, &autoneg, &negotiate);
+       if (hw->mac.ops.setup_link)
+               hw->mac.ops.setup_link(hw, autoneg, negotiate, TRUE);
+       return;
+}
+
+#ifdef IXGBE_FDIR
+/*
+** Tasklet for reinitializing the Flow Director filter table
+** Runs only when the fdir_reinit flag was raised (by the
+** interrupt path); restarts the interface when done.
+*/
+static void
+ixgbe_reinit_fdir(void *context, int pending)
+{
+       struct adapter  *adapter = context;
+       struct ifnet   *ifp = adapter->ifp;
+
+       if (adapter->fdir_reinit != 1) /* Shouldn't happen */
+               return;
+       ixgbe_reinit_fdir_tables_82599(&adapter->hw);
+       adapter->fdir_reinit = 0;
+       /* Restart the interface */
+       ifp->if_drv_flags |= IFF_DRV_RUNNING;
+       return;
+}
+#endif
+
+/**********************************************************************
+ *
+ *  Update the board statistics counters.
+ *  Hardware counters are clear-on-read, so each value read here is
+ *  accumulated into the driver's running totals.
+ *
+ **********************************************************************/
+static void
+ixgbe_update_stats_counters(struct adapter *adapter)
+{
+       struct ifnet   *ifp = adapter->ifp;
+       struct ixgbe_hw *hw = &adapter->hw;
+       u32  missed_rx = 0, bprc, lxon, lxoff, total;
+       u64  total_missed_rx = 0;
+
+       adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
+       adapter->stats.illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
+       adapter->stats.errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
+       adapter->stats.mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
+
+       /* Per-TC (8) counters */
+       for (int i = 0; i < 8; i++) {
+               u32 mp;
+               mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
+               /* missed_rx tallies misses for the gprc workaround */
+               missed_rx += mp;
+               /* global total per queue */
+               adapter->stats.mpc[i] += mp;
+               /* Running comprehensive total for stats display */
+               total_missed_rx += adapter->stats.mpc[i];
+               if (hw->mac.type == ixgbe_mac_82598EB)
+                       adapter->stats.rnbc[i] +=
+                           IXGBE_READ_REG(hw, IXGBE_RNBC(i));
+               adapter->stats.pxontxc[i] +=
+                   IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
+               adapter->stats.pxonrxc[i] +=
+                   IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
+               adapter->stats.pxofftxc[i] +=
+                   IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
+               adapter->stats.pxoffrxc[i] +=
+                   IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
+               adapter->stats.pxon2offc[i] +=
+                   IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
+       }
+       /* Per-queue (16) counters */
+       for (int i = 0; i < 16; i++) {
+               adapter->stats.qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
+               adapter->stats.qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
+               /*
+               ** NOTE(review): QBRC(i)/QBTC(i) are each read twice,
+               ** with the second read shifted in as the high 32 bits.
+               ** That looks like it should be the high-word register
+               ** (e.g. QBRC_H) instead -- confirm against datasheet.
+               */
+               adapter->stats.qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
+               adapter->stats.qbrc[i] += 
+                   ((u64)IXGBE_READ_REG(hw, IXGBE_QBRC(i)) << 32);
+               adapter->stats.qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
+               adapter->stats.qbtc[i] +=
+                   ((u64)IXGBE_READ_REG(hw, IXGBE_QBTC(i)) << 32);
+               adapter->stats.qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
+       }
+       adapter->stats.mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
+       adapter->stats.mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
+       adapter->stats.rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
+
+       /* Hardware workaround, gprc counts missed packets */
+       adapter->stats.gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
+       adapter->stats.gprc -= missed_rx;
+
+       if (hw->mac.type != ixgbe_mac_82598EB) {
+               /* 64-bit counters: low register read latches the high */
+               adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
+                   ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
+               adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
+                   ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
+               adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
+                   ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
+               adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
+               adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
+       } else {
+               adapter->stats.lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
+               adapter->stats.lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
+               /* 82598 only has a counter in the high register */
+               adapter->stats.gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
+               adapter->stats.gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
+               adapter->stats.tor += IXGBE_READ_REG(hw, IXGBE_TORH);
+       }
+
+       /*
+        * Workaround: mprc hardware is incorrectly counting
+        * broadcasts, so for now we subtract those.
+        */
+       bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
+       adapter->stats.bprc += bprc;
+       adapter->stats.mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
+       if (hw->mac.type == ixgbe_mac_82598EB)
+               adapter->stats.mprc -= bprc;
+
+       adapter->stats.prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
+       adapter->stats.prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
+       adapter->stats.prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
+       adapter->stats.prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
+       adapter->stats.prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
+       adapter->stats.prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
+
+       lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
+       adapter->stats.lxontxc += lxon;
+       lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
+       adapter->stats.lxofftxc += lxoff;
+       total = lxon + lxoff;
+
+       /* Flow-control frames are counted as packets; back them out */
+       adapter->stats.gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
+       adapter->stats.mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
+       adapter->stats.ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
+       adapter->stats.gptc -= total;
+       adapter->stats.mptc -= total;
+       adapter->stats.ptc64 -= total;
+       adapter->stats.gotc -= total * ETHER_MIN_LEN;
+
+       adapter->stats.ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
+       adapter->stats.rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
+       adapter->stats.roc += IXGBE_READ_REG(hw, IXGBE_ROC);
+       adapter->stats.rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
+       adapter->stats.mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
+       adapter->stats.mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
+       adapter->stats.mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
+       adapter->stats.tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
+       adapter->stats.tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
+       adapter->stats.ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
+       adapter->stats.ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
+       adapter->stats.ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
+       adapter->stats.ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
+       adapter->stats.ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
+       adapter->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
+       adapter->stats.xec += IXGBE_READ_REG(hw, IXGBE_XEC);
+       adapter->stats.fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
+       adapter->stats.fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
+       /* Only read FCOE on 82599 */
+       if (hw->mac.type != ixgbe_mac_82598EB) {
+               adapter->stats.fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
+               adapter->stats.fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
+               adapter->stats.fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
+               adapter->stats.fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
+               adapter->stats.fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
+       }
+
+       /* Fill out the OS statistics structure */
+       ifp->if_ipackets = adapter->stats.gprc;
+       ifp->if_opackets = adapter->stats.gptc;
+       ifp->if_ibytes = adapter->stats.gorc;
+       ifp->if_obytes = adapter->stats.gotc;
+       ifp->if_imcasts = adapter->stats.mprc;
+       ifp->if_collisions = 0;
+
+       /* Rx Errors */
+       ifp->if_ierrors = total_missed_rx + adapter->stats.crcerrs +
+               adapter->stats.rlec;
+}
+
+/** ixgbe_sysctl_tdh_handler - Handler function
+ *  Retrieves the TDH value from the hardware
+ *  Read-only: any value written from userland is ignored.
+ */
+static int 
+ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
+{
+       int error;
+
+       struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
+       if (!txr) return 0;
+
+       /* Read the live register each time the sysctl is queried */
+       unsigned val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDH(txr->me));
+       error = sysctl_handle_int(oidp, &val, 0, req);
+       if (error || !req->newptr)
+               return error;
+       return 0;
+}
+
+/** ixgbe_sysctl_tdt_handler - Handler function
+ *  Retrieves the TDT value from the hardware
+ *  Read-only: any value written from userland is ignored.
+ */
+static int 
+ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
+{
+       int error;
+
+       struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
+       if (!txr) return 0;
+
+       /* Read the live register each time the sysctl is queried */
+       unsigned val = IXGBE_READ_REG(&txr->adapter->hw, IXGBE_TDT(txr->me));
+       error = sysctl_handle_int(oidp, &val, 0, req);
+       if (error || !req->newptr)
+               return error;
+       return 0;
+}
+
+/** ixgbe_sysctl_rdh_handler - Handler function
+ *  Retrieves the RDH value from the hardware
+ *  Read-only: any value written from userland is ignored.
+ */
+static int 
+ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
+{
+       int error;
+
+       struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
+       if (!rxr) return 0;
+
+       /* Read the live register each time the sysctl is queried */
+       unsigned val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDH(rxr->me));
+       error = sysctl_handle_int(oidp, &val, 0, req);
+       if (error || !req->newptr)
+               return error;
+       return 0;
+}
+
+/** ixgbe_sysctl_rdt_handler - Handler function
+ *  Retrieves the RDT value from the hardware
+ *  Read-only: any value written from userland is ignored.
+ */
+static int 
+ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
+{
+       int error;
+
+       struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
+       if (!rxr) return 0;
+
+       /* Read the live register each time the sysctl is queried */
+       unsigned val = IXGBE_READ_REG(&rxr->adapter->hw, IXGBE_RDT(rxr->me));
+       error = sysctl_handle_int(oidp, &val, 0, req);
+       if (error || !req->newptr)
+               return error;
+       return 0;
+}
+
+/*
+** Sysctl handler reporting a queue's interrupt rate, derived from
+** its EITR register. Inverse of the newitr math in
+** ixgbe_configure_ivars(): (8000000/rate) & 0x0FF8, >> 3 == 1000000/rate.
+** Read-only: any value written from userland is ignored.
+*/
+static int
+ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
+{
+       int error;
+       struct ix_queue *que = ((struct ix_queue *)oidp->oid_arg1);
+       unsigned int reg, usec, rate;
+
+       reg = IXGBE_READ_REG(&que->adapter->hw, IXGBE_EITR(que->msix));
+       /* Interval field occupies bits 3-11 of EITR */
+       usec = ((reg & 0x0FF8) >> 3);
+       if (usec > 0)
+               rate = 1000000 / usec;
+       else
+               rate = 0;       /* throttling disabled */
+       error = sysctl_handle_int(oidp, &rate, 0, req);
+       if (error || !req->newptr)
+               return error;
+       return 0;
+}
+
+/*
+ * Add sysctl variables, one per statistic, to the system.
+ */
+static void
+ixgbe_add_hw_stats(struct adapter *adapter)
+{
+
+       device_t dev = adapter->dev;
+
+       struct tx_ring *txr = adapter->tx_rings;
+       struct rx_ring *rxr = adapter->rx_rings;
+
+       struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
+       struct sysctl_oid *tree = device_get_sysctl_tree(dev);
+       struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
+       struct ixgbe_hw_stats *stats = &adapter->stats;
+
+       struct sysctl_oid *stat_node, *queue_node;
+       struct sysctl_oid_list *stat_list, *queue_list;
+
+#define QUEUE_NAME_LEN 32
+       char namebuf[QUEUE_NAME_LEN];
+
+       /* Driver Statistics */
+       SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
+                       CTLFLAG_RD, &adapter->dropped_pkts,
+                       "Driver dropped packets");
+       SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "mbuf_defrag_failed",
+                       CTLFLAG_RD, &adapter->mbuf_defrag_failed,
+                       "m_defrag() failed");
+       SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "no_tx_dma_setup",
+                       CTLFLAG_RD, &adapter->no_tx_dma_setup,
+                       "Driver tx dma failure in xmit");
+       SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
+                       CTLFLAG_RD, &adapter->watchdog_events,
+                       "Watchdog timeouts");
+       SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tso_tx",
+                       CTLFLAG_RD, &adapter->tso_tx,
+                       "TSO");
+       SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
+                       CTLFLAG_RD, &adapter->link_irq,
+                       "Link MSIX IRQ Handled");
+
+       for (int i = 0; i < adapter->num_queues; i++, txr++) {
+               snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
+               queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
+                                           CTLFLAG_RD, NULL, "Queue Name");
+               queue_list = SYSCTL_CHILDREN(queue_node);
+
+               SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
+                               CTLTYPE_UINT | CTLFLAG_RD, &adapter->queues[i],
+                               sizeof(&adapter->queues[i]),
+                               ixgbe_sysctl_interrupt_rate_handler, "IU",
+                               "Interrupt Rate");
+               SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head", 
+                               CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
+                               ixgbe_sysctl_tdh_handler, "IU",
+                               "Transmit Descriptor Head");
+               SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail", 
+                               CTLTYPE_UINT | CTLFLAG_RD, txr, sizeof(txr),
+                               ixgbe_sysctl_tdt_handler, "IU",
+                               "Transmit Descriptor Tail");
+               SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
+                               CTLFLAG_RD, &txr->no_desc_avail,
+                               "Queue No Descriptor Available");
+               SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
+                               CTLFLAG_RD, &txr->total_packets,
+                               "Queue Packets Transmitted");
+       }
+
+       for (int i = 0; i < adapter->num_queues; i++, rxr++) {
+               snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
+               queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, 
+                                           CTLFLAG_RD, NULL, "Queue Name");
+               queue_list = SYSCTL_CHILDREN(queue_node);
+
+               struct lro_ctrl *lro = &rxr->lro;
+
+               snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
+               queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, 
+                                           CTLFLAG_RD, NULL, "Queue Name");
+               queue_list = SYSCTL_CHILDREN(queue_node);
+
+               SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head", 
+                               CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
+                               ixgbe_sysctl_rdh_handler, "IU",
+                               "Receive Descriptor Head");
+               SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail", 
+                               CTLTYPE_UINT | CTLFLAG_RD, rxr, sizeof(rxr),
+                               ixgbe_sysctl_rdt_handler, "IU",
+                               "Receive Descriptor Tail");
+               SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
+                               CTLFLAG_RD, &rxr->rx_packets,
+                               "Queue Packets Received");
+               SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
+                               CTLFLAG_RD, &rxr->rx_bytes,
+                               "Queue Bytes Received");
+               SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_queued",
+                               CTLFLAG_RD, &lro->lro_queued, 0,
+                               "LRO Queued");
+               SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "lro_flushed",
+                               CTLFLAG_RD, &lro->lro_flushed, 0,
+                               "LRO Flushed");
+       }
+
+       /* MAC stats get the own sub node */
+
+       stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats", 
+                                   CTLFLAG_RD, NULL, "MAC Statistics");
+       stat_list = SYSCTL_CHILDREN(stat_node);
+
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
+                       CTLFLAG_RD, &stats->crcerrs,
+                       "CRC Errors");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "ill_errs",
+                       CTLFLAG_RD, &stats->illerrc,
+                       "Illegal Byte Errors");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "byte_errs",
+                       CTLFLAG_RD, &stats->errbc,
+                       "Byte Errors");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "short_discards",
+                       CTLFLAG_RD, &stats->mspdc,
+                       "MAC Short Packets Discarded");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "local_faults",
+                       CTLFLAG_RD, &stats->mlfc,
+                       "MAC Local Faults");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "remote_faults",
+                       CTLFLAG_RD, &stats->mrfc,
+                       "MAC Remote Faults");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rec_len_errs",
+                       CTLFLAG_RD, &stats->rlec,
+                       "Receive Length Errors");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "link_xon_txd",
+                       CTLFLAG_RD, &stats->lxontxc,
+                       "Link XON Transmitted");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "link_xon_rcvd",
+                       CTLFLAG_RD, &stats->lxonrxc,
+                       "Link XON Received");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "link_xoff_txd",
+                       CTLFLAG_RD, &stats->lxofftxc,
+                       "Link XOFF Transmitted");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "link_xoff_rcvd",
+                       CTLFLAG_RD, &stats->lxoffrxc,
+                       "Link XOFF Received");
+
+       /* Packet Reception Stats */
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_octets_rcvd",
+                       CTLFLAG_RD, &stats->tor, 
+                       "Total Octets Received"); 
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_rcvd",
+                       CTLFLAG_RD, &stats->gorc, 
+                       "Good Octets Received"); 
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_rcvd",
+                       CTLFLAG_RD, &stats->tpr,
+                       "Total Packets Received");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_rcvd",
+                       CTLFLAG_RD, &stats->gprc,
+                       "Good Packets Received");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_rcvd",
+                       CTLFLAG_RD, &stats->mprc,
+                       "Multicast Packets Received");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_rcvd",
+                       CTLFLAG_RD, &stats->bprc,
+                       "Broadcast Packets Received");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
+                       CTLFLAG_RD, &stats->prc64,
+                       "64 byte frames received ");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
+                       CTLFLAG_RD, &stats->prc127,
+                       "65-127 byte frames received");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
+                       CTLFLAG_RD, &stats->prc255,
+                       "128-255 byte frames received");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
+                       CTLFLAG_RD, &stats->prc511,
+                       "256-511 byte frames received");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
+                       CTLFLAG_RD, &stats->prc1023,
+                       "512-1023 byte frames received");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
+                       CTLFLAG_RD, &stats->prc1522,
+                       "1023-1522 byte frames received");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersized",
+                       CTLFLAG_RD, &stats->ruc,
+                       "Receive Undersized");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
+                       CTLFLAG_RD, &stats->rfc,
+                       "Fragmented Packets Received ");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversized",
+                       CTLFLAG_RD, &stats->roc,
+                       "Oversized Packets Received");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabberd",
+                       CTLFLAG_RD, &stats->rjc,
+                       "Received Jabber");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_rcvd",
+                       CTLFLAG_RD, &stats->mngprc,
+                       "Management Packets Received");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_drpd",
+                       CTLFLAG_RD, &stats->mngptc,
+                       "Management Packets Dropped");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "checksum_errs",
+                       CTLFLAG_RD, &stats->xec,
+                       "Checksum Errors");
+
+       /* Packet Transmission Stats */
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
+                       CTLFLAG_RD, &stats->gotc, 
+                       "Good Octets Transmitted"); 
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
+                       CTLFLAG_RD, &stats->tpt,
+                       "Total Packets Transmitted");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
+                       CTLFLAG_RD, &stats->gptc,
+                       "Good Packets Transmitted");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
+                       CTLFLAG_RD, &stats->bptc,
+                       "Broadcast Packets Transmitted");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
+                       CTLFLAG_RD, &stats->mptc,
+                       "Multicast Packets Transmitted");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "management_pkts_txd",
+                       CTLFLAG_RD, &stats->mngptc,
+                       "Management Packets Transmitted");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
+                       CTLFLAG_RD, &stats->ptc64,
+                       "64 byte frames transmitted ");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
+                       CTLFLAG_RD, &stats->ptc127,
+                       "65-127 byte frames transmitted");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
+                       CTLFLAG_RD, &stats->ptc255,
+                       "128-255 byte frames transmitted");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
+                       CTLFLAG_RD, &stats->ptc511,
+                       "256-511 byte frames transmitted");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
+                       CTLFLAG_RD, &stats->ptc1023,
+                       "512-1023 byte frames transmitted");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
+                       CTLFLAG_RD, &stats->ptc1522,
+                       "1024-1522 byte frames transmitted");
+
+       /* FC Stats */
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "fc_crc",
+               CTLFLAG_RD, &stats->fccrc,
+               "FC CRC Errors");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "fc_last",
+               CTLFLAG_RD, &stats->fclast,
+               "FC Last Error");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "fc_drpd",
+               CTLFLAG_RD, &stats->fcoerpdc,
+               "FCoE Packets Dropped");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "fc_pkts_rcvd",
+               CTLFLAG_RD, &stats->fcoeprc,
+               "FCoE Packets Received");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "fc_pkts_txd",
+               CTLFLAG_RD, &stats->fcoeptc,
+               "FCoE Packets Transmitted");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "fc_dword_rcvd",
+               CTLFLAG_RD, &stats->fcoedwrc,
+               "FCoE DWords Received");
+       SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "fc_dword_txd",
+               CTLFLAG_RD, &stats->fcoedwtc,
+               "FCoE DWords Transmitted");
+}
+
+/*
+** Set flow control using sysctl:
+** Flow control values:
+**     0 - off
+**     1 - rx pause
+**     2 - tx pause
+**     3 - full
+*/
+/*
+** Sysctl handler for the flow-control node.  Accepts 0 (off),
+** 1 (rx pause), 2 (tx pause) or 3 (full); any other value is
+** treated as "off".  On a change, the new mode is pushed to the
+** hardware via ixgbe_fc_enable().
+*/
+static int
+ixgbe_set_flowcntl(SYSCTL_HANDLER_ARGS)
+{
+       int error, last;
+       struct adapter *adapter = (struct adapter *) arg1;
+
+       last = adapter->fc;
+       error = sysctl_handle_int(oidp, &adapter->fc, 0, req);
+       /* Plain read (no new value) or copyin failure: nothing to program */
+       if ((error) || (req->newptr == NULL))
+               return (error);
+
+       /* Don't bother if it's not changed */
+       if (adapter->fc == last)
+               return (0);
+
+       switch (adapter->fc) {
+               case ixgbe_fc_rx_pause:
+               case ixgbe_fc_tx_pause:
+               case ixgbe_fc_full:
+                       adapter->hw.fc.requested_mode = adapter->fc;
+                       break;
+               case ixgbe_fc_none:
+               default:
+                       /*
+                       ** Out-of-range input: fall back to "off", and keep
+                       ** the cached sysctl value in sync with what is
+                       ** actually programmed so a subsequent write of a
+                       ** valid mode is not skipped by the check above.
+                       */
+                       adapter->hw.fc.requested_mode = ixgbe_fc_none;
+                       adapter->fc = ixgbe_fc_none;
+       }
+
+       ixgbe_fc_enable(&adapter->hw, 0);
+       return (error);
+}
+
+/*
+** Register a read/write integer sysctl under the device's tree and
+** seed *limit with the given initial value.  'limit' must point at
+** storage that outlives the sysctl (here, a field in the adapter).
+** NOTE(review): OR-ing CTLTYPE_INT into the flags is redundant --
+** SYSCTL_ADD_INT supplies the type itself -- but harmless.
+*/
+static void
+ixgbe_add_rx_process_limit(struct adapter *adapter, const char *name,
+        const char *description, int *limit, int value)
+{
+        *limit = value;
+        SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
+            SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
+            OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
+}
+
+/*
+** Control link advertise speed:
+**     0 - normal
+**     1 - advertise only 1G
+**     2 - advertise 100Mb
+*/
+/*
+** Sysctl handler for link advertise speed:
+**     0 - normal (1G + 10G)
+**     1 - advertise only 1G
+**     2 - advertise only 100Mb (X540 copper only)
+** Only meaningful on copper or multispeed-fiber PHYs.
+*/
+static int
+ixgbe_set_advertise(SYSCTL_HANDLER_ARGS)
+{
+       int                     error = 0;
+       struct adapter          *adapter;
+       device_t                dev;
+       struct ixgbe_hw         *hw;
+       ixgbe_link_speed        speed, last;
+
+       adapter = (struct adapter *) arg1;
+       dev = adapter->dev;
+       hw = &adapter->hw;
+       last = hw->phy.autoneg_advertised;
+
+       error = sysctl_handle_int(oidp, &adapter->advertise, 0, req);
+
+       /*
+       ** Bail on error, on a plain read (no new value supplied -- this
+       ** guard matches ixgbe_set_flowcntl and prevents a read from
+       ** re-programming the link), or on the unset sentinel.
+       */
+       if ((error) || (req->newptr == NULL) || (adapter->advertise == -1))
+               return (error);
+
+       /* Speed selection only applies to copper or multispeed fiber */
+       if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
+            (hw->phy.multispeed_fiber)))
+               return (error);
+
+       /* 100Mb advertisement is an X540-only capability */
+       if ((adapter->advertise == 2) && (hw->mac.type != ixgbe_mac_X540)) {
+               device_printf(dev, "Set Advertise: 100Mb on X540 only\n");
+               return (error);
+       }
+
+       if (adapter->advertise == 1)
+                speed = IXGBE_LINK_SPEED_1GB_FULL;
+       else if (adapter->advertise == 2)
+                speed = IXGBE_LINK_SPEED_100_FULL;
+       else
+                speed = IXGBE_LINK_SPEED_1GB_FULL |
+                       IXGBE_LINK_SPEED_10GB_FULL;
+
+       if (speed == last) /* no change */
+               return (error);
+
+       /* Kick off renegotiation with the new advertisement mask */
+       hw->mac.autotry_restart = TRUE;
+       hw->mac.ops.setup_link(hw, speed, TRUE, TRUE);
+
+       return (error);
+}
+
diff --git a/lib/librte_pmd_ixgbe/ixgbe/ixgbe.h b/lib/librte_pmd_ixgbe/ixgbe/ixgbe.h
new file mode 100644 (file)
index 0000000..716e775
--- /dev/null
@@ -0,0 +1,521 @@
+/******************************************************************************
+
+  Copyright (c) 2001-2010, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+
+#ifndef _IXGBE_H_
+#define _IXGBE_H_
+
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#if __FreeBSD_version >= 800000
+#include <sys/buf_ring.h>
+#endif
+#include <sys/mbuf.h>
+#include <sys/protosw.h>
+#include <sys/socket.h>
+#include <sys/malloc.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/sockio.h>
+
+#include <net/if.h>
+#include <net/if_arp.h>
+#include <net/bpf.h>
+#include <net/ethernet.h>
+#include <net/if_dl.h>
+#include <net/if_media.h>
+
+#include <net/bpf.h>
+#include <net/if_types.h>
+#include <net/if_vlan_var.h>
+
+#include <netinet/in_systm.h>
+#include <netinet/in.h>
+#include <netinet/if_ether.h>
+#include <netinet/ip.h>
+#include <netinet/ip6.h>
+#include <netinet/tcp.h>
+#include <netinet/tcp_lro.h>
+#include <netinet/udp.h>
+
+#include <machine/in_cksum.h>
+
+#include <sys/bus.h>
+#include <machine/bus.h>
+#include <sys/rman.h>
+#include <machine/resource.h>
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#include <machine/clock.h>
+#include <dev/pci/pcivar.h>
+#include <dev/pci/pcireg.h>
+#include <sys/proc.h>
+#include <sys/sysctl.h>
+#include <sys/endian.h>
+#include <sys/taskqueue.h>
+#include <sys/pcpu.h>
+#include <sys/smp.h>
+#include <machine/smp.h>
+
+#ifdef IXGBE_IEEE1588
+#include <sys/ieee1588.h>
+#endif
+
+#include "ixgbe_api.h"
+
+/* Tunables */
+
+/*
+ * TxDescriptors Valid Range: 64-4096 Default Value: 256 This value is the
+ * number of transmit descriptors allocated by the driver. Increasing this
+ * value allows the driver to queue more transmits. Each descriptor is 16
+ * bytes. Performance tests have shown the 2K value to be optimal for top
+ * performance.
+ */
+#define DEFAULT_TXD    1024
+#define PERFORM_TXD    2048
+#define MAX_TXD                4096
+#define MIN_TXD                64
+
+/*
+ * RxDescriptors Valid Range: 64-4096 Default Value: 256 This value is the
+ * number of receive descriptors allocated for each RX queue. Increasing this
+ * value allows the driver to buffer more incoming packets. Each descriptor
+ * is 16 bytes.  A receive buffer is also allocated for each descriptor. 
+ * 
+ * Note: with 8 rings and a dual port card, it is possible to bump up 
+ *     against the system mbuf pool limit, you can tune nmbclusters
+ *     to adjust for this.
+ */
+#define DEFAULT_RXD    1024
+#define PERFORM_RXD    2048
+#define MAX_RXD                4096
+#define MIN_RXD                64
+
+/* Alignment for rings */
+#define DBA_ALIGN      128
+
+/*
+ * This parameter controls the maximum number of times the driver will loop in
+ * the isr. Minimum Value = 1
+ */
+#define MAX_LOOP       10
+
+/*
+ * This is the max watchdog interval, ie. the time that can
+ * pass between any two TX clean operations, such only happening
+ * when the TX hardware is functioning.
+ */
+#define IXGBE_WATCHDOG                   (10 * hz)
+
+/*
+ * These parameters control when the driver calls the routine to reclaim
+ * transmit descriptors.
+ */
+#define IXGBE_TX_CLEANUP_THRESHOLD     (adapter->num_tx_desc / 8)
+#define IXGBE_TX_OP_THRESHOLD          (adapter->num_tx_desc / 32)
+
+#define IXGBE_MAX_FRAME_SIZE   0x3F00
+
+/* Flow control constants */
+#define IXGBE_FC_PAUSE         0xFFFF
+#define IXGBE_FC_HI            0x20000
+#define IXGBE_FC_LO            0x10000
+
+/* Keep older OS drivers building... */
+#if !defined(SYSCTL_ADD_UQUAD)
+#define SYSCTL_ADD_UQUAD SYSCTL_ADD_QUAD
+#endif
+
+/* Defines for printing debug information */
+#define DEBUG_INIT  0
+#define DEBUG_IOCTL 0
+#define DEBUG_HW    0
+
+#define INIT_DEBUGOUT(S)            if (DEBUG_INIT)  printf(S "\n")
+#define INIT_DEBUGOUT1(S, A)        if (DEBUG_INIT)  printf(S "\n", A)
+#define INIT_DEBUGOUT2(S, A, B)     if (DEBUG_INIT)  printf(S "\n", A, B)
+#define IOCTL_DEBUGOUT(S)           if (DEBUG_IOCTL) printf(S "\n")
+#define IOCTL_DEBUGOUT1(S, A)       if (DEBUG_IOCTL) printf(S "\n", A)
+#define IOCTL_DEBUGOUT2(S, A, B)    if (DEBUG_IOCTL) printf(S "\n", A, B)
+#define HW_DEBUGOUT(S)              if (DEBUG_HW) printf(S "\n")
+#define HW_DEBUGOUT1(S, A)          if (DEBUG_HW) printf(S "\n", A)
+#define HW_DEBUGOUT2(S, A, B)       if (DEBUG_HW) printf(S "\n", A, B)
+
+#define MAX_NUM_MULTICAST_ADDRESSES     128
+#define IXGBE_82598_SCATTER            100
+#define IXGBE_82599_SCATTER            32
+#define MSIX_82598_BAR                 3
+#define MSIX_82599_BAR                 4
+#define IXGBE_TSO_SIZE                 65535
+#define IXGBE_TX_BUFFER_SIZE           ((u32) 1514)
+#define IXGBE_RX_HDR                   128
+#define IXGBE_VFTA_SIZE                        128
+#define IXGBE_BR_SIZE                  4096
+#define IXGBE_QUEUE_IDLE               0
+#define IXGBE_QUEUE_WORKING            1
+#define IXGBE_QUEUE_HUNG               2
+
+/* Offload bits in mbuf flag */
+#if __FreeBSD_version >= 800000
+#define CSUM_OFFLOAD           (CSUM_IP|CSUM_TCP|CSUM_UDP|CSUM_SCTP)
+#else
+#define CSUM_OFFLOAD           (CSUM_IP|CSUM_TCP|CSUM_UDP)
+#endif
+
+/* For 6.X code compatibility */
+#if !defined(ETHER_BPF_MTAP)
+#define ETHER_BPF_MTAP         BPF_MTAP
+#endif
+
+#if __FreeBSD_version < 700000
+#define CSUM_TSO               0
+#define IFCAP_TSO4             0
+#endif
+
+/*
+ * Interrupt Moderation parameters 
+ */
+#define IXGBE_LOW_LATENCY      128
+#define IXGBE_AVE_LATENCY      400
+#define IXGBE_BULK_LATENCY     1200
+#define IXGBE_LINK_ITR         2000
+
+/*
+ *****************************************************************************
+ * vendor_info_array
+ * 
+ * This array contains the list of Subvendor/Subdevice IDs on which the driver
+ * should load.
+ * 
+ *****************************************************************************
+ */
+typedef struct _ixgbe_vendor_info_t {
+       unsigned int    vendor_id;      /* PCI vendor ID */
+       unsigned int    device_id;      /* PCI device ID */
+       unsigned int    subvendor_id;   /* PCI subsystem vendor ID */
+       unsigned int    subdevice_id;   /* PCI subsystem device ID */
+       unsigned int    index;
+} ixgbe_vendor_info_t;
+
+
+/* Per-descriptor transmit bookkeeping */
+struct ixgbe_tx_buf {
+       u32             eop_index;      /* descriptor index of this packet's EOP */
+       struct mbuf     *m_head;        /* head of the mbuf chain being sent */
+       bus_dmamap_t    map;            /* DMA map for the chain */
+};
+
+/* Per-descriptor receive bookkeeping (separate header/packet buffers
+** support the hdr_split mode -- see struct rx_ring) */
+struct ixgbe_rx_buf {
+       struct mbuf     *m_head;        /* header buffer */
+       struct mbuf     *m_pack;        /* packet (payload) buffer */
+       struct mbuf     *fmp;           /* first mbuf of a multi-descriptor frame */
+       bus_dmamap_t    hmap;           /* DMA map for m_head */
+       bus_dmamap_t    pmap;           /* DMA map for m_pack */
+};
+
+/*
+ * Bus dma allocation structure used by ixgbe_dma_malloc and ixgbe_dma_free.
+ */
+struct ixgbe_dma_alloc {
+       bus_addr_t              dma_paddr;      /* physical (bus) address */
+       caddr_t                 dma_vaddr;      /* kernel virtual address */
+       bus_dma_tag_t           dma_tag;
+       bus_dmamap_t            dma_map;
+       bus_dma_segment_t       dma_seg;
+       bus_size_t              dma_size;       /* allocation size in bytes */
+       int                     dma_nseg;       /* number of DMA segments */
+};
+
+/*
+** Driver queue struct: this is the interrupt container
+**  for the associated tx and rx ring.
+*/
+struct ix_queue {
+       struct adapter          *adapter;       /* back pointer to owning adapter */
+       u32                     msix;           /* This queue's MSIX vector */
+       u32                     eims;           /* This queue's EIMS bit */
+       u32                     eitr_setting;   /* interrupt throttle (EITR) value -- presumably cached for AIM */
+       struct resource         *res;           /* bus resource for this vector's IRQ */
+       void                    *tag;           /* interrupt handler cookie */
+       struct tx_ring          *txr;           /* paired transmit ring */
+       struct rx_ring          *rxr;           /* paired receive ring */
+       struct task             que_task;       /* deferred-work task for this queue */
+       struct taskqueue        *tq;            /* taskqueue running que_task */
+       u64                     irqs;           /* interrupt count (soft stat) */
+};
+
+/*
+ * The transmit ring, one per queue
+ */
+struct tx_ring {
+        struct adapter         *adapter;       /* back pointer to owning adapter */
+       struct mtx              tx_mtx;         /* per-ring lock (IXGBE_TX_LOCK) */
+       u32                     me;             /* ring index -- TODO confirm */
+       int                     queue_status;   /* IXGBE_QUEUE_{IDLE,WORKING,HUNG} */
+       int                     watchdog_time;  /* timestamp for watchdog checks -- verify units */
+       union ixgbe_adv_tx_desc *tx_base;       /* descriptor ring base (virtual) */
+       struct ixgbe_dma_alloc  txdma;          /* DMA allocation backing tx_base */
+       u32                     next_avail_desc;        /* next free descriptor slot */
+       u32                     next_to_clean;  /* next descriptor to reclaim */
+       struct ixgbe_tx_buf     *tx_buffers;    /* per-descriptor bookkeeping array */
+       volatile u16            tx_avail;       /* count of free descriptors */
+       u32                     txd_cmd;        /* command bits applied to descriptors */
+       bus_dma_tag_t           txtag;          /* DMA tag for packet maps */
+       char                    mtx_name[16];   /* storage for the mutex name */
+#if __FreeBSD_version >= 800000
+       struct buf_ring         *br;            /* multiqueue transmit buf_ring */
+#endif
+#ifdef IXGBE_FDIR
+       u16                     atr_sample;     /* flow-director sampling state */
+       u16                     atr_count;
+#endif
+       u32                     bytes;  /* used for AIM */
+       u32                     packets;
+       /* Soft Stats */
+       u64                     no_desc_avail;
+       u64                     total_packets;
+};
+
+
+/*
+ * The Receive ring, one per rx queue
+ */
+struct rx_ring {
+        struct adapter         *adapter;       /* back pointer to owning adapter */
+       struct mtx              rx_mtx;         /* per-ring lock (IXGBE_RX_LOCK) */
+       u32                     me;             /* ring index -- TODO confirm */
+       union ixgbe_adv_rx_desc *rx_base;       /* descriptor ring base (virtual) */
+       struct ixgbe_dma_alloc  rxdma;          /* DMA allocation backing rx_base */
+       struct lro_ctrl         lro;            /* software LRO state */
+       bool                    lro_enabled;
+       bool                    hdr_split;      /* header/payload buffer split active */
+       bool                    hw_rsc;         /* hardware receive-side coalescing */
+       bool                    discard;        /* drop frames on this ring -- verify semantics */
+        u32                    next_to_refresh;        /* next slot to hand back to hw */
+        u32                    next_to_check;  /* next slot the driver will examine */
+       char                    mtx_name[16];   /* storage for the mutex name */
+       struct ixgbe_rx_buf     *rx_buffers;    /* per-descriptor bookkeeping array */
+       bus_dma_tag_t           htag;           /* DMA tag for header buffers */
+       bus_dma_tag_t           ptag;           /* DMA tag for packet buffers */
+
+       u32                     bytes; /* Used for AIM calc */
+       u32                     packets;
+
+       /* Soft stats */
+       u64                     rx_irq;
+       u64                     rx_split_packets;
+       u64                     rx_packets;
+       u64                     rx_bytes;
+       u64                     rx_discarded;
+       u64                     rsc_num;
+#ifdef IXGBE_FDIR
+       u64                     flm;
+#endif
+};
+
+/* Our adapter structure */
+/* Per-device driver softc: all state for one ixgbe port */
+struct adapter {
+       struct ifnet            *ifp;           /* network interface */
+       struct ixgbe_hw         hw;             /* shared-code hardware state */
+
+       struct ixgbe_osdep      osdep;          /* OS-dependent glue for shared code */
+       struct device           *dev;           /* newbus device handle */
+
+       struct resource         *pci_mem;       /* BAR0 register mapping */
+       struct resource         *msix_mem;      /* MSIX table BAR mapping */
+
+       /*
+        * Interrupt resources: this set is
+        * either used for legacy, or for Link
+        * when doing MSIX
+        */
+       void                    *tag;
+       struct resource         *res;
+
+       struct ifmedia          media;          /* ifmedia selection state */
+       struct callout          timer;          /* periodic (watchdog/stats) callout */
+       int                     msix;           /* number of MSIX vectors -- TODO confirm */
+       int                     if_flags;       /* cached ifp flags */
+
+       struct mtx              core_mtx;       /* device-wide lock (IXGBE_CORE_LOCK) */
+
+       eventhandler_tag        vlan_attach;    /* VLAN config event hooks */
+       eventhandler_tag        vlan_detach;
+
+       u16                     num_vlans;      /* count of configured VLANs */
+       u16                     num_queues;     /* count of RX/TX queue pairs */
+
+       /*
+       ** Shadow VFTA table, this is needed because
+       ** the real vlan filter table gets cleared during
+       ** a soft reset and the driver needs to be able
+       ** to repopulate it.
+       */
+       u32                     shadow_vfta[IXGBE_VFTA_SIZE];
+
+       /* Info about the interface */
+       u32                     optics;
+       u32                     fc; /* local flow ctrl setting */
+       int                     advertise;  /* link speeds */
+       bool                    link_active;
+       u16                     max_frame_size;
+       u16                     num_segs;
+       u32                     link_speed;
+       bool                    link_up;
+       u32                     linkvec;        /* MSIX vector used for link events */
+
+       /* Mbuf cluster size */
+       u32                     rx_mbuf_sz;
+
+       /* Support for pluggable optics */
+       bool                    sfp_probe;
+       struct task             link_task;  /* Link tasklet */
+       struct task             mod_task;   /* SFP tasklet */
+       struct task             msf_task;   /* Multispeed Fiber */
+#ifdef IXGBE_FDIR
+       int                     fdir_reinit;
+       struct task             fdir_task;
+#endif
+       struct taskqueue        *tq;
+
+       /*
+       ** Queues: 
+       **   This is the irq holder, it has
+       **   and RX/TX pair or rings associated
+       **   with it.
+       */
+       struct ix_queue         *queues;
+
+       /*
+        * Transmit rings:
+        *      Allocated at run time, an array of rings.
+        */
+       struct tx_ring          *tx_rings;
+       int                     num_tx_desc;
+
+       /*
+        * Receive rings:
+        *      Allocated at run time, an array of rings.
+        */
+       struct rx_ring          *rx_rings;
+       int                     num_rx_desc;
+       u64                     que_mask;       /* bitmask of active queue EIMS bits -- verify */
+       u32                     rx_process_limit;       /* max packets per RX cleanup pass */
+
+       /* Multicast array memory */
+       u8                      *mta;
+
+       /* Misc stats maintained by the driver */
+       unsigned long           dropped_pkts;
+       unsigned long           mbuf_defrag_failed;
+       unsigned long           mbuf_header_failed;
+       unsigned long           mbuf_packet_failed;
+       unsigned long           no_tx_map_avail;
+       unsigned long           no_tx_dma_setup;
+       unsigned long           watchdog_events;
+       unsigned long           tso_tx;
+       unsigned long           link_irq;
+
+       struct ixgbe_hw_stats   stats;          /* hardware statistics snapshot */
+};
+
+/* Precision Time Sync (IEEE 1588) defines */
+#define ETHERTYPE_IEEE1588      0x88F7
+#define PICOSECS_PER_TICK       20833
+#define TSYNC_UDP_PORT          319 /* UDP port for the protocol */
+#define IXGBE_ADVTXD_TSTAMP    0x00080000
+
+
+#define IXGBE_CORE_LOCK_INIT(_sc, _name) \
+        mtx_init(&(_sc)->core_mtx, _name, "IXGBE Core Lock", MTX_DEF)
+#define IXGBE_CORE_LOCK_DESTROY(_sc)      mtx_destroy(&(_sc)->core_mtx)
+#define IXGBE_TX_LOCK_DESTROY(_sc)        mtx_destroy(&(_sc)->tx_mtx)
+#define IXGBE_RX_LOCK_DESTROY(_sc)        mtx_destroy(&(_sc)->rx_mtx)
+#define IXGBE_CORE_LOCK(_sc)              mtx_lock(&(_sc)->core_mtx)
+#define IXGBE_TX_LOCK(_sc)                mtx_lock(&(_sc)->tx_mtx)
+#define IXGBE_TX_TRYLOCK(_sc)             mtx_trylock(&(_sc)->tx_mtx)
+#define IXGBE_RX_LOCK(_sc)                mtx_lock(&(_sc)->rx_mtx)
+#define IXGBE_CORE_UNLOCK(_sc)            mtx_unlock(&(_sc)->core_mtx)
+#define IXGBE_TX_UNLOCK(_sc)              mtx_unlock(&(_sc)->tx_mtx)
+#define IXGBE_RX_UNLOCK(_sc)              mtx_unlock(&(_sc)->rx_mtx)
+#define IXGBE_CORE_LOCK_ASSERT(_sc)       mtx_assert(&(_sc)->core_mtx, MA_OWNED)
+#define IXGBE_TX_LOCK_ASSERT(_sc)         mtx_assert(&(_sc)->tx_mtx, MA_OWNED)
+
+
+/*
+** TRUE when the detected PHY type is a pluggable SFP module:
+** a known vendor part, a passive cable, or an unidentified SFP.
+*/
+static inline bool
+ixgbe_is_sfp(struct ixgbe_hw *hw)
+{
+       if (hw->phy.type == ixgbe_phy_sfp_avago ||
+           hw->phy.type == ixgbe_phy_sfp_ftl ||
+           hw->phy.type == ixgbe_phy_sfp_intel ||
+           hw->phy.type == ixgbe_phy_sfp_unknown ||
+           hw->phy.type == ixgbe_phy_sfp_passive_tyco ||
+           hw->phy.type == ixgbe_phy_sfp_passive_unknown)
+               return TRUE;
+       return FALSE;
+}
+
+/* Workaround to make 8.0 buildable */
+#if __FreeBSD_version >= 800000 && __FreeBSD_version < 800504
+static __inline int
+drbr_needs_enqueue(struct ifnet *ifp, struct buf_ring *br)
+{
+#ifdef ALTQ
+        /* With ALTQ enabled, every packet must pass through the queue */
+        if (ALTQ_IS_ENABLED(&ifp->if_snd))
+                return (1);
+#endif
+        /* Otherwise enqueue only if the buf_ring already holds packets */
+        return (!buf_ring_empty(br));
+}
+#endif
+
+/*
+** Number of RX descriptors the driver has consumed but not yet
+** handed back (refreshed) to the hardware.  One slot is always
+** held in reserve, hence the trailing "- 1".
+*/
+static inline u16
+ixgbe_rx_unrefreshed(struct rx_ring *rxr)
+{
+       u32 check = rxr->next_to_check;
+       u32 refresh = rxr->next_to_refresh;
+
+       /* Unwrap 'check' past the ring end when it trails 'refresh' */
+       if (check <= refresh)
+               check += rxr->adapter->num_rx_desc;
+       return (check - refresh - 1);
+}
+
+#endif /* _IXGBE_H_ */
diff --git a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_82598.c b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_82598.c
new file mode 100644 (file)
index 0000000..6a1af54
--- /dev/null
@@ -0,0 +1,1402 @@
+/******************************************************************************
+
+  Copyright (c) 2001-2010, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#include "ixgbe_type.h"
+#include "ixgbe_api.h"
+#include "ixgbe_common.h"
+#include "ixgbe_phy.h"
+
+u32 ixgbe_get_pcie_msix_count_82598(struct ixgbe_hw *hw);
+s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw);
+static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
+                                             ixgbe_link_speed *speed,
+                                             bool *autoneg);
+static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw);
+s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num);
+static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
+                                       bool autoneg_wait_to_complete);
+static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
+                                      ixgbe_link_speed *speed, bool *link_up,
+                                      bool link_up_wait_to_complete);
+static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
+                                            ixgbe_link_speed speed,
+                                            bool autoneg,
+                                            bool autoneg_wait_to_complete);
+static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
+                                               ixgbe_link_speed speed,
+                                               bool autoneg,
+                                               bool autoneg_wait_to_complete);
+static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw);
+s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw);
+void ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw);
+s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
+static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
+s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan,
+                         u32 vind, bool vlan_on);
+static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw);
+s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val);
+s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val);
+s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
+                                u8 *eeprom_data);
+u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw);
+s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw);
+void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw);
+void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw);
+static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
+                                  u32 headroom, int strategy);
+
+/**
+ *  ixgbe_set_pcie_completion_timeout - set pci-e completion timeout
+ *  @hw: pointer to the HW structure
+ *
+ *  The defaults for 82598 should be in the range of 50us to 50ms,
+ *  however the hardware default for these parts is 500us to 1ms which is less
+ *  than the 10ms recommended by the pci-e spec.  To address this we need to
+ *  increase the value to either 10ms to 250ms for capability version 1 config,
+ *  or 16ms to 55ms for version 2.
+ **/
+void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
+{
+       u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR);
+       u16 pcie_devctl2;
+
+       /* only take action if timeout value is defaulted to 0 */
+       if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK)
+               goto out;
+
+       /*
+        * if capabilities version is type 1 we can write the
+        * timeout of 10ms to 250ms through the GCR register
+        */
+       if (!(gcr & IXGBE_GCR_CAP_VER2)) {
+               gcr |= IXGBE_GCR_CMPL_TMOUT_10ms;
+               goto out;
+       }
+
+       /*
+        * for version 2 capabilities we need to write the config space
+        * directly in order to set the completion timeout value for
+        * 16ms to 55ms
+        */
+       pcie_devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2);
+       pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms;
+       IXGBE_WRITE_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2);
+out:
+       /* disable completion timeout resend; note GCR is written back on
+        * every path, including the early-exit ones above */
+       gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND;
+       IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr);
+}
+
+/**
+ *  ixgbe_get_pcie_msix_count_82598 - Gets MSI-X vector count
+ *  @hw: pointer to hardware structure
+ *
+ *  Read PCIe configuration space, and get the MSI-X vector count from
+ *  the capabilities table.  Falls back to a fixed count of 18 when the
+ *  MAC does not report its MSI-X vectors via PCIe config space.
+ **/
+u32 ixgbe_get_pcie_msix_count_82598(struct ixgbe_hw *hw)
+{
+       u32 vectors = 18;
+
+       DEBUGFUNC("ixgbe_get_pcie_msix_count_82598");
+
+       if (!hw->mac.msix_vectors_from_pcie)
+               return vectors;
+
+       vectors = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCIE_MSIX_82598_CAPS);
+       vectors &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
+
+       /* The table-size field is zero-based in HW; add one to get the
+        * actual vector count. */
+       return vectors + 1;
+}
+
+/**
+ *  ixgbe_init_ops_82598 - Inits func ptrs and MAC type
+ *  @hw: pointer to hardware structure
+ *
+ *  Initialize the function pointers and assign the MAC type for 82598.
+ *  Does not touch the hardware.
+ **/
+s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw)
+{
+       struct ixgbe_mac_info *mac = &hw->mac;
+       struct ixgbe_phy_info *phy = &hw->phy;
+       s32 ret_val;
+
+       DEBUGFUNC("ixgbe_init_ops_82598");
+
+       /* NOTE(review): the status of the PHY-ops init is immediately
+        * overwritten; only the generic-ops init result is returned. */
+       ret_val = ixgbe_init_phy_ops_generic(hw);
+       ret_val = ixgbe_init_ops_generic(hw);
+
+       /* PHY */
+       phy->ops.init = &ixgbe_init_phy_ops_82598;
+
+       /* MAC */
+       mac->ops.start_hw = &ixgbe_start_hw_82598;
+       mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_82598;
+       mac->ops.reset_hw = &ixgbe_reset_hw_82598;
+       mac->ops.get_media_type = &ixgbe_get_media_type_82598;
+       mac->ops.get_supported_physical_layer =
+                                   &ixgbe_get_supported_physical_layer_82598;
+       mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82598;
+       mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82598;
+       mac->ops.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie_82598;
+
+       /* RAR, Multicast, VLAN */
+       mac->ops.set_vmdq = &ixgbe_set_vmdq_82598;
+       mac->ops.clear_vmdq = &ixgbe_clear_vmdq_82598;
+       mac->ops.set_vfta = &ixgbe_set_vfta_82598;
+       mac->ops.clear_vfta = &ixgbe_clear_vfta_82598;
+
+       /* Flow Control */
+       mac->ops.fc_enable = &ixgbe_fc_enable_82598;
+
+       /* 82598 device limits: filter/VLAN table sizes, RAR entries,
+        * Rx packet-buffer size (KB) and queue counts. */
+       mac->mcft_size       = 128;
+       mac->vft_size        = 128;
+       mac->num_rar_entries = 16;
+       mac->rx_pb_size      = 512;
+       mac->max_tx_queues   = 32;
+       mac->max_rx_queues   = 64;
+       mac->max_msix_vectors = ixgbe_get_pcie_msix_count_82598(hw);
+
+       /* SFP+ Module */
+       phy->ops.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598;
+
+       /* Link */
+       mac->ops.check_link = &ixgbe_check_mac_link_82598;
+       mac->ops.setup_link = &ixgbe_setup_mac_link_82598;
+       mac->ops.flap_tx_laser = NULL;
+       mac->ops.get_link_capabilities =
+                              &ixgbe_get_link_capabilities_82598;
+       mac->ops.setup_rxpba = &ixgbe_set_rxpba_82598;
+
+       /* Manageability interface: not supported on 82598 */
+       mac->ops.set_fw_drv_ver = NULL;
+
+       return ret_val;
+}
+
+/**
+ *  ixgbe_init_phy_ops_82598 - PHY/SFP specific init
+ *  @hw: pointer to hardware structure
+ *
+ *  Initialize any function pointers that were not able to be
+ *  set during init_shared_code because the PHY/SFP type was
+ *  not known.  Perform the SFP init if necessary.
+ *
+ *  Returns IXGBE_SUCCESS, or IXGBE_ERR_SFP_NOT_SUPPORTED when an
+ *  attached SFP+ module cannot be identified or has no init sequence.
+ **/
+s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
+{
+       struct ixgbe_mac_info *mac = &hw->mac;
+       struct ixgbe_phy_info *phy = &hw->phy;
+       s32 ret_val = IXGBE_SUCCESS;
+       u16 list_offset, data_offset;
+
+       DEBUGFUNC("ixgbe_init_phy_ops_82598");
+
+       /* Identify the PHY */
+       phy->ops.identify(hw);
+
+       /* Overwrite the link function pointers if copper PHY */
+       if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
+               mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
+               mac->ops.get_link_capabilities =
+                                 &ixgbe_get_copper_link_capabilities_generic;
+       }
+
+       switch (hw->phy.type) {
+       case ixgbe_phy_tn:
+               /* TNX copper PHY: use TNX-specific link/firmware ops */
+               phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
+               phy->ops.check_link = &ixgbe_check_phy_link_tnx;
+               phy->ops.get_firmware_version =
+                            &ixgbe_get_phy_firmware_version_tnx;
+               break;
+       case ixgbe_phy_nl:
+               phy->ops.reset = &ixgbe_reset_phy_nl;
+
+               /* Call SFP+ identify routine to get the SFP+ module type */
+               ret_val = phy->ops.identify_sfp(hw);
+               if (ret_val != IXGBE_SUCCESS)
+                       goto out;
+               else if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) {
+                       ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
+                       goto out;
+               }
+
+               /* Check to see if SFP+ module is supported */
+               ret_val = ixgbe_get_sfp_init_sequence_offsets(hw,
+                                                           &list_offset,
+                                                           &data_offset);
+               if (ret_val != IXGBE_SUCCESS) {
+                       ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
+                       goto out;
+               }
+               break;
+       default:
+               /* other PHY types need no extra setup here */
+               break;
+       }
+
+out:
+       return ret_val;
+}
+
<br>
+/**
+ *  ixgbe_start_hw_82598 - Prepare hardware for Tx/Rx
+ *  @hw: pointer to hardware structure
+ *
+ *  Starts the hardware using the generic start_hw function.
+ *  Disables relaxed ordering Then set pcie completion timeout
+ *
+ **/
+s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
+{
+       u32 regval;
+       u32 i;
+       s32 ret_val = IXGBE_SUCCESS;
+
+       DEBUGFUNC("ixgbe_start_hw_82598");
+
+       ret_val = ixgbe_start_hw_generic(hw);
+
+       /* Disable relaxed ordering on all Tx queues (capped at the
+        * 82598 DCA queue count).  NOTE(review): presumably required by
+        * an 82598 erratum — confirm against the spec update. */
+       for (i = 0; ((i < hw->mac.max_tx_queues) &&
+            (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
+               regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
+               regval &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
+               IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval);
+       }
+
+       /* Likewise for descriptor write-back on all Rx queues */
+       for (i = 0; ((i < hw->mac.max_rx_queues) &&
+            (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
+               regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
+               regval &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
+                           IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
+               IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
+       }
+
+       /* set the completion timeout for interface */
+       if (ret_val == IXGBE_SUCCESS)
+               ixgbe_set_pcie_completion_timeout(hw);
+
+       return ret_val;
+}
+
+/**
+ *  ixgbe_get_link_capabilities_82598 - Determines link capabilities
+ *  @hw: pointer to hardware structure
+ *  @speed: pointer to link speed
+ *  @autoneg: boolean auto-negotiation value
+ *
+ *  Determines the link capabilities by reading the AUTOC register.
+ *  Returns IXGBE_ERR_LINK_SETUP for unrecognized link-mode settings.
+ **/
+static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
+                                             ixgbe_link_speed *speed,
+                                             bool *autoneg)
+{
+       s32 status = IXGBE_SUCCESS;
+       u32 autoc = 0;
+
+       DEBUGFUNC("ixgbe_get_link_capabilities_82598");
+
+       /*
+        * Determine link capabilities based on the stored value of AUTOC,
+        * which represents EEPROM defaults.  If AUTOC value has not been
+        * stored, use the current register value.
+        */
+       if (hw->mac.orig_link_settings_stored)
+               autoc = hw->mac.orig_autoc;
+       else
+               autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+
+       switch (autoc & IXGBE_AUTOC_LMS_MASK) {
+       case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
+               *speed = IXGBE_LINK_SPEED_1GB_FULL;
+               *autoneg = FALSE;
+               break;
+
+       case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
+               *speed = IXGBE_LINK_SPEED_10GB_FULL;
+               *autoneg = FALSE;
+               break;
+
+       case IXGBE_AUTOC_LMS_1G_AN:
+               *speed = IXGBE_LINK_SPEED_1GB_FULL;
+               *autoneg = TRUE;
+               break;
+
+       case IXGBE_AUTOC_LMS_KX4_AN:
+       case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
+               /* OR in each speed the KX4/KX support bits advertise */
+               *speed = IXGBE_LINK_SPEED_UNKNOWN;
+               if (autoc & IXGBE_AUTOC_KX4_SUPP)
+                       *speed |= IXGBE_LINK_SPEED_10GB_FULL;
+               if (autoc & IXGBE_AUTOC_KX_SUPP)
+                       *speed |= IXGBE_LINK_SPEED_1GB_FULL;
+               *autoneg = TRUE;
+               break;
+
+       default:
+               /* outputs are left untouched on this error path */
+               status = IXGBE_ERR_LINK_SETUP;
+               break;
+       }
+
+       return status;
+}
+
+/**
+ *  ixgbe_get_media_type_82598 - Determines media type
+ *  @hw: pointer to hardware structure
+ *
+ *  Returns the media type (fiber, copper, backplane).  A detected
+ *  copper PHY takes precedence; otherwise media is derived from the
+ *  device ID, defaulting to ixgbe_media_type_unknown.
+ **/
+static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
+{
+       enum ixgbe_media_type media_type;
+
+       DEBUGFUNC("ixgbe_get_media_type_82598");
+
+       /* Detect if there is a copper PHY attached. */
+       switch (hw->phy.type) {
+       case ixgbe_phy_cu_unknown:
+       case ixgbe_phy_tn:
+               media_type = ixgbe_media_type_copper;
+               goto out;
+       default:
+               break;
+       }
+
+       /* Media type for I82598 is based on device ID */
+       switch (hw->device_id) {
+       case IXGBE_DEV_ID_82598:
+       case IXGBE_DEV_ID_82598_BX:
+               /* Default device ID is mezzanine card KX/KX4 */
+               media_type = ixgbe_media_type_backplane;
+               break;
+       case IXGBE_DEV_ID_82598AF_DUAL_PORT:
+       case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
+       case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
+       case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
+       case IXGBE_DEV_ID_82598EB_XF_LR:
+       case IXGBE_DEV_ID_82598EB_SFP_LOM:
+               media_type = ixgbe_media_type_fiber;
+               break;
+       case IXGBE_DEV_ID_82598EB_CX4:
+       case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
+               media_type = ixgbe_media_type_cx4;
+               break;
+       case IXGBE_DEV_ID_82598AT:
+       case IXGBE_DEV_ID_82598AT2:
+               media_type = ixgbe_media_type_copper;
+               break;
+       default:
+               media_type = ixgbe_media_type_unknown;
+               break;
+       }
+out:
+       return media_type;
+}
+
+/**
+ *  ixgbe_fc_enable_82598 - Enable flow control
+ *  @hw: pointer to hardware structure
+ *  @packetbuf_num: packet buffer number (0-7)
+ *
+ *  Enable flow control according to the current settings.
+ *  Programs FCTRL/RMCS for the negotiated mode and the per-buffer
+ *  water marks and pause timers.
+ **/
+s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
+{
+       s32 ret_val = IXGBE_SUCCESS;
+       u32 fctrl_reg;
+       u32 rmcs_reg;
+       u32 reg;
+       u32 link_speed = 0;
+       bool link_up;
+
+       DEBUGFUNC("ixgbe_fc_enable_82598");
+
+       /*
+        * On 82598 having Rx FC on causes resets while doing 1G
+        * so if it's on turn it off once we know link_speed. For
+        * more details see 82598 Specification update.
+        */
+       hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE);
+       if (link_up && link_speed == IXGBE_LINK_SPEED_1GB_FULL) {
+               switch (hw->fc.requested_mode) {
+               case ixgbe_fc_full:
+                       hw->fc.requested_mode = ixgbe_fc_tx_pause;
+                       break;
+               case ixgbe_fc_rx_pause:
+                       hw->fc.requested_mode = ixgbe_fc_none;
+                       break;
+               default:
+                       /* no change */
+                       break;
+               }
+       }
+
+       /* Negotiate the fc mode to use */
+       ret_val = ixgbe_fc_autoneg(hw);
+       if (ret_val == IXGBE_ERR_FLOW_CONTROL)
+               goto out;
+
+       /* Disable any previous flow control settings */
+       fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+       fctrl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE);
+
+       rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS);
+       rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X);
+
+       /*
+        * The possible values of fc.current_mode are:
+        * 0: Flow control is completely disabled
+        * 1: Rx flow control is enabled (we can receive pause frames,
+        *    but not send pause frames).
+        * 2: Tx flow control is enabled (we can send pause frames but
+        *     we do not support receiving pause frames).
+        * 3: Both Rx and Tx flow control (symmetric) are enabled.
+        * other: Invalid.
+        */
+       switch (hw->fc.current_mode) {
+       case ixgbe_fc_none:
+               /*
+                * Flow control is disabled by software override or autoneg.
+                * The code below will actually disable it in the HW.
+                */
+               break;
+       case ixgbe_fc_rx_pause:
+               /*
+                * Rx Flow control is enabled and Tx Flow control is
+                * disabled by software override. Since there really
+                * isn't a way to advertise that we are capable of RX
+                * Pause ONLY, we will advertise that we support both
+                * symmetric and asymmetric Rx PAUSE.  Later, we will
+                * disable the adapter's ability to send PAUSE frames.
+                */
+               fctrl_reg |= IXGBE_FCTRL_RFCE;
+               break;
+       case ixgbe_fc_tx_pause:
+               /*
+                * Tx Flow control is enabled, and Rx Flow control is
+                * disabled by software override.
+                */
+               rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
+               break;
+       case ixgbe_fc_full:
+               /* Flow control (both Rx and Tx) is enabled by SW override. */
+               fctrl_reg |= IXGBE_FCTRL_RFCE;
+               rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
+               break;
+       default:
+               DEBUGOUT("Flow control param set incorrectly\n");
+               ret_val = IXGBE_ERR_CONFIG;
+               goto out;
+               break;
+       }
+
+       /* Set 802.3x based flow control settings. */
+       fctrl_reg |= IXGBE_FCTRL_DPF;
+       IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg);
+       IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);
+
+       /* Set up and enable Rx high/low water mark thresholds, enable XON.
+        * NOTE(review): low_water is a single value while high_water is
+        * indexed per packet buffer — confirm this asymmetry is intended. */
+       if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
+               reg = hw->fc.low_water << 6;
+               if (hw->fc.send_xon)
+                       reg |= IXGBE_FCRTL_XONE;
+
+               IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), reg);
+
+               reg = hw->fc.high_water[packetbuf_num] << 6;
+               reg |= IXGBE_FCRTH_FCEN;
+
+               IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num), reg);
+       }
+
+       /* Configure pause time (2 TCs per register): even buffers occupy
+        * the low 16 bits, odd buffers the high 16 bits */
+       reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2));
+       if ((packetbuf_num & 1) == 0)
+               reg = (reg & 0xFFFF0000) | hw->fc.pause_time;
+       else
+               reg = (reg & 0x0000FFFF) | (hw->fc.pause_time << 16);
+       IXGBE_WRITE_REG(hw, IXGBE_FCTTV(packetbuf_num / 2), reg);
+
+       /* Refresh threshold: half the pause time */
+       IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1));
+
+out:
+       return ret_val;
+}
+
+/**
+ *  ixgbe_start_mac_link_82598 - Configures MAC link settings
+ *  @hw: pointer to hardware structure
+ *  @autoneg_wait_to_complete: TRUE to poll until autoneg completes
+ *
+ *  Configures link settings based on values in the ixgbe_hw struct.
+ *  Restarts the link.  Performs autonegotiation if needed.
+ *  Returns IXGBE_ERR_AUTONEG_NOT_COMPLETE when waiting was requested
+ *  and KX4 autoneg did not finish within the timeout.
+ **/
+static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
+                                      bool autoneg_wait_to_complete)
+{
+       u32 autoc_reg;
+       u32 links_reg;
+       u32 i;
+       s32 status = IXGBE_SUCCESS;
+
+       DEBUGFUNC("ixgbe_start_mac_link_82598");
+
+       /* Restart link */
+       autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+       autoc_reg |= IXGBE_AUTOC_AN_RESTART;
+       IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
+
+       /* Only poll for autoneg to complete if specified to do so */
+       if (autoneg_wait_to_complete) {
+               if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
+                    IXGBE_AUTOC_LMS_KX4_AN ||
+                   (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
+                    IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
+                       links_reg = 0; /* Just in case Autoneg time = 0 */
+                       /* poll in 100 ms steps for up to IXGBE_AUTO_NEG_TIME
+                        * iterations */
+                       for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
+                               links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
+                               if (links_reg & IXGBE_LINKS_KX_AN_COMP)
+                                       break;
+                               msec_delay(100);
+                       }
+                       if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
+                               status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
+                               DEBUGOUT("Autonegotiation did not complete.\n");
+                       }
+               }
+       }
+
+       /* Add delay to filter out noises during initial link setup */
+       msec_delay(50);
+
+       return status;
+}
+
+/**
+ *  ixgbe_validate_link_ready - Function looks for phy link
+ *  @hw: pointer to hardware structure
+ *
+ *  Function indicates success when phy link is available. If phy is not ready
+ *  within 5 seconds of MAC indicating link, the function returns error.
+ *  Only applies to the 82598AT2 device; all others return success
+ *  immediately.
+ **/
+static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
+{
+       u32 timeout;
+       u16 an_reg;
+
+       if (hw->device_id != IXGBE_DEV_ID_82598AT2)
+               return IXGBE_SUCCESS;
+
+       /* Poll the PHY autoneg status in 100 ms steps until both the
+        * autoneg-complete and link-up bits are set */
+       for (timeout = 0;
+            timeout < IXGBE_VALIDATE_LINK_READY_TIMEOUT; timeout++) {
+               hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
+                                    IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &an_reg);
+
+               if ((an_reg & IXGBE_MII_AUTONEG_COMPLETE) &&
+                   (an_reg & IXGBE_MII_AUTONEG_LINK_UP))
+                       break;
+
+               msec_delay(100);
+       }
+
+       if (timeout == IXGBE_VALIDATE_LINK_READY_TIMEOUT) {
+               DEBUGOUT("Link was indicated but link is down\n");
+               return IXGBE_ERR_LINK_SETUP;
+       }
+
+       return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_check_mac_link_82598 - Get link/speed status
+ *  @hw: pointer to hardware structure
+ *  @speed: pointer to link speed
+ *  @link_up: TRUE is link is up, FALSE otherwise
+ *  @link_up_wait_to_complete: bool used to wait for link up or not
+ *
+ *  Reads the links register to determine if link is up and the current speed
+ **/
+static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
+                                      ixgbe_link_speed *speed, bool *link_up,
+                                      bool link_up_wait_to_complete)
+{
+       u32 links_reg;
+       u32 i;
+       u16 link_reg, adapt_comp_reg;
+
+       DEBUGFUNC("ixgbe_check_mac_link_82598");
+
+       /*
+        * SERDES PHY requires us to read link status from undocumented
+        * register 0xC79F.  Bit 0 set indicates link is up/ready; clear
+        * indicates link down.  0xC00C is read to check that the XAUI lanes
+        * are active.  Bit 0 clear indicates active; set indicates inactive.
+        */
+       if (hw->phy.type == ixgbe_phy_nl) {
+               /* register is read twice; first read clears stale status */
+               hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
+               hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg);
+               hw->phy.ops.read_reg(hw, 0xC00C, IXGBE_TWINAX_DEV,
+                                    &adapt_comp_reg);
+               if (link_up_wait_to_complete) {
+                       for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
+                               if ((link_reg & 1) &&
+                                   ((adapt_comp_reg & 1) == 0)) {
+                                       *link_up = TRUE;
+                                       break;
+                               } else {
+                                       *link_up = FALSE;
+                               }
+                               msec_delay(100);
+                               hw->phy.ops.read_reg(hw, 0xC79F,
+                                                    IXGBE_TWINAX_DEV,
+                                                    &link_reg);
+                               hw->phy.ops.read_reg(hw, 0xC00C,
+                                                    IXGBE_TWINAX_DEV,
+                                                    &adapt_comp_reg);
+                       }
+               } else {
+                       if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0))
+                               *link_up = TRUE;
+                       else
+                               *link_up = FALSE;
+               }
+
+               /* PHY says no link: skip the MAC LINKS check entirely */
+               if (*link_up == FALSE)
+                       goto out;
+       }
+
+       links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
+       if (link_up_wait_to_complete) {
+               for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
+                       if (links_reg & IXGBE_LINKS_UP) {
+                               *link_up = TRUE;
+                               break;
+                       } else {
+                               *link_up = FALSE;
+                       }
+                       msec_delay(100);
+                       links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
+               }
+       } else {
+               if (links_reg & IXGBE_LINKS_UP)
+                       *link_up = TRUE;
+               else
+                       *link_up = FALSE;
+       }
+
+       if (links_reg & IXGBE_LINKS_SPEED)
+               *speed = IXGBE_LINK_SPEED_10GB_FULL;
+       else
+               *speed = IXGBE_LINK_SPEED_1GB_FULL;
+
+       /* 82598AT2: double-check the PHY agrees the link is really ready */
+       if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && (*link_up == TRUE) &&
+           (ixgbe_validate_link_ready(hw) != IXGBE_SUCCESS))
+               *link_up = FALSE;
+
+       /* if link is down, zero out the current_mode */
+       if (*link_up == FALSE) {
+               hw->fc.current_mode = ixgbe_fc_none;
+               hw->fc.fc_was_autonegged = FALSE;
+       }
+out:
+       return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_setup_mac_link_82598 - Set MAC link speed
+ *  @hw: pointer to hardware structure
+ *  @speed: new link speed
+ *  @autoneg: TRUE if autonegotiation enabled
+ *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
+ *
+ *  Set the link speed in the AUTOC register and restarts link.
+ *  Returns IXGBE_ERR_LINK_SETUP when the requested speed is not
+ *  supported by the device's link capabilities.
+ **/
+static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
+                                           ixgbe_link_speed speed, bool autoneg,
+                                           bool autoneg_wait_to_complete)
+{
+       s32              status            = IXGBE_SUCCESS;
+       ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
+       u32              curr_autoc        = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+       u32              autoc             = curr_autoc;
+       u32              link_mode         = autoc & IXGBE_AUTOC_LMS_MASK;
+
+       DEBUGFUNC("ixgbe_setup_mac_link_82598");
+
+       /* Check to see if speed passed in is supported.
+        * NOTE(review): this also overwrites the caller's autoneg value
+        * with the device capability — confirm that is intended. */
+       ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
+       speed &= link_capabilities;
+
+       if (speed == IXGBE_LINK_SPEED_UNKNOWN)
+               status = IXGBE_ERR_LINK_SETUP;
+
+       /* Set KX4/KX support according to speed requested */
+       else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN ||
+                link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
+               autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK;
+               if (speed & IXGBE_LINK_SPEED_10GB_FULL)
+                       autoc |= IXGBE_AUTOC_KX4_SUPP;
+               if (speed & IXGBE_LINK_SPEED_1GB_FULL)
+                       autoc |= IXGBE_AUTOC_KX_SUPP;
+               /* only touch AUTOC when the support bits actually change */
+               if (autoc != curr_autoc)
+                       IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
+       }
+
+       if (status == IXGBE_SUCCESS) {
+               /*
+                * Setup and restart the link based on the new values in
+                * ixgbe_hw This will write the AUTOC register based on the new
+                * stored values
+                */
+               status = ixgbe_start_mac_link_82598(hw,
+                                                   autoneg_wait_to_complete);
+       }
+
+       return status;
+}
+
+
+/**
+ *  ixgbe_setup_copper_link_82598 - Set the PHY autoneg advertised field
+ *  @hw: pointer to hardware structure
+ *  @speed: new link speed
+ *  @autoneg: TRUE if autonegotiation enabled
+ *  @autoneg_wait_to_complete: TRUE if waiting is needed to complete
+ *
+ *  Sets the link speed in the AUTOC register in the MAC and restarts link.
+ *  Returns the PHY setup status; the MAC link-start status is ignored.
+ **/
+static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
+                                               ixgbe_link_speed speed,
+                                               bool autoneg,
+                                               bool autoneg_wait_to_complete)
+{
+       s32 status;
+
+       DEBUGFUNC("ixgbe_setup_copper_link_82598");
+
+       /* Setup the PHY according to input speed */
+       status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
+                                             autoneg_wait_to_complete);
+       /* Set up MAC */
+       ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
+
+       return status;
+}
+
+/**
+ *  ixgbe_reset_hw_82598 - Performs hardware reset
+ *  @hw: pointer to hardware structure
+ *
+ *  Resets the hardware by resetting the transmit and receive units, masks and
+ *  clears all interrupts, performing a PHY reset, and performing a link (MAC)
+ *  reset.
+ **/
+static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
+{
+       s32 status = IXGBE_SUCCESS;
+       s32 phy_status = IXGBE_SUCCESS;
+       u32 ctrl;
+       u32 gheccr;
+       u32 i;
+       u32 autoc;
+       u8  analog_val;
+
+       DEBUGFUNC("ixgbe_reset_hw_82598");
+
+       /* Call adapter stop to disable tx/rx and clear interrupts */
+       status = hw->mac.ops.stop_adapter(hw);
+       if (status != IXGBE_SUCCESS)
+               goto reset_hw_out;
+
+       /*
+        * Power up the Atlas Tx lanes if they are currently powered down.
+        * Atlas Tx lanes are powered down for MAC loopback tests, but
+        * they are not automatically restored on reset.
+        */
+       hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val);
+       if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) {
+               /* Enable Tx Atlas so packets can be transmitted again */
+               hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
+                                            &analog_val);
+               analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN;
+               hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
+                                             analog_val);
+
+               hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
+                                            &analog_val);
+               analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
+               hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
+                                             analog_val);
+
+               hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
+                                            &analog_val);
+               analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
+               hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
+                                             analog_val);
+
+               hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
+                                            &analog_val);
+               analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
+               hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
+                                             analog_val);
+       }
+
+       /* Reset PHY */
+       if (hw->phy.reset_disable == FALSE) {
+               /* PHY ops must be identified and initialized prior to reset */
+
+               /* Init PHY and function pointers, perform SFP setup */
+               phy_status = hw->phy.ops.init(hw);
+               if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED)
+                       goto reset_hw_out;
+               if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT)
+                       goto mac_reset_top;
+
+               hw->phy.ops.reset(hw);
+       }
+
+mac_reset_top:
+       /*
+        * Issue global reset to the MAC.  This needs to be a SW reset.
+        * If link reset is used, it might reset the MAC when mng is using it
+        */
+       ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL) | IXGBE_CTRL_RST;
+       IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
+       IXGBE_WRITE_FLUSH(hw);
+
+       /* Poll for reset bit to self-clear indicating reset is complete */
+       for (i = 0; i < 10; i++) {
+               usec_delay(1);
+               ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
+               if (!(ctrl & IXGBE_CTRL_RST))
+                       break;
+       }
+       /* If the bit never cleared, record the failure but continue so the
+        * remaining state (AUTOC, MAC address, RARs) is still restored */
+       if (ctrl & IXGBE_CTRL_RST) {
+               status = IXGBE_ERR_RESET_FAILED;
+               DEBUGOUT("Reset polling failed to complete.\n");
+       }
+
+       msec_delay(50);
+
+       /*
+        * Double resets are required for recovery from certain error
+        * conditions.  Between resets, it is necessary to stall to allow time
+        * for any pending HW events to complete.
+        */
+       if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
+               hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
+               goto mac_reset_top;
+       }
+
+       /* Clear GHECCR error bits 21, 18, 9 and 6 */
+       gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR);
+       gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6));
+       IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr);
+
+       /*
+        * Store the original AUTOC value if it has not been
+        * stored off yet.  Otherwise restore the stored original
+        * AUTOC value since the reset operation sets back to defaults.
+        */
+       autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+       if (hw->mac.orig_link_settings_stored == FALSE) {
+               hw->mac.orig_autoc = autoc;
+               hw->mac.orig_link_settings_stored = TRUE;
+       } else if (autoc != hw->mac.orig_autoc) {
+               IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
+       }
+
+       /* Store the permanent mac address */
+       hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
+
+       /*
+        * Store MAC address from RAR0, clear receive address registers, and
+        * clear the multicast table
+        */
+       hw->mac.ops.init_rx_addrs(hw);
+
+reset_hw_out:
+       /* A PHY init failure takes precedence over the MAC reset result so
+        * callers see the root cause */
+       if (phy_status != IXGBE_SUCCESS)
+               status = phy_status;
+
+       return status;
+}
+
+/**
+ *  ixgbe_set_vmdq_82598 - Associate a VMDq set index with a rx address
+ *  @hw: pointer to hardware struct
+ *  @rar: receive address register index to associate with a VMDq index
+ *  @vmdq: VMDq set index
+ *
+ *  Writes the VMDq pool index into the VIND field of the RAH register
+ *  selected by @rar.  Returns IXGBE_ERR_INVALID_ARGUMENT when @rar is out
+ *  of range, IXGBE_SUCCESS otherwise.
+ **/
+s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+{
+       u32 rar_high;
+       u32 rar_entries = hw->mac.num_rar_entries;
+
+       DEBUGFUNC("ixgbe_set_vmdq_82598");
+
+       /* Make sure we are using a valid rar index range */
+       if (rar >= rar_entries) {
+               /* %u: rar is a u32; %d would misprint values above INT_MAX */
+               DEBUGOUT1("RAR index %u is out of range.\n", rar);
+               return IXGBE_ERR_INVALID_ARGUMENT;
+       }
+
+       /* Read-modify-write only the VIND field of RAH */
+       rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
+       rar_high &= ~IXGBE_RAH_VIND_MASK;
+       rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK);
+       IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
+       return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_clear_vmdq_82598 - Disassociate a VMDq set index from an rx address
+ *  @hw: pointer to hardware struct
+ *  @rar: receive address register index to associate with a VMDq index
+ *  @vmdq: VMDq clear index (not used in 82598, but elsewhere)
+ *
+ *  Clears the VIND field of the RAH register selected by @rar; the write
+ *  is skipped when the field is already zero to avoid a redundant MMIO.
+ **/
+static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+{
+       u32 rar_high;
+       u32 rar_entries = hw->mac.num_rar_entries;
+
+       /* Trace entry, consistent with every other function in this file */
+       DEBUGFUNC("ixgbe_clear_vmdq_82598");
+
+       UNREFERENCED_1PARAMETER(vmdq);
+
+       /* Make sure we are using a valid rar index range */
+       if (rar >= rar_entries) {
+               /* %u: rar is a u32; %d would misprint values above INT_MAX */
+               DEBUGOUT1("RAR index %u is out of range.\n", rar);
+               return IXGBE_ERR_INVALID_ARGUMENT;
+       }
+
+       rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
+       if (rar_high & IXGBE_RAH_VIND_MASK) {
+               rar_high &= ~IXGBE_RAH_VIND_MASK;
+               IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high);
+       }
+
+       return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_set_vfta_82598 - Set VLAN filter table
+ *  @hw: pointer to hardware structure
+ *  @vlan: VLAN id to write to VLAN filter
+ *  @vind: VMDq output index that maps queue to VLAN id in VFTA
+ *  @vlan_on: boolean flag to turn on/off VLAN in VFTA
+ *
+ *  Turn on/off specified VLAN in the VLAN filter table.  Also programs
+ *  the 4-bit VMDq queue index for this VLAN into the VFTAVIND byte array.
+ **/
+s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+                                                     bool vlan_on)
+{
+       u32 regindex;
+       u32 bitindex;
+       u32 bits;
+       u32 vftabyte;
+
+       DEBUGFUNC("ixgbe_set_vfta_82598");
+
+       /* VLAN ids are 12 bits wide */
+       if (vlan > 4095)
+               return IXGBE_ERR_PARAM;
+
+       /* Determine 32-bit word position in array */
+       regindex = (vlan >> 5) & 0x7F;   /* upper seven bits */
+
+       /* Determine the location of the (VMD) queue index */
+       vftabyte =  ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */
+       bitindex = (vlan & 0x7) << 2;    /* lower 3 bits indicate nibble */
+
+       /* Set the nibble for VMD queue index (read-modify-write).
+        * NOTE(review): vind is not masked to 4 bits here; a value > 0xF
+        * would corrupt neighboring nibbles -- callers presumably pass a
+        * valid index, confirm. */
+       bits = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex));
+       bits &= (~(0x0F << bitindex));
+       bits |= (vind << bitindex);
+       IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex), bits);
+
+       /* Determine the location of the bit for this VLAN id */
+       bitindex = vlan & 0x1F;   /* lower five bits */
+
+       bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
+       if (vlan_on)
+               /* Turn on this VLAN id */
+               bits |= (1 << bitindex);
+       else
+               /* Turn off this VLAN id */
+               bits &= ~(1 << bitindex);
+       IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);
+
+       return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_clear_vfta_82598 - Clear VLAN filter table
+ *  @hw: pointer to hardware structure
+ *
+ *  Clears the VLAN filer table, and the VMDq index associated with the filter
+ **/
+static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
+{
+       u32 reg_idx;
+       u32 byte_idx;
+
+       DEBUGFUNC("ixgbe_clear_vfta_82598");
+
+       /* Zero every VLAN filter table register first... */
+       for (reg_idx = 0; reg_idx < hw->mac.vft_size; reg_idx++)
+               IXGBE_WRITE_REG(hw, IXGBE_VFTA(reg_idx), 0);
+
+       /* ...then zero all four VMDq index byte arrays */
+       for (byte_idx = 0; byte_idx < 4; byte_idx++) {
+               for (reg_idx = 0; reg_idx < hw->mac.vft_size; reg_idx++)
+                       IXGBE_WRITE_REG(hw,
+                                       IXGBE_VFTAVIND(byte_idx, reg_idx), 0);
+       }
+
+       return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register
+ *  @hw: pointer to hardware structure
+ *  @reg: analog register to read
+ *  @val: read value
+ *
+ *  Performs read operation to Atlas analog register specified.
+ **/
+s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
+{
+       u32  atlas_ctl;
+
+       DEBUGFUNC("ixgbe_read_analog_reg8_82598");
+
+       /* Issue the access with the register number in bits 15:8.
+        * NOTE(review): IXGBE_ATLASCTL_WRITE_CMD is used even for a read;
+        * the data byte then appears in the low byte of ATLASCTL after the
+        * delay -- confirm against the 82598 datasheet. */
+       IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL,
+                       IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
+       IXGBE_WRITE_FLUSH(hw);
+       /* Give the analog interface time to complete the access */
+       usec_delay(10);
+       atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
+       *val = (u8)atlas_ctl;
+
+       return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_write_analog_reg8_82598 - Writes 8 bit Atlas analog register
+ *  @hw: pointer to hardware structure
+ *  @reg: atlas register to write
+ *  @val: value to write
+ *
+ *  Performs write operation to Atlas analog register specified.
+ **/
+s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val)
+{
+       u32 ctl_word;
+
+       DEBUGFUNC("ixgbe_write_analog_reg8_82598");
+
+       /* Register number goes in bits 15:8, the data byte in bits 7:0 */
+       ctl_word = (reg << 8) | val;
+       IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, ctl_word);
+       IXGBE_WRITE_FLUSH(hw);
+       /* Allow the analog write to settle */
+       usec_delay(10);
+
+       return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_read_i2c_eeprom_82598 - Reads 8 bit word over I2C interface.
+ *  @hw: pointer to hardware structure
+ *  @byte_offset: EEPROM byte offset to read
+ *  @eeprom_data: value read
+ *
+ *  Performs 8 bit read operation to SFP module's EEPROM over I2C interface.
+ *  Only supported when the PHY type is ixgbe_phy_nl; returns IXGBE_ERR_PHY
+ *  otherwise.
+ **/
+s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
+                                u8 *eeprom_data)
+{
+       s32 status = IXGBE_SUCCESS;
+       u16 sfp_addr = 0;
+       u16 sfp_data = 0;
+       u16 sfp_stat = 0;
+       u32 i;
+
+       DEBUGFUNC("ixgbe_read_i2c_eeprom_82598");
+
+       if (hw->phy.type == ixgbe_phy_nl) {
+               /*
+                * NetLogic phy SDA/SCL registers are at addresses 0xC30A to
+                * 0xC30D. These registers are used to talk to the SFP+
+                * module's EEPROM through the SDA/SCL (I2C) interface.
+                */
+               sfp_addr = (IXGBE_I2C_EEPROM_DEV_ADDR << 8) + byte_offset;
+               sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK);
+               hw->phy.ops.write_reg(hw,
+                                     IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
+                                     IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+                                     sfp_addr);
+
+               /* Poll status: up to 100 tries, 10 ms apart, until the
+                * transaction leaves the in-progress state */
+               for (i = 0; i < 100; i++) {
+                       hw->phy.ops.read_reg(hw,
+                                            IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT,
+                                            IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+                                            &sfp_stat);
+                       sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK;
+                       if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS)
+                               break;
+                       msec_delay(10);
+               }
+
+               /* Anything other than PASS (fail, or still in progress on
+                * timeout) is treated as a missing module */
+               if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) {
+                       DEBUGOUT("EEPROM read did not pass.\n");
+                       status = IXGBE_ERR_SFP_NOT_PRESENT;
+                       goto out;
+               }
+
+               /* Read data */
+               hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA,
+                                    IXGBE_MDIO_PMA_PMD_DEV_TYPE, &sfp_data);
+
+               /* The EEPROM byte is returned in the high byte of the word */
+               *eeprom_data = (u8)(sfp_data >> 8);
+       } else {
+               status = IXGBE_ERR_PHY;
+               goto out;
+       }
+
+out:
+       return status;
+}
+
+/**
+ *  ixgbe_get_supported_physical_layer_82598 - Returns physical layer type
+ *  @hw: pointer to hardware structure
+ *
+ *  Determines physical layer capabilities of the current configuration.
+ *  Classification order matters: copper PHY first, then AUTOC link mode,
+ *  then NetLogic SFP type, and finally a device-id override.
+ **/
+u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
+{
+       u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+       u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+       u32 pma_pmd_10g = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
+       u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
+       u16 ext_ability = 0;
+
+       DEBUGFUNC("ixgbe_get_supported_physical_layer_82598");
+
+       hw->phy.ops.identify(hw);
+
+       /* Copper PHY must be checked before AUTOC LMS to determine correct
+        * physical layer because 10GBase-T PHYs use LMS = KX4/KX */
+       switch (hw->phy.type) {
+       case ixgbe_phy_tn:
+       case ixgbe_phy_cu_unknown:
+               /* Query the PHY's extended-ability register for BASE-T modes */
+               hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
+               IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
+               if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
+                       physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
+               if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
+                       physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
+               if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
+                       physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
+               goto out;
+       default:
+               break;
+       }
+
+       /* Non-copper: derive the layer from the AUTOC link mode select */
+       switch (autoc & IXGBE_AUTOC_LMS_MASK) {
+       case IXGBE_AUTOC_LMS_1G_AN:
+       case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
+               if (pma_pmd_1g == IXGBE_AUTOC_1G_KX)
+                       physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX;
+               else
+                       physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_BX;
+               break;
+       case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
+               if (pma_pmd_10g == IXGBE_AUTOC_10G_CX4)
+                       physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
+               else if (pma_pmd_10g == IXGBE_AUTOC_10G_KX4)
+                       physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
+               else /* XAUI */
+                       physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+               break;
+       case IXGBE_AUTOC_LMS_KX4_AN:
+       case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
+               if (autoc & IXGBE_AUTOC_KX_SUPP)
+                       physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
+               if (autoc & IXGBE_AUTOC_KX4_SUPP)
+                       physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
+               break;
+       default:
+               break;
+       }
+
+       /* A NetLogic PHY's SFP module type overrides the AUTOC result */
+       if (hw->phy.type == ixgbe_phy_nl) {
+               hw->phy.ops.identify_sfp(hw);
+
+               switch (hw->phy.sfp_type) {
+               case ixgbe_sfp_type_da_cu:
+                       physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
+                       break;
+               case ixgbe_sfp_type_sr:
+                       physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
+                       break;
+               case ixgbe_sfp_type_lr:
+                       physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
+                       break;
+               default:
+                       physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+                       break;
+               }
+       }
+
+       /* Finally, certain device ids fix the layer unconditionally */
+       switch (hw->device_id) {
+       case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
+               physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
+               break;
+       case IXGBE_DEV_ID_82598AF_DUAL_PORT:
+       case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
+       case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
+               physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
+               break;
+       case IXGBE_DEV_ID_82598EB_XF_LR:
+               physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
+               break;
+       default:
+               break;
+       }
+
+out:
+       return physical_layer;
+}
+
+/**
+ *  ixgbe_set_lan_id_multi_port_pcie_82598 - Set LAN id for PCIe multiple
+ *  port devices.
+ *  @hw: pointer to the HW structure
+ *
+ *  Calls common function and corrects issue with some single port devices
+ *  that enable LAN1 but not LAN0.
+ **/
+void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw)
+{
+       struct ixgbe_bus_info *bus = &hw->bus;
+       u16 pci_gen = 0;
+       u16 pci_ctrl2 = 0;
+
+       DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie_82598");
+
+       ixgbe_set_lan_id_multi_port_pcie(hw);
+
+       /* Locate the PCIe general configuration block in the EEPROM;
+        * 0 or 0xFFFF means it is absent and LAN0 cannot be disabled */
+       hw->eeprom.ops.read(hw, IXGBE_PCIE_GENERAL_PTR, &pci_gen);
+       if (pci_gen == 0 || pci_gen == 0xFFFF)
+               return;
+
+       hw->eeprom.ops.read(hw, pci_gen + IXGBE_PCIE_CTRL2, &pci_ctrl2);
+
+       /* LAN0 completely disabled (no dummy function and no disable
+        * select): force this port to function 0 */
+       if ((pci_ctrl2 & IXGBE_PCIE_CTRL2_LAN_DISABLE) &&
+           !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DISABLE_SELECT) &&
+           !(pci_ctrl2 & IXGBE_PCIE_CTRL2_DUMMY_ENABLE))
+               bus->func = 0;
+}
+
+/**
+ *  ixgbe_enable_relaxed_ordering_82598 - enable relaxed ordering
+ *  @hw: pointer to hardware structure
+ *
+ *  Enables PCIe relaxed-ordering bits in the per-queue DCA Tx and Rx
+ *  control registers, bounded by IXGBE_DCA_MAX_QUEUES_82598.
+ **/
+void ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw)
+{
+       u32 ctrl;
+       u32 q;
+
+       DEBUGFUNC("ixgbe_enable_relaxed_ordering_82598");
+
+       /* Turn on write-back relaxed ordering for every Tx queue */
+       for (q = 0; q < hw->mac.max_tx_queues &&
+            q < IXGBE_DCA_MAX_QUEUES_82598; q++) {
+               ctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(q));
+               ctrl |= IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
+               IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(q), ctrl);
+       }
+
+       /* Turn on descriptor and header relaxed ordering for every Rx queue */
+       for (q = 0; q < hw->mac.max_rx_queues &&
+            q < IXGBE_DCA_MAX_QUEUES_82598; q++) {
+               ctrl = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(q));
+               ctrl |= IXGBE_DCA_RXCTRL_DESC_WRO_EN |
+                       IXGBE_DCA_RXCTRL_DESC_HSRO_EN;
+               IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(q), ctrl);
+       }
+}
+
+/**
+ * ixgbe_set_rxpba_82598 - Initialize RX packet buffer
+ * @hw: pointer to hardware structure
+ * @num_pb: number of packet buffers to allocate
+ * @headroom: reserve n KB of headroom (unused on 82598)
+ * @strategy: packet buffer allocation strategy
+ *
+ * PBA_STRATEGY_WEIGHTED sizes the first four Rx buffers at 80KB and the
+ * rest at 48KB; any other strategy gives all buffers 64KB.  Tx buffers
+ * are always 40KB.
+ **/
+static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
+                                  u32 headroom, int strategy)
+{
+       u32 rxpktsize = IXGBE_RXPBSIZE_64KB;
+       u8 i = 0;
+       UNREFERENCED_1PARAMETER(headroom);
+
+       /* Nothing to program if no packet buffers were requested */
+       if (!num_pb)
+               return;
+
+       /* Setup Rx packet buffer sizes.  Note the WEIGHTED case deliberately
+        * falls through with i and rxpktsize carried over, so the EQUAL loop
+        * finishes the remaining buffers at 48KB. */
+       switch (strategy) {
+       case PBA_STRATEGY_WEIGHTED:
+               /* Setup the first four at 80KB */
+               rxpktsize = IXGBE_RXPBSIZE_80KB;
+               for (; i < 4; i++)
+                       IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
+               /* Setup the last four at 48KB...don't re-init i */
+               rxpktsize = IXGBE_RXPBSIZE_48KB;
+               /* Fall Through */
+       case PBA_STRATEGY_EQUAL:
+       default:
+               /* Divide the remaining Rx packet buffer evenly among the TCs */
+               for (; i < IXGBE_MAX_PACKET_BUFFERS; i++)
+                       IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
+               break;
+       }
+
+       /* Setup Tx packet buffer sizes */
+       for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++)
+               IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB);
+
+       return;
+}
diff --git a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_82599.c b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_82599.c
new file mode 100644 (file)
index 0000000..59639d4
--- /dev/null
@@ -0,0 +1,2281 @@
+/******************************************************************************
+
+  Copyright (c) 2001-2010, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#include "ixgbe_type.h"
+#include "ixgbe_api.h"
+#include "ixgbe_common.h"
+#include "ixgbe_phy.h"
+
+s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw);
+s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
+                                      ixgbe_link_speed *speed,
+                                      bool *autoneg);
+enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw);
+void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
+void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
+void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
+s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
+                                     ixgbe_link_speed speed, bool autoneg,
+                                     bool autoneg_wait_to_complete);
+s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
+                                    ixgbe_link_speed speed, bool autoneg,
+                                    bool autoneg_wait_to_complete);
+s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
+                               bool autoneg_wait_to_complete);
+s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
+                                     ixgbe_link_speed speed,
+                                     bool autoneg,
+                                     bool autoneg_wait_to_complete);
+static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
+                                               ixgbe_link_speed speed,
+                                               bool autoneg,
+                                               bool autoneg_wait_to_complete);
+s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw);
+void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw);
+s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw);
+s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val);
+s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val);
+s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw);
+s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw);
+s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw);
+u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw);
+s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval);
+static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
+bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
+static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
+                                  u16 offset, u16 *data);
+static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
+                                         u16 words, u16 *data);
+
+/**
+ *  ixgbe_init_mac_link_ops_82599 - Install 82599 link-related callbacks
+ *  @hw: pointer to hardware structure
+ *
+ *  Selects laser-control and link-setup function pointers according to
+ *  the detected media type, multispeed-fiber flag and SmartSpeed setting.
+ **/
+void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
+{
+       struct ixgbe_mac_info *mac = &hw->mac;
+
+       DEBUGFUNC("ixgbe_init_mac_link_ops_82599");
+
+       /* Laser control callbacks only apply to SFP+ fiber media */
+       if (mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) {
+               mac->ops.disable_tx_laser =
+                                      &ixgbe_disable_tx_laser_multispeed_fiber;
+               mac->ops.enable_tx_laser =
+                                       &ixgbe_enable_tx_laser_multispeed_fiber;
+               mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber;
+       } else {
+               mac->ops.disable_tx_laser = NULL;
+               mac->ops.enable_tx_laser = NULL;
+               mac->ops.flap_tx_laser = NULL;
+       }
+
+       /* Pick the link-setup routine: dual-speed SFP+, SmartSpeed-capable
+        * backplane (unless LESM firmware owns the link), or the default */
+       if (hw->phy.multispeed_fiber) {
+               mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
+       } else if ((ixgbe_get_media_type(hw) == ixgbe_media_type_backplane) &&
+                  (hw->phy.smart_speed == ixgbe_smart_speed_auto ||
+                   hw->phy.smart_speed == ixgbe_smart_speed_on) &&
+                  !ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+               mac->ops.setup_link = &ixgbe_setup_mac_link_smartspeed;
+       } else {
+               mac->ops.setup_link = &ixgbe_setup_mac_link_82599;
+       }
+}
+
+/**
+ *  ixgbe_init_phy_ops_82599 - PHY/SFP specific init
+ *  @hw: pointer to hardware structure
+ *
+ *  Initialize any function pointers that were not able to be
+ *  set during init_shared_code because the PHY/SFP type was
+ *  not known.  Perform the SFP init if necessary.
+ *
+ **/
+s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
+{
+       struct ixgbe_mac_info *mac = &hw->mac;
+       struct ixgbe_phy_info *phy = &hw->phy;
+       s32 ret_val = IXGBE_SUCCESS;
+
+       DEBUGFUNC("ixgbe_init_phy_ops_82599");
+
+       /* Identify the PHY or SFP module */
+       ret_val = phy->ops.identify(hw);
+       if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED)
+               goto init_phy_ops_out;
+
+       /* Setup function pointers based on detected SFP module and speeds */
+       ixgbe_init_mac_link_ops_82599(hw);
+       /* A known SFP type means no PHY reset is needed */
+       if (hw->phy.sfp_type != ixgbe_sfp_type_unknown)
+               hw->phy.ops.reset = NULL;
+
+       /* If copper media, overwrite with copper function pointers */
+       if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
+               mac->ops.setup_link = &ixgbe_setup_copper_link_82599;
+               mac->ops.get_link_capabilities =
+                                 &ixgbe_get_copper_link_capabilities_generic;
+       }
+
+       /* Set necessary function pointers based on phy type */
+       switch (hw->phy.type) {
+       case ixgbe_phy_tn:
+               /* TNX PHYs get their own link/check/firmware helpers */
+               phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
+               phy->ops.check_link = &ixgbe_check_phy_link_tnx;
+               phy->ops.get_firmware_version =
+                            &ixgbe_get_phy_firmware_version_tnx;
+               break;
+       default:
+               break;
+       }
+init_phy_ops_out:
+       return ret_val;
+}
+
+/**
+ *  ixgbe_setup_sfp_modules_82599 - Setup SFP module
+ *  @hw: pointer to hardware structure
+ *
+ *  Applies the EEPROM-stored SFP init sequence to CORECTL under the
+ *  MAC/CSR software semaphore, then restarts the DSP and waits for
+ *  autonegotiation to leave state 0 before returning to SFI mode.
+ **/
+s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
+{
+       s32 ret_val = IXGBE_SUCCESS;
+       u32 reg_anlp1 = 0;
+       u32 i = 0;
+       u16 list_offset, data_offset, data_value;
+
+       DEBUGFUNC("ixgbe_setup_sfp_modules_82599");
+
+       if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
+               ixgbe_init_mac_link_ops_82599(hw);
+
+               hw->phy.ops.reset = NULL;
+
+               ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
+                                                             &data_offset);
+               if (ret_val != IXGBE_SUCCESS)
+                       goto setup_sfp_out;
+
+               /* PHY config will finish before releasing the semaphore */
+               ret_val = hw->mac.ops.acquire_swfw_sync(hw,
+                                                       IXGBE_GSSR_MAC_CSR_SM);
+               if (ret_val != IXGBE_SUCCESS) {
+                       ret_val = IXGBE_ERR_SWFW_SYNC;
+                       goto setup_sfp_out;
+               }
+
+               /* Stream EEPROM words into CORECTL until the 0xffff
+                * end-of-list sentinel */
+               hw->eeprom.ops.read(hw, ++data_offset, &data_value);
+               while (data_value != 0xffff) {
+                       IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value);
+                       IXGBE_WRITE_FLUSH(hw);
+                       hw->eeprom.ops.read(hw, ++data_offset, &data_value);
+               }
+
+               /* Release the semaphore */
+               hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
+               /* Delay obtaining semaphore again to allow FW access */
+               msec_delay(hw->eeprom.semaphore_delay);
+
+               /* Now restart DSP by setting Restart_AN and clearing LMS */
+               IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((IXGBE_READ_REG(hw,
+                               IXGBE_AUTOC) & ~IXGBE_AUTOC_LMS_MASK) |
+                               IXGBE_AUTOC_AN_RESTART));
+
+               /* Wait for AN to leave state 0 (up to 10 x 4 ms) */
+               for (i = 0; i < 10; i++) {
+                       msec_delay(4);
+                       reg_anlp1 = IXGBE_READ_REG(hw, IXGBE_ANLP1);
+                       if (reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)
+                               break;
+               }
+               if (!(reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)) {
+                       DEBUGOUT("sfp module setup not complete\n");
+                       ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
+                       goto setup_sfp_out;
+               }
+
+               /* Restart DSP by setting Restart_AN and return to SFI mode */
+               IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (IXGBE_READ_REG(hw,
+                               IXGBE_AUTOC) | IXGBE_AUTOC_LMS_10G_SERIAL |
+                               IXGBE_AUTOC_AN_RESTART));
+       }
+
+setup_sfp_out:
+       return ret_val;
+}
+
+/**
+ *  ixgbe_init_ops_82599 - Inits func ptrs and MAC type
+ *  @hw: pointer to hardware structure
+ *
+ *  Initialize the function pointers and assign the MAC type for 82599.
+ *  Does not touch the hardware.
+ **/
+
+s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
+{
+       struct ixgbe_mac_info *mac = &hw->mac;
+       struct ixgbe_phy_info *phy = &hw->phy;
+       struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+       s32 ret_val;
+
+       DEBUGFUNC("ixgbe_init_ops_82599");
+
+       /*
+        * NOTE(review): the status returned by ixgbe_init_phy_ops_generic()
+        * is immediately overwritten below; only the status of
+        * ixgbe_init_ops_generic() is propagated to the caller.
+        */
+       ret_val = ixgbe_init_phy_ops_generic(hw);
+       ret_val = ixgbe_init_ops_generic(hw);
+
+       /* PHY */
+       phy->ops.identify = &ixgbe_identify_phy_82599;
+       phy->ops.init = &ixgbe_init_phy_ops_82599;
+
+       /* MAC */
+       mac->ops.reset_hw = &ixgbe_reset_hw_82599;
+       mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_gen2;
+       mac->ops.get_media_type = &ixgbe_get_media_type_82599;
+       mac->ops.get_supported_physical_layer =
+                                   &ixgbe_get_supported_physical_layer_82599;
+       mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_82599;
+       mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82599;
+       mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82599;
+       mac->ops.start_hw = &ixgbe_start_hw_82599;
+       mac->ops.get_san_mac_addr = &ixgbe_get_san_mac_addr_generic;
+       mac->ops.set_san_mac_addr = &ixgbe_set_san_mac_addr_generic;
+       mac->ops.get_device_caps = &ixgbe_get_device_caps_generic;
+       mac->ops.get_wwn_prefix = &ixgbe_get_wwn_prefix_generic;
+       mac->ops.get_fcoe_boot_status = &ixgbe_get_fcoe_boot_status_generic;
+
+       /* RAR, Multicast, VLAN */
+       mac->ops.set_vmdq = &ixgbe_set_vmdq_generic;
+       mac->ops.clear_vmdq = &ixgbe_clear_vmdq_generic;
+       mac->ops.insert_mac_addr = &ixgbe_insert_mac_addr_generic;
+       mac->rar_highwater = 1;
+       mac->ops.set_vfta = &ixgbe_set_vfta_generic;
+       mac->ops.clear_vfta = &ixgbe_clear_vfta_generic;
+       mac->ops.init_uta_tables = &ixgbe_init_uta_tables_generic;
+       mac->ops.setup_sfp = &ixgbe_setup_sfp_modules_82599;
+       mac->ops.set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing;
+       mac->ops.set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing;
+
+       /* Link */
+       mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_82599;
+       mac->ops.check_link            = &ixgbe_check_mac_link_generic;
+       mac->ops.setup_rxpba = &ixgbe_set_rxpba_generic;
+       ixgbe_init_mac_link_ops_82599(hw);
+
+       /* 82599-specific table sizes and queue/vector limits */
+       mac->mcft_size        = 128;
+       mac->vft_size         = 128;
+       mac->num_rar_entries  = 128;
+       mac->rx_pb_size       = 512;
+       mac->max_tx_queues    = 128;
+       mac->max_rx_queues    = 128;
+       mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
+
+       /* ARC subsystem is reported valid when any FW mode bit is set */
+       mac->arc_subsystem_valid = (IXGBE_READ_REG(hw, IXGBE_FWSM) &
+                                  IXGBE_FWSM_MODE_MASK) ? TRUE : FALSE;
+
+       hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf;
+
+       /* EEPROM */
+       eeprom->ops.read = &ixgbe_read_eeprom_82599;
+       eeprom->ops.read_buffer = &ixgbe_read_eeprom_buffer_82599;
+
+       /* Manageability interface */
+       mac->ops.set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic;
+
+       return ret_val;
+}
+
+/**
+ *  ixgbe_get_link_capabilities_82599 - Determines link capabilities
+ *  @hw: pointer to hardware structure
+ *  @speed: pointer to link speed (out: bitmask of IXGBE_LINK_SPEED_* values)
+ *  @negotiation: TRUE when autoneg or autotry is enabled
+ *
+ *  Determines the link capabilities by reading the AUTOC register.
+ *  Prefers the stored EEPROM-default AUTOC value when available, since
+ *  the live register may have been rewritten during speed setup.
+ **/
+s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
+                                      ixgbe_link_speed *speed,
+                                      bool *negotiation)
+{
+       s32 status = IXGBE_SUCCESS;
+       u32 autoc = 0;
+
+       DEBUGFUNC("ixgbe_get_link_capabilities_82599");
+
+       /* Check if 1G SFP module; these modules are fixed at 1G full. */
+       if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
+           hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1) {
+               *speed = IXGBE_LINK_SPEED_1GB_FULL;
+               *negotiation = TRUE;
+               goto out;
+       }
+
+       /*
+        * Determine link capabilities based on the stored value of AUTOC,
+        * which represents EEPROM defaults.  If AUTOC value has not
+        * been stored, use the current register values.
+        */
+       if (hw->mac.orig_link_settings_stored)
+               autoc = hw->mac.orig_autoc;
+       else
+               autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+
+       switch (autoc & IXGBE_AUTOC_LMS_MASK) {
+       case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
+               *speed = IXGBE_LINK_SPEED_1GB_FULL;
+               *negotiation = FALSE;
+               break;
+
+       case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
+               *speed = IXGBE_LINK_SPEED_10GB_FULL;
+               *negotiation = FALSE;
+               break;
+
+       case IXGBE_AUTOC_LMS_1G_AN:
+               *speed = IXGBE_LINK_SPEED_1GB_FULL;
+               *negotiation = TRUE;
+               break;
+
+       case IXGBE_AUTOC_LMS_10G_SERIAL:
+               *speed = IXGBE_LINK_SPEED_10GB_FULL;
+               *negotiation = FALSE;
+               break;
+
+       case IXGBE_AUTOC_LMS_KX4_KX_KR:
+       case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
+               /* KR and KX4 both provide 10G; KX provides 1G. */
+               *speed = IXGBE_LINK_SPEED_UNKNOWN;
+               if (autoc & IXGBE_AUTOC_KR_SUPP)
+                       *speed |= IXGBE_LINK_SPEED_10GB_FULL;
+               if (autoc & IXGBE_AUTOC_KX4_SUPP)
+                       *speed |= IXGBE_LINK_SPEED_10GB_FULL;
+               if (autoc & IXGBE_AUTOC_KX_SUPP)
+                       *speed |= IXGBE_LINK_SPEED_1GB_FULL;
+               *negotiation = TRUE;
+               break;
+
+       case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII:
+               *speed = IXGBE_LINK_SPEED_100_FULL;
+               if (autoc & IXGBE_AUTOC_KR_SUPP)
+                       *speed |= IXGBE_LINK_SPEED_10GB_FULL;
+               if (autoc & IXGBE_AUTOC_KX4_SUPP)
+                       *speed |= IXGBE_LINK_SPEED_10GB_FULL;
+               if (autoc & IXGBE_AUTOC_KX_SUPP)
+                       *speed |= IXGBE_LINK_SPEED_1GB_FULL;
+               *negotiation = TRUE;
+               break;
+
+       case IXGBE_AUTOC_LMS_SGMII_1G_100M:
+               *speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL;
+               *negotiation = FALSE;
+               break;
+
+       default:
+               /* Unrecognized link mode select: report a setup error. */
+               status = IXGBE_ERR_LINK_SETUP;
+               goto out;
+       }
+
+       /* Multispeed fiber modules can always try both rates. */
+       if (hw->phy.multispeed_fiber) {
+               *speed |= IXGBE_LINK_SPEED_10GB_FULL |
+                         IXGBE_LINK_SPEED_1GB_FULL;
+               *negotiation = TRUE;
+       }
+
+out:
+       return status;
+}
+
+/**
+ *  ixgbe_get_media_type_82599 - Get media type
+ *  @hw: pointer to hardware structure
+ *
+ *  Returns the media type (fiber, copper, backplane)
+ **/
+enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
+{
+       enum ixgbe_media_type media_type;
+
+       DEBUGFUNC("ixgbe_get_media_type_82599");
+
+       /* Detect if there is a copper PHY attached. */
+       /* A detected copper PHY overrides the device-ID mapping below. */
+       switch (hw->phy.type) {
+       case ixgbe_phy_cu_unknown:
+       case ixgbe_phy_tn:
+               media_type = ixgbe_media_type_copper;
+               goto out;
+       default:
+               break;
+       }
+
+       /* No copper PHY: classify media by PCI device ID. */
+       switch (hw->device_id) {
+       case IXGBE_DEV_ID_82599_KX4:
+       case IXGBE_DEV_ID_82599_KX4_MEZZ:
+       case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
+       case IXGBE_DEV_ID_82599_KR:
+       case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
+       case IXGBE_DEV_ID_82599_XAUI_LOM:
+               /* Default device ID is mezzanine card KX/KX4 */
+               media_type = ixgbe_media_type_backplane;
+               break;
+       case IXGBE_DEV_ID_82599_SFP:
+       case IXGBE_DEV_ID_82599_SFP_FCOE:
+       case IXGBE_DEV_ID_82599_SFP_EM:
+       case IXGBE_DEV_ID_82599EN_SFP:
+               media_type = ixgbe_media_type_fiber;
+               break;
+       case IXGBE_DEV_ID_82599_CX4:
+               media_type = ixgbe_media_type_cx4;
+               break;
+       case IXGBE_DEV_ID_82599_T3_LOM:
+               media_type = ixgbe_media_type_copper;
+               break;
+       default:
+               media_type = ixgbe_media_type_unknown;
+               break;
+       }
+out:
+       return media_type;
+}
+
+/**
+ *  ixgbe_start_mac_link_82599 - Setup MAC link settings
+ *  @hw: pointer to hardware structure
+ *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
+ *
+ *  Configures link settings based on values in the ixgbe_hw struct.
+ *  Restarts the link.  Performs autonegotiation if needed.
+ *
+ *  Returns IXGBE_SUCCESS, or IXGBE_ERR_AUTONEG_NOT_COMPLETE if waiting was
+ *  requested and KX/KX4/KR autoneg did not finish within the poll window.
+ **/
+s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
+                               bool autoneg_wait_to_complete)
+{
+       u32 autoc_reg;
+       u32 links_reg;
+       u32 i;
+       s32 status = IXGBE_SUCCESS;
+
+       DEBUGFUNC("ixgbe_start_mac_link_82599");
+
+
+       /* Restart link */
+       autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+       autoc_reg |= IXGBE_AUTOC_AN_RESTART;
+       IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
+
+       /* Only poll for autoneg to complete if specified to do so */
+       /* Only the backplane KX/KX4/KR link modes signal AN completion. */
+       if (autoneg_wait_to_complete) {
+               if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
+                    IXGBE_AUTOC_LMS_KX4_KX_KR ||
+                   (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
+                    IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
+                   (autoc_reg & IXGBE_AUTOC_LMS_MASK) ==
+                    IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
+                       links_reg = 0; /* Just in case Autoneg time = 0 */
+                       /* Poll LINKS at 100ms intervals, IXGBE_AUTO_NEG_TIME times */
+                       for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
+                               links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
+                               if (links_reg & IXGBE_LINKS_KX_AN_COMP)
+                                       break;
+                               msec_delay(100);
+                       }
+                       if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
+                               status = IXGBE_ERR_AUTONEG_NOT_COMPLETE;
+                               DEBUGOUT("Autoneg did not complete.\n");
+                       }
+               }
+       }
+
+       /* Add delay to filter out noises during initial link setup */
+       msec_delay(50);
+
+       return status;
+}
+
+/**
+ *  ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser
+ *  @hw: pointer to hardware structure
+ *
+ *  The base drivers may require better control over SFP+ module
+ *  PHY states.  This includes selectively shutting down the Tx
+ *  laser on the PHY, effectively halting physical link.
+ **/
+void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
+{
+       u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
+
+       /* Disable tx laser; allow 100us to go dark per spec */
+       /* Setting SDP3 turns the laser off; flush, then wait for dark. */
+       esdp_reg |= IXGBE_ESDP_SDP3;
+       IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
+       IXGBE_WRITE_FLUSH(hw);
+       usec_delay(100);
+}
+
+/**
+ *  ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser
+ *  @hw: pointer to hardware structure
+ *
+ *  The base drivers may require better control over SFP+ module
+ *  PHY states.  This includes selectively turning on the Tx
+ *  laser on the PHY, effectively starting physical link.
+ **/
+void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
+{
+       u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
+
+       /* Enable tx laser; allow 100ms to light up */
+       /* Clearing SDP3 turns the laser on; flush, then wait for light-up. */
+       esdp_reg &= ~IXGBE_ESDP_SDP3;
+       IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
+       IXGBE_WRITE_FLUSH(hw);
+       msec_delay(100);
+}
+
+/**
+ *  ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser
+ *  @hw: pointer to hardware structure
+ *
+ *  When the driver changes the link speeds that it can support,
+ *  it sets autotry_restart to TRUE to indicate that we need to
+ *  initiate a new autotry session with the link partner.  To do
+ *  so, we set the speed then disable and re-enable the tx laser, to
+ *  alert the link partner that it also needs to restart autotry on its
+ *  end.  This is consistent with TRUE clause 37 autoneg, which also
+ *  involves a loss of signal.
+ **/
+void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
+{
+       DEBUGFUNC("ixgbe_flap_tx_laser_multispeed_fiber");
+
+       /* Only flap once per requested restart; clear the flag afterwards. */
+       if (hw->mac.autotry_restart) {
+               ixgbe_disable_tx_laser_multispeed_fiber(hw);
+               ixgbe_enable_tx_laser_multispeed_fiber(hw);
+               hw->mac.autotry_restart = FALSE;
+       }
+}
+
+/**
+ *  ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
+ *  @hw: pointer to hardware structure
+ *  @speed: new link speed
+ *  @autoneg: TRUE if autonegotiation enabled
+ *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
+ *
+ *  Set the link speed in the AUTOC register and restarts link.
+ *
+ *  Tries each requested speed in software, highest first (10G then 1G),
+ *  toggling the module rate-select pin (SDP5) and flapping the Tx laser
+ *  between attempts.  If no speed links up, recurses once with only the
+ *  highest speed that was tried.
+ **/
+s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
+                                     ixgbe_link_speed speed, bool autoneg,
+                                     bool autoneg_wait_to_complete)
+{
+       s32 status = IXGBE_SUCCESS;
+       ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
+       ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
+       u32 speedcnt = 0;
+       u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
+       u32 i = 0;
+       bool link_up = FALSE;
+       bool negotiation;
+
+       DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber");
+
+       /* Mask off requested but non-supported speeds */
+       status = ixgbe_get_link_capabilities(hw, &link_speed, &negotiation);
+       if (status != IXGBE_SUCCESS)
+               return status;
+
+       speed &= link_speed;
+
+       /*
+        * Try each speed one by one, highest priority first.  We do this in
+        * software because 10gb fiber doesn't support speed autonegotiation.
+        */
+       if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
+               speedcnt++;
+               highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;
+
+               /* If we already have link at this speed, just jump out */
+               status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
+               if (status != IXGBE_SUCCESS)
+                       return status;
+
+               if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up)
+                       goto out;
+
+               /* Set the module link speed */
+               /* Drive SDP5 high to select the 10G module rate. */
+               esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
+               IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
+               IXGBE_WRITE_FLUSH(hw);
+
+               /* Allow module to change analog characteristics (1G->10G) */
+               msec_delay(40);
+
+               status = ixgbe_setup_mac_link_82599(hw,
+                                               IXGBE_LINK_SPEED_10GB_FULL,
+                                               autoneg,
+                                               autoneg_wait_to_complete);
+               if (status != IXGBE_SUCCESS)
+                       return status;
+
+               /* Flap the tx laser if it has not already been done */
+               ixgbe_flap_tx_laser(hw);
+
+               /*
+                * Wait for the controller to acquire link.  Per IEEE 802.3ap,
+                * Section 73.10.2, we may have to wait up to 500ms if KR is
+                * attempted.  82599 uses the same timing for 10g SFI.
+                */
+               for (i = 0; i < 5; i++) {
+                       /* Wait for the link partner to also set speed */
+                       msec_delay(100);
+
+                       /* If we have link, just jump out */
+                       status = ixgbe_check_link(hw, &link_speed,
+                                                 &link_up, FALSE);
+                       if (status != IXGBE_SUCCESS)
+                               return status;
+
+                       if (link_up)
+                               goto out;
+               }
+       }
+
+       if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
+               speedcnt++;
+               if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
+                       highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;
+
+               /* If we already have link at this speed, just jump out */
+               status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
+               if (status != IXGBE_SUCCESS)
+                       return status;
+
+               if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up)
+                       goto out;
+
+               /* Set the module link speed */
+               /* Drive SDP5 low to select the 1G module rate. */
+               esdp_reg &= ~IXGBE_ESDP_SDP5;
+               esdp_reg |= IXGBE_ESDP_SDP5_DIR;
+               IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
+               IXGBE_WRITE_FLUSH(hw);
+
+               /* Allow module to change analog characteristics (10G->1G) */
+               msec_delay(40);
+
+               status = ixgbe_setup_mac_link_82599(hw,
+                                                   IXGBE_LINK_SPEED_1GB_FULL,
+                                                   autoneg,
+                                                   autoneg_wait_to_complete);
+               if (status != IXGBE_SUCCESS)
+                       return status;
+
+               /* Flap the tx laser if it has not already been done */
+               ixgbe_flap_tx_laser(hw);
+
+               /* Wait for the link partner to also set speed */
+               msec_delay(100);
+
+               /* If we have link, just jump out */
+               status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
+               if (status != IXGBE_SUCCESS)
+                       return status;
+
+               if (link_up)
+                       goto out;
+       }
+
+       /*
+        * We didn't get link.  Configure back to the highest speed we tried,
+        * (if there was more than one).  We call ourselves back with just the
+        * single highest speed that the user requested.
+        */
+       /* Recursion terminates: the retry requests a single speed, so
+        * speedcnt will be 1 on the nested call. */
+       if (speedcnt > 1)
+               status = ixgbe_setup_mac_link_multispeed_fiber(hw,
+                       highest_link_speed, autoneg, autoneg_wait_to_complete);
+
+out:
+       /* Set autoneg_advertised value based on input link speed */
+       hw->phy.autoneg_advertised = 0;
+
+       if (speed & IXGBE_LINK_SPEED_10GB_FULL)
+               hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
+
+       if (speed & IXGBE_LINK_SPEED_1GB_FULL)
+               hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
+
+       return status;
+}
+
+/**
+ *  ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed
+ *  @hw: pointer to hardware structure
+ *  @speed: new link speed
+ *  @autoneg: TRUE if autonegotiation enabled
+ *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
+ *
+ *  Implements the Intel SmartSpeed algorithm.
+ *
+ *  First tries the full advertisement; if link cannot be established and
+ *  both KR and KX4/KX were advertised, retries with SmartSpeed active
+ *  (KR disabled), then restores the full advertisement on failure.
+ **/
+s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
+                                    ixgbe_link_speed speed, bool autoneg,
+                                    bool autoneg_wait_to_complete)
+{
+       s32 status = IXGBE_SUCCESS;
+       ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
+       s32 i, j;
+       bool link_up = FALSE;
+       u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+
+       DEBUGFUNC("ixgbe_setup_mac_link_smartspeed");
+
+        /* Set autoneg_advertised value based on input link speed */
+       hw->phy.autoneg_advertised = 0;
+
+       if (speed & IXGBE_LINK_SPEED_10GB_FULL)
+               hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
+
+       if (speed & IXGBE_LINK_SPEED_1GB_FULL)
+               hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
+
+       if (speed & IXGBE_LINK_SPEED_100_FULL)
+               hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
+
+       /*
+        * Implement Intel SmartSpeed algorithm.  SmartSpeed will reduce the
+        * autoneg advertisement if link is unable to be established at the
+        * highest negotiated rate.  This can sometimes happen due to integrity
+        * issues with the physical media connection.
+        */
+
+       /* First, try to get link with full advertisement */
+       hw->phy.smart_speed_active = FALSE;
+       for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) {
+               status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
+                                                   autoneg_wait_to_complete);
+               if (status != IXGBE_SUCCESS)
+                       goto out;
+
+               /*
+                * Wait for the controller to acquire link.  Per IEEE 802.3ap,
+                * Section 73.10.2, we may have to wait up to 500ms if KR is
+                * attempted, or 200ms if KX/KX4/BX/BX4 is attempted, per
+                * Table 9 in the AN MAS.
+                */
+               for (i = 0; i < 5; i++) {
+                       msec_delay(100);
+
+                       /* If we have link, just jump out */
+                       status = ixgbe_check_link(hw, &link_speed, &link_up,
+                                                 FALSE);
+                       if (status != IXGBE_SUCCESS)
+                               goto out;
+
+                       if (link_up)
+                               goto out;
+               }
+       }
+
+       /*
+        * We didn't get link.  If we advertised KR plus one of KX4/KX
+        * (or BX4/BX), then disable KR and try again.
+        */
+       /* Nothing to fall back to unless both KR and KX4/KX were advertised. */
+       if (((autoc_reg & IXGBE_AUTOC_KR_SUPP) == 0) ||
+           ((autoc_reg & IXGBE_AUTOC_KX4_KX_SUPP_MASK) == 0))
+               goto out;
+
+       /* Turn SmartSpeed on to disable KR support */
+       hw->phy.smart_speed_active = TRUE;
+       status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
+                                           autoneg_wait_to_complete);
+       if (status != IXGBE_SUCCESS)
+               goto out;
+
+       /*
+        * Wait for the controller to acquire link.  600ms will allow for
+        * the AN link_fail_inhibit_timer as well for multiple cycles of
+        * parallel detect, both 10g and 1g. This allows for the maximum
+        * connect attempts as defined in the AN MAS table 73-7.
+        */
+       for (i = 0; i < 6; i++) {
+               msec_delay(100);
+
+               /* If we have link, just jump out */
+               status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
+               if (status != IXGBE_SUCCESS)
+                       goto out;
+
+               if (link_up)
+                       goto out;
+       }
+
+       /* We didn't get link.  Turn SmartSpeed back off. */
+       hw->phy.smart_speed_active = FALSE;
+       status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
+                                           autoneg_wait_to_complete);
+
+out:
+       /* Report when the fallback left us at 1G instead of the maximum. */
+       if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
+               DEBUGOUT("Smartspeed has downgraded the link speed "
+               "from the maximum advertised\n");
+       return status;
+}
+
+/**
+ *  ixgbe_setup_mac_link_82599 - Set MAC link speed
+ *  @hw: pointer to hardware structure
+ *  @speed: new link speed
+ *  @autoneg: TRUE if autonegotiation enabled
+ *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
+ *
+ *  Set the link speed in the AUTOC register and restarts link.
+ *
+ *  Returns IXGBE_ERR_LINK_SETUP when the requested speed is not supported,
+ *  or IXGBE_ERR_AUTONEG_NOT_COMPLETE if waiting was requested and backplane
+ *  autoneg did not complete.
+ **/
+s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
+                                     ixgbe_link_speed speed, bool autoneg,
+                                     bool autoneg_wait_to_complete)
+{
+       s32 status = IXGBE_SUCCESS;
+       u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+       u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
+       u32 start_autoc = autoc;
+       u32 orig_autoc = 0;
+       u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
+       u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
+       u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
+       u32 links_reg;
+       u32 i;
+       ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
+
+       DEBUGFUNC("ixgbe_setup_mac_link_82599");
+
+       /* Check to see if speed passed in is supported. */
+       status = ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg);
+       if (status != IXGBE_SUCCESS)
+               goto out;
+
+       speed &= link_capabilities;
+
+       if (speed == IXGBE_LINK_SPEED_UNKNOWN) {
+               status = IXGBE_ERR_LINK_SETUP;
+               goto out;
+       }
+
+       /* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/
+       if (hw->mac.orig_link_settings_stored)
+               orig_autoc = hw->mac.orig_autoc;
+       else
+               orig_autoc = autoc;
+
+       if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
+           link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
+           link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
+               /* Set KX4/KX/KR support according to speed requested */
+               autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP);
+               /*
+                * Fix: braces added so the KR-support check only applies when
+                * 10G was requested; previously it ran unconditionally, which
+                * re-advertised KR even for a 1G-only request (and despite
+                * SmartSpeed intending KR to be suppressed).
+                */
+               if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
+                       if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
+                               autoc |= IXGBE_AUTOC_KX4_SUPP;
+                       if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) &&
+                           (hw->phy.smart_speed_active == FALSE))
+                               autoc |= IXGBE_AUTOC_KR_SUPP;
+               }
+               if (speed & IXGBE_LINK_SPEED_1GB_FULL)
+                       autoc |= IXGBE_AUTOC_KX_SUPP;
+       } else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
+                  (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
+                   link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
+               /* Switch from 1G SFI to 10G SFI if requested */
+               if ((speed == IXGBE_LINK_SPEED_10GB_FULL) &&
+                   (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) {
+                       autoc &= ~IXGBE_AUTOC_LMS_MASK;
+                       autoc |= IXGBE_AUTOC_LMS_10G_SERIAL;
+               }
+       } else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) &&
+                  (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
+               /* Switch from 10G SFI to 1G SFI if requested */
+               if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
+                   (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) {
+                       autoc &= ~IXGBE_AUTOC_LMS_MASK;
+                       if (autoneg)
+                               autoc |= IXGBE_AUTOC_LMS_1G_AN;
+                       else
+                               autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN;
+               }
+       }
+
+       /* Only touch the hardware if the computed AUTOC actually changed. */
+       if (autoc != start_autoc) {
+               /* Restart link */
+               autoc |= IXGBE_AUTOC_AN_RESTART;
+               IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
+
+               /* Only poll for autoneg to complete if specified to do so */
+               if (autoneg_wait_to_complete) {
+                       if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
+                           link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
+                           link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
+                               links_reg = 0; /*Just in case Autoneg time=0*/
+                               for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
+                                       links_reg =
+                                              IXGBE_READ_REG(hw, IXGBE_LINKS);
+                                       if (links_reg & IXGBE_LINKS_KX_AN_COMP)
+                                               break;
+                                       msec_delay(100);
+                               }
+                               if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
+                                       status =
+                                               IXGBE_ERR_AUTONEG_NOT_COMPLETE;
+                                       DEBUGOUT("Autoneg did not complete.\n");
+                               }
+                       }
+               }
+
+               /* Add delay to filter out noises during initial link setup */
+               msec_delay(50);
+       }
+
+out:
+       return status;
+}
+
+/**
+ *  ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field
+ *  @hw: pointer to hardware structure
+ *  @speed: new link speed
+ *  @autoneg: TRUE if autonegotiation enabled
+ *  @autoneg_wait_to_complete: TRUE if waiting is needed to complete
+ *
+ *  Restarts link on PHY and MAC based on settings passed in.
+ **/
+static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
+                                               ixgbe_link_speed speed,
+                                               bool autoneg,
+                                               bool autoneg_wait_to_complete)
+{
+       s32 status;
+
+       DEBUGFUNC("ixgbe_setup_copper_link_82599");
+
+       /* Setup the PHY according to input speed */
+       status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
+                                             autoneg_wait_to_complete);
+       /* Set up MAC */
+       /*
+        * NOTE(review): the MAC-start return value is discarded; only the
+        * PHY setup status is reported to the caller.
+        */
+       ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete);
+
+       return status;
+}
+
+/**
+ *  ixgbe_reset_hw_82599 - Perform hardware reset
+ *  @hw: pointer to hardware structure
+ *
+ *  Resets the hardware by resetting the transmit and receive units, masks
+ *  and clears all interrupts, perform a PHY reset, and perform a link (MAC)
+ *  reset.
+ **/
+s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
+{
+       ixgbe_link_speed link_speed;
+       s32 status;
+       u32 ctrl, i, autoc, autoc2;
+       bool link_up = FALSE;
+
+       DEBUGFUNC("ixgbe_reset_hw_82599");
+
+       /* Call adapter stop to disable tx/rx and clear interrupts */
+       status = hw->mac.ops.stop_adapter(hw);
+       if (status != IXGBE_SUCCESS)
+               goto reset_hw_out;
+
+       /* flush pending Tx transactions */
+       ixgbe_clear_tx_pending(hw);
+
+       /* PHY ops must be identified and initialized prior to reset */
+
+       /* Identify PHY and related function pointers */
+       status = hw->phy.ops.init(hw);
+
+       if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
+               goto reset_hw_out;
+
+       /* Setup SFP module if there is one present. */
+       if (hw->phy.sfp_setup_needed) {
+               status = hw->mac.ops.setup_sfp(hw);
+               hw->phy.sfp_setup_needed = FALSE;
+       }
+
+       if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
+               goto reset_hw_out;
+
+       /* Reset PHY */
+       if (hw->phy.reset_disable == FALSE && hw->phy.ops.reset != NULL)
+               hw->phy.ops.reset(hw);
+
+mac_reset_top:
+       /*
+        * Issue global reset to the MAC.  Needs to be SW reset if link is up.
+        * If link reset is used when link is up, it might reset the PHY when
+        * mng is using it.  If link is down or the flag to force full link
+        * reset is set, then perform link reset.
+        */
+       ctrl = IXGBE_CTRL_LNK_RST;
+       if (!hw->force_full_reset) {
+               hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE);
+               if (link_up)
+                       ctrl = IXGBE_CTRL_RST;
+       }
+
+       ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
+       IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
+       IXGBE_WRITE_FLUSH(hw);
+
+       /* Poll for reset bit to self-clear indicating reset is complete */
+       for (i = 0; i < 10; i++) {
+               usec_delay(1);
+               ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
+               if (!(ctrl & IXGBE_CTRL_RST_MASK))
+                       break;
+       }
+
+       if (ctrl & IXGBE_CTRL_RST_MASK) {
+               status = IXGBE_ERR_RESET_FAILED;
+               DEBUGOUT("Reset polling failed to complete.\n");
+       }
+
+       msec_delay(50);
+
+       /*
+        * Double resets are required for recovery from certain error
+        * conditions.  Between resets, it is necessary to stall to allow time
+        * for any pending HW events to complete.
+        */
+       if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
+               hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
+               goto mac_reset_top;
+       }
+
+       /*
+        * Store the original AUTOC/AUTOC2 values if they have not been
+        * stored off yet.  Otherwise restore the stored original
+        * values since the reset operation sets back to defaults.
+        */
+       autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+       autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
+       if (hw->mac.orig_link_settings_stored == FALSE) {
+               hw->mac.orig_autoc = autoc;
+               hw->mac.orig_autoc2 = autoc2;
+               hw->mac.orig_link_settings_stored = TRUE;
+       } else {
+               if (autoc != hw->mac.orig_autoc)
+                       IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (hw->mac.orig_autoc |
+                                       IXGBE_AUTOC_AN_RESTART));
+
+               if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
+                   (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
+                       autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
+                       autoc2 |= (hw->mac.orig_autoc2 &
+                                  IXGBE_AUTOC2_UPPER_MASK);
+                       IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
+               }
+       }
+
+       /* Store the permanent mac address */
+       hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
+
+       /*
+        * Store MAC address from RAR0, clear receive address registers, and
+        * clear the multicast table.  Also reset num_rar_entries to 128,
+        * since we modify this value when programming the SAN MAC address.
+        */
+       hw->mac.num_rar_entries = 128;
+       hw->mac.ops.init_rx_addrs(hw);
+
+       /* Store the permanent SAN mac address */
+       hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
+
+       /* Add the SAN MAC address to the RAR only if it's a valid address */
+       if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
+               hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
+                                   hw->mac.san_addr, 0, IXGBE_RAH_AV);
+
+               /* Reserve the last RAR for the SAN MAC address */
+               hw->mac.num_rar_entries--;
+       }
+
+       /* Store the alternative WWNN/WWPN prefix */
+       hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
+                                      &hw->mac.wwpn_prefix);
+
+reset_hw_out:
+       return status;
+}
+
+/**
+ *  ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables.
+ *  @hw: pointer to hardware structure
+ *
+ *  Waits for any pending FDIRCMD command to finish, applies the 82599
+ *  silicon-errata workaround (CLEARHT toggle), re-writes FDIRCTRL and
+ *  polls for INIT_DONE, then reads the statistics registers to clear them.
+ *
+ *  Returns IXGBE_SUCCESS, or IXGBE_ERR_FDIR_REINIT_FAILED when a previous
+ *  command never completed or the INIT_DONE poll timed out.
+ **/
+s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
+{
+       int i;
+       /* capture FDIRCTRL with INIT_DONE cleared so it can be re-written */
+       u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
+       fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;
+
+       DEBUGFUNC("ixgbe_reinit_fdir_tables_82599");
+
+       /*
+        * Before starting reinitialization process,
+        * FDIRCMD.CMD must be zero.
+        */
+       for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
+               if (!(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
+                     IXGBE_FDIRCMD_CMD_MASK))
+                       break;
+               usec_delay(10);
+       }
+       if (i >= IXGBE_FDIRCMD_CMD_POLL) {
+               DEBUGOUT("Flow Director previous command isn't complete, "
+                        "aborting table re-initialization. \n");
+               return IXGBE_ERR_FDIR_REINIT_FAILED;
+       }
+
+       /* NOTE(review): zeroing FDIRFREE appears to reset the free-list
+        * head/tail before table re-init -- confirm with 82599 datasheet */
+       IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0);
+       IXGBE_WRITE_FLUSH(hw);
+       /*
+        * 82599 adapters flow director init flow cannot be restarted,
+        * Workaround 82599 silicon errata by performing the following steps
+        * before re-writing the FDIRCTRL control register with the same value.
+        * - write 1 to bit 8 of FDIRCMD register &
+        * - write 0 to bit 8 of FDIRCMD register
+        */
+       IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
+                       (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
+                        IXGBE_FDIRCMD_CLEARHT));
+       IXGBE_WRITE_FLUSH(hw);
+       IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
+                       (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
+                        ~IXGBE_FDIRCMD_CLEARHT));
+       IXGBE_WRITE_FLUSH(hw);
+       /*
+        * Clear FDIR Hash register to clear any leftover hashes
+        * waiting to be programmed.
+        */
+       IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, 0x00);
+       IXGBE_WRITE_FLUSH(hw);
+
+       IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
+       IXGBE_WRITE_FLUSH(hw);
+
+       /* Poll init-done after we write FDIRCTRL register */
+       for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
+               if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
+                                  IXGBE_FDIRCTRL_INIT_DONE)
+                       break;
+               usec_delay(10);
+       }
+       if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
+               DEBUGOUT("Flow Director Signature poll time exceeded!\n");
+               return IXGBE_ERR_FDIR_REINIT_FAILED;
+       }
+
+       /* Clear FDIR statistics registers (read to clear) */
+       IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
+       IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT);
+       IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
+       IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
+       IXGBE_READ_REG(hw, IXGBE_FDIRLEN);
+
+       return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_fdir_enable_82599 - Initialize Flow Director control registers
+ *  @hw: pointer to hardware structure
+ *  @fdirctrl: value to write to flow director control register
+ *
+ *  Primes the bucket/signature hash key registers, writes FDIRCTRL and
+ *  polls for INIT_DONE.  A poll timeout is only logged via DEBUGOUT; no
+ *  error is propagated since the function returns void.
+ **/
+static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl)
+{
+       int i;
+
+       DEBUGFUNC("ixgbe_fdir_enable_82599");
+
+       /* Prime the keys for hashing */
+       IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
+       IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
+
+       /*
+        * Poll init-done after we write the register.  Estimated times:
+        *      10G: PBALLOC = 11b, timing is 60us
+        *       1G: PBALLOC = 11b, timing is 600us
+        *     100M: PBALLOC = 11b, timing is 6ms
+        *
+        *     Multiple these timings by 4 if under full Rx load
+        *
+        * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
+        * 1 msec per poll time.  If we're at line rate and drop to 100M, then
+        * this might not finish in our poll time, but we can live with that
+        * for now.
+        */
+       IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
+       IXGBE_WRITE_FLUSH(hw);
+       for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
+               if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
+                                  IXGBE_FDIRCTRL_INIT_DONE)
+                       break;
+               msec_delay(1);
+       }
+
+       if (i >= IXGBE_FDIR_INIT_DONE_POLL)
+               DEBUGOUT("Flow Director poll time exceeded!\n");
+}
+
+/**
+ *  ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters
+ *  @hw: pointer to hardware structure
+ *  @fdirctrl: value to write to flow director control register, initially
+ *             contains just the value of the Rx packet buffer allocation
+ *
+ *  Adds the signature-mode configuration bits to @fdirctrl and hands the
+ *  result to ixgbe_fdir_enable_82599() to be programmed into hardware.
+ *  Always returns IXGBE_SUCCESS.
+ **/
+s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl)
+{
+       DEBUGFUNC("ixgbe_init_fdir_signature_82599");
+
+       /* Place the flexible bytes at the ethertype (offset of 6 words) */
+       fdirctrl |= 0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT;
+       /* Allow at most 0xA filters per hash bucket */
+       fdirctrl |= 0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT;
+       /* Raise an interrupt once only 64 filter slots remain */
+       fdirctrl |= 4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT;
+
+       /* program the hash keys and fdirctrl, then poll for completion */
+       ixgbe_fdir_enable_82599(hw, fdirctrl);
+
+       return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters
+ *  @hw: pointer to hardware structure
+ *  @fdirctrl: value to write to flow director control register, initially
+ *             contains just the value of the Rx packet buffer allocation
+ *
+ *  Adds the perfect-match-mode configuration bits to @fdirctrl and hands
+ *  the result to ixgbe_fdir_enable_82599() to be programmed into hardware.
+ *  Always returns IXGBE_SUCCESS.
+ **/
+s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl)
+{
+       DEBUGFUNC("ixgbe_init_fdir_perfect_82599");
+
+       /* Turn perfect match filtering on */
+       fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH;
+       /* Report hash in the RSS field of the Rx writeback descriptor */
+       fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS;
+       /* Route dropped packets to the dedicated drop queue */
+       fdirctrl |= IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT;
+       /* Place the flexible bytes at the ethertype (offset of 6 words) */
+       fdirctrl |= 0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT;
+       /* Allow at most 0xA filters per hash bucket */
+       fdirctrl |= 0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT;
+       /* Raise an interrupt when 64 (0x4 * 16) filter slots remain */
+       fdirctrl |= 4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT;
+
+       /* program the hash keys and fdirctrl, then poll for completion */
+       ixgbe_fdir_enable_82599(hw, fdirctrl);
+
+       return IXGBE_SUCCESS;
+}
+
+/*
+ * These defines allow us to quickly generate all of the necessary instructions
+ * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION
+ * for values 0 through 15
+ */
+#define IXGBE_ATR_COMMON_HASH_KEY \
+               (IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY)
+#define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
+do { \
+       u32 n = (_n); \
+       if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \
+               common_hash ^= lo_hash_dword >> n; \
+       else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
+               bucket_hash ^= lo_hash_dword >> n; \
+       else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \
+               sig_hash ^= lo_hash_dword << (16 - n); \
+       if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \
+               common_hash ^= hi_hash_dword >> n; \
+       else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
+               bucket_hash ^= hi_hash_dword >> n; \
+       else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
+               sig_hash ^= hi_hash_dword << (16 - n); \
+} while (0);
+
+/**
+ *  ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
+ *  @input: unique input dword (flow ID/VM pool/VLAN bits)
+ *  @common: compressed common input dword
+ *
+ *  This function is almost identical to the function above but contains
+ *  several optimizations such as unwinding all of the loops, letting the
+ *  compiler work out all of the conditional ifs since the keys are static
+ *  defines, and computing two keys at once since the hashed dword stream
+ *  will be the same for both keys.
+ *
+ *  Returns the signature hash (held in the upper 16 bits, limited by
+ *  IXGBE_ATR_HASH_MASK << 16) XORed with the bucket hash (lower bits,
+ *  limited by IXGBE_ATR_HASH_MASK).
+ **/
+u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
+                                     union ixgbe_atr_hash_dword common)
+{
+       u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
+       u32 sig_hash = 0, bucket_hash = 0, common_hash = 0;
+
+       /* record the flow_vm_vlan bits as they are a key part to the hash */
+       flow_vm_vlan = IXGBE_NTOHL(input.dword);
+
+       /* generate common hash dword */
+       hi_hash_dword = IXGBE_NTOHL(common.dword);
+
+       /* low dword is word swapped version of common */
+       lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
+
+       /* apply flow ID/VM pool/VLAN ID bits to hash words */
+       hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
+
+       /* Process bits 0 and 16 */
+       IXGBE_COMPUTE_SIG_HASH_ITERATION(0);
+
+       /*
+        * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
+        * delay this because bit 0 of the stream should not be processed
+        * so we do not add the vlan until after bit 0 was processed
+        */
+       lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
+
+       /* Process remaining 30 bit of the key */
+       IXGBE_COMPUTE_SIG_HASH_ITERATION(1);
+       IXGBE_COMPUTE_SIG_HASH_ITERATION(2);
+       IXGBE_COMPUTE_SIG_HASH_ITERATION(3);
+       IXGBE_COMPUTE_SIG_HASH_ITERATION(4);
+       IXGBE_COMPUTE_SIG_HASH_ITERATION(5);
+       IXGBE_COMPUTE_SIG_HASH_ITERATION(6);
+       IXGBE_COMPUTE_SIG_HASH_ITERATION(7);
+       IXGBE_COMPUTE_SIG_HASH_ITERATION(8);
+       IXGBE_COMPUTE_SIG_HASH_ITERATION(9);
+       IXGBE_COMPUTE_SIG_HASH_ITERATION(10);
+       IXGBE_COMPUTE_SIG_HASH_ITERATION(11);
+       IXGBE_COMPUTE_SIG_HASH_ITERATION(12);
+       IXGBE_COMPUTE_SIG_HASH_ITERATION(13);
+       IXGBE_COMPUTE_SIG_HASH_ITERATION(14);
+       IXGBE_COMPUTE_SIG_HASH_ITERATION(15);
+
+       /* combine common_hash result with signature and bucket hashes */
+       bucket_hash ^= common_hash;
+       bucket_hash &= IXGBE_ATR_HASH_MASK;
+
+       sig_hash ^= common_hash << 16;
+       sig_hash &= IXGBE_ATR_HASH_MASK << 16;
+
+       /* return completed signature hash */
+       return sig_hash ^ bucket_hash;
+}
+
+/**
+ *  ixgbe_fdir_add_signature_filter_82599 - Adds a signature hash filter
+ *  @hw: pointer to hardware structure
+ *  @input: unique input dword
+ *  @common: compressed common input dword
+ *  @queue: queue index to direct traffic to
+ *
+ *  Returns IXGBE_SUCCESS, or IXGBE_ERR_CONFIG when the flow type is not
+ *  one of the supported TCP/UDP/SCTP v4/v6 values.
+ **/
+s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
+                                          union ixgbe_atr_hash_dword input,
+                                          union ixgbe_atr_hash_dword common,
+                                          u8 queue)
+{
+       u64  fdirhashcmd;
+       u32  fdircmd;
+
+       DEBUGFUNC("ixgbe_fdir_add_signature_filter_82599");
+
+       /*
+        * Get the flow_type in order to program FDIRCMD properly
+        * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6
+        */
+       switch (input.formatted.flow_type) {
+       case IXGBE_ATR_FLOW_TYPE_TCPV4:
+       case IXGBE_ATR_FLOW_TYPE_UDPV4:
+       case IXGBE_ATR_FLOW_TYPE_SCTPV4:
+       case IXGBE_ATR_FLOW_TYPE_TCPV6:
+       case IXGBE_ATR_FLOW_TYPE_UDPV6:
+       case IXGBE_ATR_FLOW_TYPE_SCTPV6:
+               break;
+       default:
+               DEBUGOUT(" Error on flow type input\n");
+               return IXGBE_ERR_CONFIG;
+       }
+
+       /* configure FDIRCMD register */
+       fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
+                 IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
+       fdircmd |= input.formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
+       fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
+
+       /*
+        * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
+        * is for FDIRCMD.  Then do a 64-bit register write from FDIRHASH.
+        */
+       fdirhashcmd = (u64)fdircmd << 32;
+       fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common);
+       IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);
+
+       DEBUGOUT2("Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);
+
+       return IXGBE_SUCCESS;
+}
+
+#define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \
+do { \
+       u32 n = (_n); \
+       if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
+               bucket_hash ^= lo_hash_dword >> n; \
+       if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
+               bucket_hash ^= hi_hash_dword >> n; \
+} while (0);
+
+/**
+ *  ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash
+ *  @input: input bitstream to compute the hash on (masked in place)
+ *  @input_mask: mask for the input bitstream
+ *
+ *  This function serves two main purposes.  First it applies the input_mask
+ *  to the atr_input resulting in a cleaned up atr_input data stream.
+ *  Secondly it computes the hash and stores it in the bkt_hash field at
+ *  the end of the input byte stream.  This way it will be available for
+ *  future use without needing to recompute the hash.
+ *
+ *  Note both unions are modified: every dword of @input is ANDed with the
+ *  mask, and input->formatted.bkt_hash receives the 13-bit result.
+ **/
+void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
+                                         union ixgbe_atr_input *input_mask)
+{
+
+       u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
+       u32 bucket_hash = 0;
+
+       /* Apply masks to input data */
+       input->dword_stream[0]  &= input_mask->dword_stream[0];
+       input->dword_stream[1]  &= input_mask->dword_stream[1];
+       input->dword_stream[2]  &= input_mask->dword_stream[2];
+       input->dword_stream[3]  &= input_mask->dword_stream[3];
+       input->dword_stream[4]  &= input_mask->dword_stream[4];
+       input->dword_stream[5]  &= input_mask->dword_stream[5];
+       input->dword_stream[6]  &= input_mask->dword_stream[6];
+       input->dword_stream[7]  &= input_mask->dword_stream[7];
+       input->dword_stream[8]  &= input_mask->dword_stream[8];
+       input->dword_stream[9]  &= input_mask->dword_stream[9];
+       input->dword_stream[10] &= input_mask->dword_stream[10];
+
+       /* record the flow_vm_vlan bits as they are a key part to the hash */
+       flow_vm_vlan = IXGBE_NTOHL(input->dword_stream[0]);
+
+       /* generate common hash dword */
+       hi_hash_dword = IXGBE_NTOHL(input->dword_stream[1] ^
+                                   input->dword_stream[2] ^
+                                   input->dword_stream[3] ^
+                                   input->dword_stream[4] ^
+                                   input->dword_stream[5] ^
+                                   input->dword_stream[6] ^
+                                   input->dword_stream[7] ^
+                                   input->dword_stream[8] ^
+                                   input->dword_stream[9] ^
+                                   input->dword_stream[10]);
+
+       /* low dword is word swapped version of common */
+       lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
+
+       /* apply flow ID/VM pool/VLAN ID bits to hash words */
+       hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
+
+       /* Process bits 0 and 16 */
+       IXGBE_COMPUTE_BKT_HASH_ITERATION(0);
+
+       /*
+        * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
+        * delay this because bit 0 of the stream should not be processed
+        * so we do not add the vlan until after bit 0 was processed
+        */
+       lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
+
+       /* Process remaining 30 bit of the key */
+       IXGBE_COMPUTE_BKT_HASH_ITERATION(1);
+       IXGBE_COMPUTE_BKT_HASH_ITERATION(2);
+       IXGBE_COMPUTE_BKT_HASH_ITERATION(3);
+       IXGBE_COMPUTE_BKT_HASH_ITERATION(4);
+       IXGBE_COMPUTE_BKT_HASH_ITERATION(5);
+       IXGBE_COMPUTE_BKT_HASH_ITERATION(6);
+       IXGBE_COMPUTE_BKT_HASH_ITERATION(7);
+       IXGBE_COMPUTE_BKT_HASH_ITERATION(8);
+       IXGBE_COMPUTE_BKT_HASH_ITERATION(9);
+       IXGBE_COMPUTE_BKT_HASH_ITERATION(10);
+       IXGBE_COMPUTE_BKT_HASH_ITERATION(11);
+       IXGBE_COMPUTE_BKT_HASH_ITERATION(12);
+       IXGBE_COMPUTE_BKT_HASH_ITERATION(13);
+       IXGBE_COMPUTE_BKT_HASH_ITERATION(14);
+       IXGBE_COMPUTE_BKT_HASH_ITERATION(15);
+
+       /*
+        * Limit hash to 13 bits since max bucket count is 8K.
+        * Store result at the end of the input stream.
+        */
+       input->formatted.bkt_hash = bucket_hash & 0x1FFF;
+}
+
+/**
+ *  ixgbe_get_fdirtcpm_82599 - generate a tcp port from atr_input_masks
+ *  @input_mask: mask to be bit swapped
+ *
+ *  The source and destination port masks for flow director are bit swapped
+ *  in that bit 15 effects bit 0, 14 effects 1, 13, 2 etc.  In order to
+ *  generate a correctly swapped value we need to bit swap the mask and that
+ *  is what is accomplished by this function.
+ **/
+static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask)
+{
+       u32 m;
+
+       /* pack dst_port into the upper half-word, src_port into the lower */
+       m = ((u32)IXGBE_NTOHS(input_mask->formatted.dst_port)
+            << IXGBE_FDIRTCPM_DPORTM_SHIFT)
+           | IXGBE_NTOHS(input_mask->formatted.src_port);
+
+       /* reverse the bit order within each 16-bit half-word */
+       m = ((m & 0xAAAAAAAA) >> 1) | ((m & 0x55555555) << 1);
+       m = ((m & 0xCCCCCCCC) >> 2) | ((m & 0x33333333) << 2);
+       m = ((m & 0xF0F0F0F0) >> 4) | ((m & 0x0F0F0F0F) << 4);
+       return ((m & 0xFF00FF00) >> 8) | ((m & 0x00FF00FF) << 8);
+}
+
+/*
+ * These two macros are meant to address the fact that we have registers
+ * that are either all or in part big-endian.  As a result on big-endian
+ * systems we will end up byte swapping the value to little-endian before
+ * it is byte swapped again and written to the hardware in the original
+ * big-endian format.
+ */
+/* Reverse the byte order of a 32-bit value. */
+#define IXGBE_STORE_AS_BE32(_value) \
+       (((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \
+        (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24))
+
+/* Write a network-order value to a register that holds big-endian data. */
+#define IXGBE_WRITE_REG_BE32(a, reg, value) \
+       IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(IXGBE_NTOHL(value)))
+
+/* Swap the two bytes of a 16-bit value, then apply IXGBE_NTOHS. */
+#define IXGBE_STORE_AS_BE16(_value) \
+       IXGBE_NTOHS(((u16)(_value) >> 8) | ((u16)(_value) << 8))
+
+/**
+ *  ixgbe_fdir_set_input_mask_82599 - program flow director field masks
+ *  @hw: pointer to hardware structure
+ *  @input_mask: mask bitstream; an all-zero field masks that field out
+ *
+ *  Validates the partial masks and programs FDIRM, FDIRTCPM/FDIRUDPM and
+ *  the IPv4 source/destination mask registers.  Returns IXGBE_SUCCESS, or
+ *  IXGBE_ERR_CONFIG when a field uses an unsupported partial mask.
+ **/
+s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
+                                   union ixgbe_atr_input *input_mask)
+{
+       /* mask IPv6 since it is currently not supported */
+       u32 fdirm = IXGBE_FDIRM_DIPv6;
+       u32 fdirtcpm;
+
+       /* fixed: the trace string previously named a nonexistent
+        * "ixgbe_fdir_set_atr_input_mask_82599" */
+       DEBUGFUNC("ixgbe_fdir_set_input_mask_82599");
+
+       /*
+        * Program the relevant mask registers.  If src/dst_port or src/dst_addr
+        * are zero, then assume a full mask for that field.  Also assume that
+        * a VLAN of 0 is unspecified, so mask that out as well.  L4type
+        * cannot be masked out in this implementation.
+        *
+        * This also assumes IPv4 only.  IPv6 masking isn't supported at this
+        * point in time.
+        */
+
+       /* verify bucket hash is cleared on hash generation */
+       if (input_mask->formatted.bkt_hash) {
+               DEBUGOUT(" bucket hash should always be 0 in mask\n");
+       }
+
+       /* Program FDIRM and verify partial masks */
+       switch (input_mask->formatted.vm_pool & 0x7F) {
+       case 0x0:
+               fdirm |= IXGBE_FDIRM_POOL;
+               /* fall through */
+       case 0x7F:
+               break;
+       default:
+               DEBUGOUT(" Error on vm pool mask\n");
+               return IXGBE_ERR_CONFIG;
+       }
+
+       switch (input_mask->formatted.flow_type & IXGBE_ATR_L4TYPE_MASK) {
+       case 0x0:
+               fdirm |= IXGBE_FDIRM_L4P;
+               if (input_mask->formatted.dst_port ||
+                   input_mask->formatted.src_port) {
+                       DEBUGOUT(" Error on src/dst port mask\n");
+                       return IXGBE_ERR_CONFIG;
+               }
+               /* fall through */
+       case IXGBE_ATR_L4TYPE_MASK:
+               break;
+       default:
+               DEBUGOUT(" Error on flow type mask\n");
+               return IXGBE_ERR_CONFIG;
+       }
+
+       switch (IXGBE_NTOHS(input_mask->formatted.vlan_id) & 0xEFFF) {
+       case 0x0000:
+               /* mask VLAN ID, fall through to mask VLAN priority */
+               fdirm |= IXGBE_FDIRM_VLANID;
+       case 0x0FFF:
+               /* mask VLAN priority */
+               fdirm |= IXGBE_FDIRM_VLANP;
+               break;
+       case 0xE000:
+               /* mask VLAN ID only, fall through */
+               fdirm |= IXGBE_FDIRM_VLANID;
+       case 0xEFFF:
+               /* no VLAN fields masked */
+               break;
+       default:
+               DEBUGOUT(" Error on VLAN mask\n");
+               return IXGBE_ERR_CONFIG;
+       }
+
+       switch (input_mask->formatted.flex_bytes & 0xFFFF) {
+       case 0x0000:
+               /* Mask Flex Bytes, fall through */
+               fdirm |= IXGBE_FDIRM_FLEX;
+       case 0xFFFF:
+               break;
+       default:
+               DEBUGOUT(" Error on flexible byte mask\n");
+               return IXGBE_ERR_CONFIG;
+       }
+
+       /* Now mask VM pool and destination IPv6 - bits 5 and 2 */
+       IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
+
+       /* store the TCP/UDP port masks, bit reversed from port layout */
+       fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask);
+
+       /* write both the same so that UDP and TCP use the same mask */
+       IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
+       IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
+
+       /* store source and destination IP masks (big-endian) */
+       IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
+                            ~input_mask->formatted.src_ip[0]);
+       IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
+                            ~input_mask->formatted.dst_ip[0]);
+
+       return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_fdir_write_perfect_filter_82599 - write a perfect filter to hardware
+ *  @hw: pointer to hardware structure
+ *  @input: input bitstream (masked, with formatted.bkt_hash already set)
+ *  @soft_id: software index stored in the FDIRHASH register
+ *  @queue: Rx queue index to direct matching traffic to
+ *
+ *  Programs the per-filter registers and issues the add-flow command.
+ *  Always returns IXGBE_SUCCESS.
+ **/
+s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
+                                         union ixgbe_atr_input *input,
+                                         u16 soft_id, u8 queue)
+{
+       u32 fdirport, fdirvlan, fdirhash, fdircmd;
+
+       DEBUGFUNC("ixgbe_fdir_write_perfect_filter_82599");
+
+       /* currently IPv6 is not supported, must be programmed with 0 */
+       IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0),
+                            input->formatted.src_ip[0]);
+       IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1),
+                            input->formatted.src_ip[1]);
+       IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2),
+                            input->formatted.src_ip[2]);
+
+       /* record the source address (big-endian) */
+       IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]);
+
+       /* record the first 32 bits of the destination address (big-endian) */
+       IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);
+
+       /* record source and destination port (little-endian)*/
+       fdirport = IXGBE_NTOHS(input->formatted.dst_port);
+       fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
+       fdirport |= IXGBE_NTOHS(input->formatted.src_port);
+       IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
+
+       /* record vlan (little-endian) and flex_bytes(big-endian) */
+       fdirvlan = IXGBE_STORE_AS_BE16(input->formatted.flex_bytes);
+       fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
+       fdirvlan |= IXGBE_NTOHS(input->formatted.vlan_id);
+       IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);
+
+       /* configure FDIRHASH register */
+       fdirhash = input->formatted.bkt_hash;
+       fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
+       IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
+
+       /*
+        * flush all previous writes to make certain registers are
+        * programmed prior to issuing the command
+        */
+       IXGBE_WRITE_FLUSH(hw);
+
+       /* configure FDIRCMD register */
+       fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
+                 IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
+       if (queue == IXGBE_FDIR_DROP_QUEUE)
+               fdircmd |= IXGBE_FDIRCMD_DROP;
+       fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
+       fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
+       fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT;
+
+       IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
+
+       return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_fdir_erase_perfect_filter_82599 - remove a perfect filter
+ *  @hw: pointer to hardware structure
+ *  @input: input bitstream whose formatted.bkt_hash identifies the filter
+ *  @soft_id: software index of the filter to remove
+ *
+ *  Queries the hardware for the filter and, if it is present, issues the
+ *  remove-flow command.  Returns IXGBE_SUCCESS, or
+ *  IXGBE_ERR_FDIR_REINIT_FAILED when the query never completed.
+ **/
+s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
+                                         union ixgbe_atr_input *input,
+                                         u16 soft_id)
+{
+       u32 fdirhash;
+       u32 fdircmd = 0;
+       u32 retry_count;
+       s32 err = IXGBE_SUCCESS;
+
+       /* added for consistency: every other routine in this file traces */
+       DEBUGFUNC("ixgbe_fdir_erase_perfect_filter_82599");
+
+       /* configure FDIRHASH register */
+       fdirhash = input->formatted.bkt_hash;
+       fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
+       IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
+
+       /* flush hash to HW */
+       IXGBE_WRITE_FLUSH(hw);
+
+       /* Query if filter is present */
+       IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT);
+
+       for (retry_count = 10; retry_count; retry_count--) {
+               /* allow 10us for query to process */
+               usec_delay(10);
+               /* verify query completed successfully */
+               fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
+               if (!(fdircmd & IXGBE_FDIRCMD_CMD_MASK))
+                       break;
+       }
+
+       if (!retry_count)
+               err = IXGBE_ERR_FDIR_REINIT_FAILED;
+
+       /* if filter exists in hardware then remove it */
+       if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) {
+               IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
+               IXGBE_WRITE_FLUSH(hw);
+               IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
+                               IXGBE_FDIRCMD_CMD_REMOVE_FLOW);
+       }
+
+       return err;
+}
+
+/**
+ *  ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
+ *  @hw: pointer to hardware structure
+ *  @input: input bitstream
+ *  @input_mask: mask for the input bitstream
+ *  @soft_id: software index for the filters
+ *  @queue: queue index to direct traffic to
+ *
+ *  Note that the caller to this function must lock before calling, since the
+ *  hardware writes must be protected from one another.
+ *
+ *  Returns IXGBE_SUCCESS, or IXGBE_ERR_CONFIG on a bad flow type or on
+ *  ports supplied for a flow type that does not carry them.
+ **/
+s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
+                                       union ixgbe_atr_input *input,
+                                       union ixgbe_atr_input *input_mask,
+                                       u16 soft_id, u8 queue)
+{
+       s32 err = IXGBE_ERR_CONFIG;
+
+       DEBUGFUNC("ixgbe_fdir_add_perfect_filter_82599");
+
+       /*
+        * Check flow_type formatting, and bail out before we touch the hardware
+        * if there's a configuration issue
+        */
+       switch (input->formatted.flow_type) {
+       case IXGBE_ATR_FLOW_TYPE_IPV4:
+               input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK;
+               if (input->formatted.dst_port || input->formatted.src_port) {
+                       DEBUGOUT(" Error on src/dst port\n");
+                       return IXGBE_ERR_CONFIG;
+               }
+               break;
+       case IXGBE_ATR_FLOW_TYPE_SCTPV4:
+               if (input->formatted.dst_port || input->formatted.src_port) {
+                       DEBUGOUT(" Error on src/dst port\n");
+                       return IXGBE_ERR_CONFIG;
+               }
+               /* fall through */
+       case IXGBE_ATR_FLOW_TYPE_TCPV4:
+       case IXGBE_ATR_FLOW_TYPE_UDPV4:
+               input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
+                                                 IXGBE_ATR_L4TYPE_MASK;
+               break;
+       default:
+               DEBUGOUT(" Error on flow type input\n");
+               return err;
+       }
+
+       /* program input mask into the HW */
+       err = ixgbe_fdir_set_input_mask_82599(hw, input_mask);
+       if (err)
+               return err;
+
+       /* apply mask and compute/store hash */
+       ixgbe_atr_compute_perfect_hash_82599(input, input_mask);
+
+       /* program filters to filter memory */
+       return ixgbe_fdir_write_perfect_filter_82599(hw, input,
+                                                    soft_id, queue);
+}
+
+/**
+ *  ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
+ *  @hw: pointer to hardware structure
+ *  @reg: analog register to read
+ *  @val: read value
+ *
+ *  Performs read operation to Omer analog register specified.
+ *  Always returns IXGBE_SUCCESS.
+ **/
+s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
+{
+       u32  core_ctl;
+
+       DEBUGFUNC("ixgbe_read_analog_reg8_82599");
+
+       /*
+        * NOTE(review): WRITE_CMD with the register address in bits 15:8
+        * appears to latch the address for the subsequent CORECTL read-back
+        * -- confirm against the 82599 analog interface documentation.
+        */
+       IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD |
+                       (reg << 8));
+       IXGBE_WRITE_FLUSH(hw);
+       /* wait 10us before reading the value back */
+       usec_delay(10);
+       core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL);
+       /* the analog value is in the low byte of CORECTL */
+       *val = (u8)core_ctl;
+
+       return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_write_analog_reg8_82599 - Writes 8 bit Omer analog register
+ *  @hw: pointer to hardware structure
+ *  @reg: atlas register to write
+ *  @val: value to write
+ *
+ *  Performs write operation to Omer analog register specified.
+ *  Always returns IXGBE_SUCCESS.
+ **/
+s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
+{
+       u32  core_ctl;
+
+       DEBUGFUNC("ixgbe_write_analog_reg8_82599");
+
+       /* register address goes in bits 15:8, value in the low byte */
+       core_ctl = (reg << 8) | val;
+       IXGBE_WRITE_REG(hw, IXGBE_CORECTL, core_ctl);
+       IXGBE_WRITE_FLUSH(hw);
+       /* wait 10us for the write to take effect */
+       usec_delay(10);
+
+       return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_start_hw_82599 - Prepare hardware for Tx/Rx
+ *  @hw: pointer to hardware structure
+ *
+ *  Starts the hardware using the generic start_hw function
+ *  and the generation start_hw function.
+ *  Then performs revision-specific operations, if any.
+ *
+ *  Returns IXGBE_SUCCESS, or the first error from the generic init
+ *  stages or the SFI firmware version check.
+ **/
+s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
+{
+       s32 ret_val;
+
+       DEBUGFUNC("ixgbe_start_hw_82599");
+
+       ret_val = ixgbe_start_hw_generic(hw);
+       if (ret_val != IXGBE_SUCCESS)
+               goto out;
+
+       ret_val = ixgbe_start_hw_gen2(hw);
+       if (ret_val != IXGBE_SUCCESS)
+               goto out;
+
+       /* We need to run link autotry after the driver loads */
+       hw->mac.autotry_restart = TRUE;
+
+       /* ret_val is guaranteed IXGBE_SUCCESS at this point (both error
+        * paths above jump to out), so no success check is needed before
+        * the firmware version verification. */
+       ret_val = ixgbe_verify_fw_version_82599(hw);
+out:
+       return ret_val;
+}
+
+/**
+ *  ixgbe_identify_phy_82599 - Get physical layer module
+ *  @hw: pointer to hardware structure
+ *
+ *  Determines the physical layer module found on the current adapter.
+ *  If PHY already detected, maintains current PHY type in hw struct,
+ *  otherwise executes the PHY detection routine.
+ *
+ *  Returns IXGBE_SUCCESS, or IXGBE_ERR_SFP_NOT_SUPPORTED when an
+ *  unsupported SFP module is present, or the error from the generic
+ *  identify routines.
+ **/
+s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
+{
+       s32 status;
+
+       DEBUGFUNC("ixgbe_identify_phy_82599");
+
+       /* Detect PHY if not unknown - returns success if already detected. */
+       status = ixgbe_identify_phy_generic(hw);
+       if (status != IXGBE_SUCCESS) {
+               /* 82599 10GBASE-T requires an external PHY */
+               if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)
+                       goto out;
+               else
+                       status = ixgbe_identify_sfp_module_generic(hw);
+       }
+
+       /* Set PHY type none if no PHY detected */
+       if (hw->phy.type == ixgbe_phy_unknown) {
+               hw->phy.type = ixgbe_phy_none;
+               status = IXGBE_SUCCESS;
+       }
+
+       /* Return error if SFP module has been detected but is not supported */
+       if (hw->phy.type == ixgbe_phy_sfp_unsupported)
+               status = IXGBE_ERR_SFP_NOT_SUPPORTED;
+
+out:
+       return status;
+}
+
+/**
+ *  ixgbe_get_supported_physical_layer_82599 - Returns physical layer type
+ *  @hw: pointer to hardware structure
+ *
+ *  Determines physical layer capabilities of the current configuration.
+ *  Dispatches on PHY type first (copper PHYs report their MDIO extended
+ *  abilities), then on the AUTOC link-mode-select field, falling through
+ *  to an SFP module query for serial/SFI configurations.
+ **/
+u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
+{
+       u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+       u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+       u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
+       u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
+       u32 pma_pmd_10g_parallel = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
+       u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
+       u16 ext_ability = 0;
+       u8 comp_codes_10g = 0;
+       u8 comp_codes_1g = 0;
+
+       /* Debug tag fixed to match the function name (previously
+        * "ixgbe_get_support_physical_layer_82599"). */
+       DEBUGFUNC("ixgbe_get_supported_physical_layer_82599");
+
+       hw->phy.ops.identify(hw);
+
+       switch (hw->phy.type) {
+       case ixgbe_phy_tn:
+       case ixgbe_phy_cu_unknown:
+               hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
+               IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
+               if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
+                       physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
+               if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
+                       physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
+               if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
+                       physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
+               goto out;
+       default:
+               break;
+       }
+
+       /* Unreachable "break" statements after "goto out" removed below. */
+       switch (autoc & IXGBE_AUTOC_LMS_MASK) {
+       case IXGBE_AUTOC_LMS_1G_AN:
+       case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
+               if (pma_pmd_1g == IXGBE_AUTOC_1G_KX_BX) {
+                       physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX |
+                           IXGBE_PHYSICAL_LAYER_1000BASE_BX;
+                       goto out;
+               } else
+                       /* SFI mode so read SFP module */
+                       goto sfp_check;
+       case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
+               if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_CX4)
+                       physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
+               else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_KX4)
+                       physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
+               else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_XAUI)
+                       physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_XAUI;
+               goto out;
+       case IXGBE_AUTOC_LMS_10G_SERIAL:
+               if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_KR) {
+                       physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR;
+                       goto out;
+               } else if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)
+                       goto sfp_check;
+               break;
+       case IXGBE_AUTOC_LMS_KX4_KX_KR:
+       case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
+               if (autoc & IXGBE_AUTOC_KX_SUPP)
+                       physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
+               if (autoc & IXGBE_AUTOC_KX4_SUPP)
+                       physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
+               if (autoc & IXGBE_AUTOC_KR_SUPP)
+                       physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR;
+               goto out;
+       default:
+               goto out;
+       }
+
+sfp_check:
+       /* SFP check must be done last since DA modules are sometimes used to
+        * test KR mode -  we need to id KR mode correctly before SFP module.
+        * Call identify_sfp because the pluggable module may have changed */
+       hw->phy.ops.identify_sfp(hw);
+       if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
+               goto out;
+
+       switch (hw->phy.type) {
+       case ixgbe_phy_sfp_passive_tyco:
+       case ixgbe_phy_sfp_passive_unknown:
+               physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
+               break;
+       case ixgbe_phy_sfp_ftl_active:
+       case ixgbe_phy_sfp_active_unknown:
+               physical_layer = IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA;
+               break;
+       case ixgbe_phy_sfp_avago:
+       case ixgbe_phy_sfp_ftl:
+       case ixgbe_phy_sfp_intel:
+       case ixgbe_phy_sfp_unknown:
+               hw->phy.ops.read_i2c_eeprom(hw,
+                     IXGBE_SFF_1GBE_COMP_CODES, &comp_codes_1g);
+               hw->phy.ops.read_i2c_eeprom(hw,
+                     IXGBE_SFF_10GBE_COMP_CODES, &comp_codes_10g);
+               if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
+                       physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
+               else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
+                       physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
+               else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE)
+                       physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T;
+               break;
+       default:
+               break;
+       }
+
+out:
+       return physical_layer;
+}
+
+/**
+ *  ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599
+ *  @hw: pointer to hardware structure
+ *  @regval: register value to write to RXCTRL
+ *
+ *  Enables the Rx DMA unit for 82599.  Always returns IXGBE_SUCCESS,
+ *  even if the security path never reports ready (best-effort errata
+ *  workaround; see the warning below).
+ **/
+s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
+{
+#define IXGBE_MAX_SECRX_POLL 30
+       int i;
+       /* NOTE(review): holds a 32-bit register value but is declared
+        * int rather than u32 — consider changing; verify no caller
+        * relies on signed behavior. */
+       int secrxreg;
+
+       DEBUGFUNC("ixgbe_enable_rx_dma_82599");
+
+       /*
+        * Workaround for 82599 silicon errata when enabling the Rx datapath.
+        * If traffic is incoming before we enable the Rx unit, it could hang
+        * the Rx DMA unit.  Therefore, make sure the security engine is
+        * completely disabled prior to enabling the Rx unit.
+        */
+       secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+       secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
+       IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
+       /* Poll up to 30 times (10us apart) for the security engine to
+        * acknowledge the disable. */
+       for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
+               secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
+               if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
+                       break;
+               else
+                       /* Use interrupt-safe sleep just in case */
+                       usec_delay(10);
+       }
+
+       /* For informational purposes only */
+       if (i >= IXGBE_MAX_SECRX_POLL)
+               DEBUGOUT("Rx unit being enabled before security "
+                        "path fully disabled.  Continuing with init.\n");
+
+       /* Enable Rx DMA, then re-enable the security Rx path. */
+       IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
+       secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+       secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
+       IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
+       IXGBE_WRITE_FLUSH(hw);
+
+       return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_verify_fw_version_82599 - verify fw version for 82599
+ *  @hw: pointer to hardware structure
+ *
+ *  Verifies that installed the firmware version is 0.6 or higher
+ *  for SFI devices. All 82599 SFI devices should have version 0.6 or higher.
+ *
+ *  Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or
+ *  if the FW version is not supported.
+ **/
+static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
+{
+       s32 status = IXGBE_ERR_EEPROM_VERSION;
+       u16 fw_offset, fw_ptp_cfg_offset;
+       u16 fw_version = 0;
+
+       DEBUGFUNC("ixgbe_verify_fw_version_82599");
+
+       /* firmware check is only necessary for SFI devices */
+       if (hw->phy.media_type != ixgbe_media_type_fiber) {
+               status = IXGBE_SUCCESS;
+               goto fw_version_out;
+       }
+
+       /* get the offset to the Firmware Module block.  A failed read is
+        * treated like a missing block so an uninitialized offset is
+        * never consumed (matches ixgbe_verify_lesm_fw_enabled_82599). */
+       if (hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset) !=
+           IXGBE_SUCCESS)
+               goto fw_version_out;
+
+       if ((fw_offset == 0) || (fw_offset == 0xFFFF))
+               goto fw_version_out;
+
+       /* get the offset to the Pass Through Patch Configuration block */
+       if (hw->eeprom.ops.read(hw, (fw_offset +
+                               IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR),
+                               &fw_ptp_cfg_offset) != IXGBE_SUCCESS)
+               goto fw_version_out;
+
+       if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF))
+               goto fw_version_out;
+
+       /* get the firmware version */
+       if (hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset +
+                               IXGBE_FW_PATCH_VERSION_4),
+                               &fw_version) != IXGBE_SUCCESS)
+               goto fw_version_out;
+
+       if (fw_version > 0x5)
+               status = IXGBE_SUCCESS;
+
+fw_version_out:
+       return status;
+}
+
+/**
+ *  ixgbe_verify_lesm_fw_enabled_82599 - Checks LESM FW module state.
+ *  @hw: pointer to hardware structure
+ *
+ *  Returns TRUE if the LESM FW module is present and enabled. Otherwise
+ *  returns FALSE. Smart Speed must be disabled if LESM FW module is enabled.
+ **/
+bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
+{
+       u16 fw_offset, fw_lesm_param_offset, fw_lesm_state;
+       s32 rc;
+
+       DEBUGFUNC("ixgbe_verify_lesm_fw_enabled_82599");
+
+       /* Locate the Firmware Module block; bail out on a failed read or
+        * a missing/erased (0 or 0xFFFF) pointer. */
+       rc = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
+       if ((rc != IXGBE_SUCCESS) ||
+           (fw_offset == 0) || (fw_offset == 0xFFFF))
+               return FALSE;
+
+       /* Locate the LESM Parameters block within the FW module. */
+       rc = hw->eeprom.ops.read(hw, (fw_offset +
+                                IXGBE_FW_LESM_PARAMETERS_PTR),
+                                &fw_lesm_param_offset);
+       if ((rc != IXGBE_SUCCESS) ||
+           (fw_lesm_param_offset == 0) || (fw_lesm_param_offset == 0xFFFF))
+               return FALSE;
+
+       /* Read the LESM state word and test the enabled bit. */
+       rc = hw->eeprom.ops.read(hw, (fw_lesm_param_offset +
+                                IXGBE_FW_LESM_STATE_1),
+                                &fw_lesm_state);
+       if ((rc == IXGBE_SUCCESS) &&
+           (fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED))
+               return TRUE;
+
+       return FALSE;
+}
+
+/**
+ *  ixgbe_read_eeprom_buffer_82599 - Read EEPROM word(s) using
+ *  fastest available method
+ *
+ *  @hw: pointer to hardware structure
+ *  @offset: offset of  word in EEPROM to read
+ *  @words: number of words
+ *  @data: word(s) read from the EEPROM
+ *
+ *  Retrieves 16 bit word(s) read from EEPROM
+ **/
+static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
+                                         u16 words, u16 *data)
+{
+       struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+
+       DEBUGFUNC("ixgbe_read_eeprom_buffer_82599");
+
+       /*
+        * SPI EEPROMs whose entire requested range is addressable with
+        * 14 bits can use the faster EERD path; anything else falls back
+        * to bit-banging the serial interface.
+        */
+       if (eeprom->type == ixgbe_eeprom_spi &&
+           offset + (words - 1) <= IXGBE_EERD_MAX_ADDR)
+               return ixgbe_read_eerd_buffer_generic(hw, offset, words,
+                                                     data);
+
+       return ixgbe_read_eeprom_buffer_bit_bang_generic(hw, offset, words,
+                                                        data);
+}
+
+/**
+ *  ixgbe_read_eeprom_82599 - Read EEPROM word using
+ *  fastest available method
+ *
+ *  @hw: pointer to hardware structure
+ *  @offset: offset of  word in the EEPROM to read
+ *  @data: word read from the EEPROM
+ *
+ *  Reads a 16 bit word from the EEPROM
+ **/
+static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
+                                  u16 offset, u16 *data)
+{
+       struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+
+       DEBUGFUNC("ixgbe_read_eeprom_82599");
+
+       /*
+        * A SPI EEPROM word reachable with a 14-bit address goes through
+        * the fast EERD register path; otherwise bit-bang the read.
+        */
+       if (eeprom->type == ixgbe_eeprom_spi &&
+           offset <= IXGBE_EERD_MAX_ADDR)
+               return ixgbe_read_eerd_generic(hw, offset, data);
+
+       return ixgbe_read_eeprom_bit_bang_generic(hw, offset, data);
+}
diff --git a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_api.c b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_api.c
new file mode 100644 (file)
index 0000000..cdee623
--- /dev/null
@@ -0,0 +1,1130 @@
+/******************************************************************************
+
+  Copyright (c) 2001-2010, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#include "ixgbe_api.h"
+#include "ixgbe_common.h"
+
+extern s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw);
+extern s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw);
+extern s32 ixgbe_init_ops_X540(struct ixgbe_hw *hw);
+extern s32 ixgbe_init_ops_vf(struct ixgbe_hw *hw);
+
+/**
+ *  ixgbe_init_shared_code - Initialize the shared code
+ *  @hw: pointer to hardware structure
+ *
+ *  This will assign function pointers and assign the MAC type and PHY code.
+ *  Does not touch the hardware. This function must be called prior to any
+ *  other function in the shared code. The ixgbe_hw structure should be
+ *  memset to 0 prior to calling this function.  The following fields in
+ *  hw structure should be filled in prior to calling this function:
+ *  hw_addr, back, device_id, vendor_id, subsystem_device_id,
+ *  subsystem_vendor_id, and revision_id
+ **/
+s32 ixgbe_init_shared_code(struct ixgbe_hw *hw)
+{
+       s32 status;
+
+       DEBUGFUNC("ixgbe_init_shared_code");
+
+       /* Resolve the MAC type first; the ops assignment below
+        * dispatches on it. */
+       ixgbe_set_mac_type(hw);
+
+       if (hw->mac.type == ixgbe_mac_82598EB)
+               status = ixgbe_init_ops_82598(hw);
+       else if (hw->mac.type == ixgbe_mac_82599EB)
+               status = ixgbe_init_ops_82599(hw);
+       else if (hw->mac.type == ixgbe_mac_X540)
+               status = ixgbe_init_ops_X540(hw);
+       else if (hw->mac.type == ixgbe_mac_82599_vf ||
+                hw->mac.type == ixgbe_mac_X540_vf)
+               status = ixgbe_init_ops_vf(hw);
+       else
+               status = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
+
+       return status;
+}
+
+/**
+ *  ixgbe_set_mac_type - Sets MAC type
+ *  @hw: pointer to the HW structure
+ *
+ *  This function sets the mac type of the adapter based on the
+ *  vendor ID and device ID stored in the hw structure.
+ *
+ *  Returns IXGBE_SUCCESS, or IXGBE_ERR_DEVICE_NOT_SUPPORTED for an
+ *  unrecognized vendor or device ID.
+ **/
+s32 ixgbe_set_mac_type(struct ixgbe_hw *hw)
+{
+       s32 ret_val = IXGBE_SUCCESS;
+
+       /* Stray "\n" removed from the debug tag for consistency with
+        * every other DEBUGFUNC invocation in this file. */
+       DEBUGFUNC("ixgbe_set_mac_type");
+
+       if (hw->vendor_id == IXGBE_INTEL_VENDOR_ID) {
+               switch (hw->device_id) {
+               case IXGBE_DEV_ID_82598:
+               case IXGBE_DEV_ID_82598_BX:
+               case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
+               case IXGBE_DEV_ID_82598AF_DUAL_PORT:
+               case IXGBE_DEV_ID_82598AT:
+               case IXGBE_DEV_ID_82598AT2:
+               case IXGBE_DEV_ID_82598EB_CX4:
+               case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
+               case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
+               case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
+               case IXGBE_DEV_ID_82598EB_XF_LR:
+               case IXGBE_DEV_ID_82598EB_SFP_LOM:
+                       hw->mac.type = ixgbe_mac_82598EB;
+                       break;
+               case IXGBE_DEV_ID_82599_KX4:
+               case IXGBE_DEV_ID_82599_KX4_MEZZ:
+               case IXGBE_DEV_ID_82599_XAUI_LOM:
+               case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
+               case IXGBE_DEV_ID_82599_KR:
+               case IXGBE_DEV_ID_82599_SFP:
+               case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
+               case IXGBE_DEV_ID_82599_SFP_FCOE:
+               case IXGBE_DEV_ID_82599_SFP_EM:
+               case IXGBE_DEV_ID_82599EN_SFP:
+               case IXGBE_DEV_ID_82599_CX4:
+               case IXGBE_DEV_ID_82599_T3_LOM:
+                       hw->mac.type = ixgbe_mac_82599EB;
+                       break;
+               case IXGBE_DEV_ID_82599_VF:
+                       hw->mac.type = ixgbe_mac_82599_vf;
+                       break;
+               case IXGBE_DEV_ID_X540_VF:
+                       hw->mac.type = ixgbe_mac_X540_vf;
+                       break;
+               case IXGBE_DEV_ID_X540T:
+                       hw->mac.type = ixgbe_mac_X540;
+                       break;
+               default:
+                       ret_val = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
+                       break;
+               }
+       } else {
+               ret_val = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
+       }
+
+       DEBUGOUT2("ixgbe_set_mac_type found mac: %d, returns: %d\n",
+                 hw->mac.type, ret_val);
+       return ret_val;
+}
+
+/**
+ *  ixgbe_init_hw - Initialize the hardware
+ *  @hw: pointer to hardware structure
+ *
+ *  Initialize the hardware by resetting and then starting the hardware
+ **/
+s32 ixgbe_init_hw(struct ixgbe_hw *hw)
+{
+       /* Dispatch through the MAC ops table; ixgbe_call_func's last
+        * argument is the fallback result when the op is unset. */
+       return ixgbe_call_func(hw, hw->mac.ops.init_hw, (hw),
+                              IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_reset_hw - Performs a hardware reset
+ *  @hw: pointer to hardware structure
+ *
+ *  Resets the hardware by resetting the transmit and receive units, masks and
+ *  clears all interrupts, performs a PHY reset, and performs a MAC reset
+ **/
+s32 ixgbe_reset_hw(struct ixgbe_hw *hw)
+{
+       /* Dispatch via the MAC ops table (see ixgbe_call_func). */
+       return ixgbe_call_func(hw, hw->mac.ops.reset_hw, (hw),
+                              IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_start_hw - Prepares hardware for Rx/Tx
+ *  @hw: pointer to hardware structure
+ *
+ *  Starts the hardware by filling the bus info structure and media type,
+ *  clears all on chip counters, initializes receive address registers,
+ *  multicast table, VLAN filter table, calls routine to setup link and
+ *  flow control settings, and leaves transmit and receive units disabled
+ *  and uninitialized.
+ **/
+s32 ixgbe_start_hw(struct ixgbe_hw *hw)
+{
+       /* Dispatch via the MAC ops table (see ixgbe_call_func). */
+       return ixgbe_call_func(hw, hw->mac.ops.start_hw, (hw),
+                              IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_enable_relaxed_ordering - Enables tx relaxed ordering,
+ *  which is disabled by default in ixgbe_start_hw();
+ *
+ *  @hw: pointer to hardware structure
+ *
+ *  Enable relaxed ordering.  A no-op when the MAC does not provide the
+ *  operation.
+ **/
+void ixgbe_enable_relaxed_ordering(struct ixgbe_hw *hw)
+{
+       void (*enable_op)(struct ixgbe_hw *) =
+               hw->mac.ops.enable_relaxed_ordering;
+
+       if (enable_op != NULL)
+               enable_op(hw);
+}
+
+/**
+ *  ixgbe_clear_hw_cntrs - Clear hardware counters
+ *  @hw: pointer to hardware structure
+ *
+ *  Clears all hardware statistics counters by reading them from the hardware
+ *  Statistics counters are clear on read.
+ **/
+s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw)
+{
+       /* Dispatch via the MAC ops table (see ixgbe_call_func). */
+       return ixgbe_call_func(hw, hw->mac.ops.clear_hw_cntrs, (hw),
+                              IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_get_media_type - Get media type
+ *  @hw: pointer to hardware structure
+ *
+ *  Returns the media type (fiber, copper, backplane), or
+ *  ixgbe_media_type_unknown when the op is not implemented.
+ **/
+enum ixgbe_media_type ixgbe_get_media_type(struct ixgbe_hw *hw)
+{
+       return ixgbe_call_func(hw, hw->mac.ops.get_media_type, (hw),
+                              ixgbe_media_type_unknown);
+}
+
+/**
+ *  ixgbe_get_mac_addr - Get MAC address
+ *  @hw: pointer to hardware structure
+ *  @mac_addr: Adapter MAC address
+ *
+ *  Reads the adapter's MAC address from the first Receive Address Register
+ *  (RAR0) A reset of the adapter must have been performed prior to calling
+ *  this function in order for the MAC address to have been loaded from the
+ *  EEPROM into RAR0
+ **/
+s32 ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr)
+{
+       /* Dispatch via the MAC ops table (see ixgbe_call_func). */
+       return ixgbe_call_func(hw, hw->mac.ops.get_mac_addr,
+                              (hw, mac_addr), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_get_san_mac_addr - Get SAN MAC address
+ *  @hw: pointer to hardware structure
+ *  @san_mac_addr: SAN MAC address
+ *
+ *  Reads the SAN MAC address from the EEPROM, if it's available.  This is
+ *  per-port, so set_lan_id() must be called before reading the addresses.
+ **/
+s32 ixgbe_get_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr)
+{
+       /* Dispatch via the MAC ops table (see ixgbe_call_func). */
+       return ixgbe_call_func(hw, hw->mac.ops.get_san_mac_addr,
+                              (hw, san_mac_addr), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_set_san_mac_addr - Write a SAN MAC address
+ *  @hw: pointer to hardware structure
+ *  @san_mac_addr: SAN MAC address
+ *
+ *  Writes A SAN MAC address to the EEPROM.
+ **/
+s32 ixgbe_set_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr)
+{
+       /* Dispatch via the MAC ops table (see ixgbe_call_func). */
+       return ixgbe_call_func(hw, hw->mac.ops.set_san_mac_addr,
+                              (hw, san_mac_addr), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_get_device_caps - Get additional device capabilities
+ *  @hw: pointer to hardware structure
+ *  @device_caps: the EEPROM word for device capabilities
+ *
+ *  Reads the extra device capabilities from the EEPROM
+ **/
+s32 ixgbe_get_device_caps(struct ixgbe_hw *hw, u16 *device_caps)
+{
+       /* Dispatch via the MAC ops table (see ixgbe_call_func). */
+       return ixgbe_call_func(hw, hw->mac.ops.get_device_caps,
+                              (hw, device_caps), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_get_wwn_prefix - Get alternative WWNN/WWPN prefix from the EEPROM
+ *  @hw: pointer to hardware structure
+ *  @wwnn_prefix: the alternative WWNN prefix
+ *  @wwpn_prefix: the alternative WWPN prefix
+ *
+ *  This function will read the EEPROM from the alternative SAN MAC address
+ *  block to check the support for the alternative WWNN/WWPN prefix support.
+ **/
+s32 ixgbe_get_wwn_prefix(struct ixgbe_hw *hw, u16 *wwnn_prefix,
+                         u16 *wwpn_prefix)
+{
+       /* Dispatch via the MAC ops table (see ixgbe_call_func). */
+       return ixgbe_call_func(hw, hw->mac.ops.get_wwn_prefix,
+                              (hw, wwnn_prefix, wwpn_prefix),
+                              IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_get_fcoe_boot_status -  Get FCOE boot status from EEPROM
+ *  @hw: pointer to hardware structure
+ *  @bs: the fcoe boot status
+ *
+ *  This function will read the FCOE boot status from the iSCSI FCOE block
+ **/
+s32 ixgbe_get_fcoe_boot_status(struct ixgbe_hw *hw, u16 *bs)
+{
+       /* Dispatch via the MAC ops table (see ixgbe_call_func). */
+       return ixgbe_call_func(hw, hw->mac.ops.get_fcoe_boot_status,
+                              (hw, bs),
+                              IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_get_bus_info - Set PCI bus info
+ *  @hw: pointer to hardware structure
+ *
+ *  Sets the PCI bus info (speed, width, type) within the ixgbe_hw structure
+ **/
+s32 ixgbe_get_bus_info(struct ixgbe_hw *hw)
+{
+       /* Dispatch via the MAC ops table (see ixgbe_call_func). */
+       return ixgbe_call_func(hw, hw->mac.ops.get_bus_info, (hw),
+                              IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_get_num_of_tx_queues - Get Tx queues
+ *  @hw: pointer to hardware structure
+ *
+ *  Returns the number of transmit queues for the given adapter.
+ *  Simple accessor for the cached mac.max_tx_queues value; does not
+ *  touch the hardware.
+ **/
+u32 ixgbe_get_num_of_tx_queues(struct ixgbe_hw *hw)
+{
+       return hw->mac.max_tx_queues;
+}
+
+/**
+ *  ixgbe_get_num_of_rx_queues - Get Rx queues
+ *  @hw: pointer to hardware structure
+ *
+ *  Returns the number of receive queues for the given adapter.
+ *  Simple accessor for the cached mac.max_rx_queues value; does not
+ *  touch the hardware.
+ **/
+u32 ixgbe_get_num_of_rx_queues(struct ixgbe_hw *hw)
+{
+       return hw->mac.max_rx_queues;
+}
+
+/**
+ *  ixgbe_stop_adapter - Disable Rx/Tx units
+ *  @hw: pointer to hardware structure
+ *
+ *  Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
+ *  disables transmit and receive units. The adapter_stopped flag is used by
+ *  the shared code and drivers to determine if the adapter is in a stopped
+ *  state and should not touch the hardware.
+ **/
+s32 ixgbe_stop_adapter(struct ixgbe_hw *hw)
+{
+       /* Dispatch via the MAC ops table (see ixgbe_call_func). */
+       return ixgbe_call_func(hw, hw->mac.ops.stop_adapter, (hw),
+                              IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_read_pba_string - Reads part number string from EEPROM
+ *  @hw: pointer to hardware structure
+ *  @pba_num: stores the part number string from the EEPROM
+ *  @pba_num_size: part number string buffer length
+ *
+ *  Reads the part number string from the EEPROM.
+ *  Delegates directly to the generic implementation (not routed through
+ *  the ops table).
+ **/
+s32 ixgbe_read_pba_string(struct ixgbe_hw *hw, u8 *pba_num, u32 pba_num_size)
+{
+       return ixgbe_read_pba_string_generic(hw, pba_num, pba_num_size);
+}
+
+/**
+ *  ixgbe_read_pba_num - Reads part number from EEPROM
+ *  @hw: pointer to hardware structure
+ *  @pba_num: stores the part number from the EEPROM
+ *
+ *  Reads the part number from the EEPROM.
+ *  Delegates directly to the generic implementation (not routed through
+ *  the ops table).
+ **/
+s32 ixgbe_read_pba_num(struct ixgbe_hw *hw, u32 *pba_num)
+{
+       return ixgbe_read_pba_num_generic(hw, pba_num);
+}
+
+/**
+ *  ixgbe_identify_phy - Get PHY type
+ *  @hw: pointer to hardware structure
+ *
+ *  Determines the physical layer module found on the current adapter.
+ *  If the PHY type is already known, this is a no-op returning
+ *  IXGBE_SUCCESS.
+ **/
+s32 ixgbe_identify_phy(struct ixgbe_hw *hw)
+{
+       /* Already identified - nothing more to do. */
+       if (hw->phy.type != ixgbe_phy_unknown)
+               return IXGBE_SUCCESS;
+
+       return ixgbe_call_func(hw, hw->phy.ops.identify, (hw),
+                              IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_reset_phy - Perform a PHY reset
+ *  @hw: pointer to hardware structure
+ *
+ *  Identifies the PHY first if it is still unknown, then dispatches the
+ *  reset through the PHY ops table.  Returns IXGBE_ERR_PHY when
+ *  identification fails.
+ **/
+s32 ixgbe_reset_phy(struct ixgbe_hw *hw)
+{
+       /* An unknown PHY must be identified before it can be reset. */
+       if (hw->phy.type == ixgbe_phy_unknown &&
+           ixgbe_identify_phy(hw) != IXGBE_SUCCESS)
+               return IXGBE_ERR_PHY;
+
+       return ixgbe_call_func(hw, hw->phy.ops.reset, (hw),
+                              IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_get_phy_firmware_version - Get PHY firmware version
+ *  @hw: pointer to hardware structure
+ *  @firmware_version: pointer to firmware version
+ *
+ *  Dispatches through the PHY ops table; IXGBE_NOT_IMPLEMENTED is the
+ *  fallback result when the op is not provided.
+ **/
+s32 ixgbe_get_phy_firmware_version(struct ixgbe_hw *hw, u16 *firmware_version)
+{
+       return ixgbe_call_func(hw, hw->phy.ops.get_firmware_version,
+                              (hw, firmware_version),
+                              IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_read_phy_reg - Read PHY register
+ *  @hw: pointer to hardware structure
+ *  @reg_addr: 32 bit address of PHY register to read
+ *  @device_type: MDIO device type/address
+ *  @phy_data: Pointer to read data from PHY register
+ *
+ *  Reads a value from a specified PHY register
+ **/
+s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
+                       u16 *phy_data)
+{
+       /* Lazily identify the PHY if no PHY id has been cached yet. */
+       if (hw->phy.id == 0)
+               ixgbe_identify_phy(hw);
+
+       return ixgbe_call_func(hw, hw->phy.ops.read_reg, (hw, reg_addr,
+                              device_type, phy_data), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_write_phy_reg - Write PHY register
+ *  @hw: pointer to hardware structure
+ *  @reg_addr: 32 bit PHY register to write
+ *  @device_type: MDIO device type/address
+ *  @phy_data: Data to write to the PHY register
+ *
+ *  Writes a value to specified PHY register
+ **/
+s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
+                        u16 phy_data)
+{
+       /* Lazily identify the PHY if no PHY id has been cached yet. */
+       if (hw->phy.id == 0)
+               ixgbe_identify_phy(hw);
+
+       return ixgbe_call_func(hw, hw->phy.ops.write_reg, (hw, reg_addr,
+                              device_type, phy_data), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_setup_phy_link - Restart PHY autoneg
+ *  @hw: pointer to hardware structure
+ *
+ *  Restart autonegotiation and PHY and waits for completion.
+ **/
+s32 ixgbe_setup_phy_link(struct ixgbe_hw *hw)
+{
+       /* Dispatch via the PHY ops table (see ixgbe_call_func). */
+       return ixgbe_call_func(hw, hw->phy.ops.setup_link, (hw),
+                              IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_check_phy_link - Determine link and speed status
+ *  @hw: pointer to hardware structure
+ *  @speed: storage for the detected link speed
+ *  @link_up: storage for the link-up flag
+ *
+ *  Reads a PHY register to determine if link is up and the current speed for
+ *  the PHY.
+ **/
+s32 ixgbe_check_phy_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+                         bool *link_up)
+{
+       /* Dispatch via the PHY ops table (see ixgbe_call_func). */
+       return ixgbe_call_func(hw, hw->phy.ops.check_link, (hw, speed,
+                              link_up), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_setup_phy_link_speed - Set auto advertise
+ *  @hw: pointer to hardware structure
+ *  @speed: new link speed
+ *  @autoneg: TRUE if autonegotiation enabled
+ *  @autoneg_wait_to_complete: TRUE to wait for autonegotiation to complete
+ *
+ *  Sets the auto advertised capabilities
+ **/
+s32 ixgbe_setup_phy_link_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+                               bool autoneg,
+                               bool autoneg_wait_to_complete)
+{
+       return ixgbe_call_func(hw, hw->phy.ops.setup_link_speed, (hw, speed,
+                              autoneg, autoneg_wait_to_complete),
+                              IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_check_link - Get link and speed status
+ *  @hw: pointer to hardware structure
+ *  @speed: output, the current link speed
+ *  @link_up: output, TRUE if link is up
+ *  @link_up_wait_to_complete: TRUE to wait for link-up to complete
+ *
+ *  Reads the links register to determine if link is up and the current speed
+ **/
+s32 ixgbe_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+                     bool *link_up, bool link_up_wait_to_complete)
+{
+       return ixgbe_call_func(hw, hw->mac.ops.check_link, (hw, speed,
+                              link_up, link_up_wait_to_complete),
+                              IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_disable_tx_laser - Disable Tx laser
+ *  @hw: pointer to hardware structure
+ *
+ *  If the driver needs to disable the laser on SFI optics.
+ *  No-op when the MAC does not provide a disable_tx_laser op.
+ **/
+void ixgbe_disable_tx_laser(struct ixgbe_hw *hw)
+{
+       if (hw->mac.ops.disable_tx_laser)
+               hw->mac.ops.disable_tx_laser(hw);
+}
+
+/**
+ *  ixgbe_enable_tx_laser - Enable Tx laser
+ *  @hw: pointer to hardware structure
+ *
+ *  If the driver needs to enable the laser on SFI optics.
+ *  No-op when the MAC does not provide an enable_tx_laser op.
+ **/
+void ixgbe_enable_tx_laser(struct ixgbe_hw *hw)
+{
+       if (hw->mac.ops.enable_tx_laser)
+               hw->mac.ops.enable_tx_laser(hw);
+}
+
+/**
+ *  ixgbe_flap_tx_laser - flap Tx laser to start autotry process
+ *  @hw: pointer to hardware structure
+ *
+ *  When the driver changes the link speeds that it can support then
+ *  flap the tx laser to alert the link partner to start autotry
+ *  process on its end.  No-op when the MAC does not provide a
+ *  flap_tx_laser op.
+ **/
+void ixgbe_flap_tx_laser(struct ixgbe_hw *hw)
+{
+       if (hw->mac.ops.flap_tx_laser)
+               hw->mac.ops.flap_tx_laser(hw);
+}
+
+/**
+ *  ixgbe_setup_link - Set link speed
+ *  @hw: pointer to hardware structure
+ *  @speed: new link speed
+ *  @autoneg: TRUE if autonegotiation enabled
+ *  @autoneg_wait_to_complete: TRUE to wait for autonegotiation to complete
+ *
+ *  Configures link settings.  Restarts the link.
+ *  Performs autonegotiation if needed.
+ **/
+s32 ixgbe_setup_link(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+                           bool autoneg,
+                           bool autoneg_wait_to_complete)
+{
+       return ixgbe_call_func(hw, hw->mac.ops.setup_link, (hw, speed,
+                              autoneg, autoneg_wait_to_complete),
+                              IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_get_link_capabilities - Returns link capabilities
+ *  @hw: pointer to hardware structure
+ *  @speed: output, supported link speed(s)
+ *  @autoneg: output, TRUE if autonegotiation is supported/enabled
+ *
+ *  Determines the link capabilities of the current configuration.
+ **/
+s32 ixgbe_get_link_capabilities(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+                                bool *autoneg)
+{
+       return ixgbe_call_func(hw, hw->mac.ops.get_link_capabilities, (hw,
+                              speed, autoneg), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_led_on - Turn on LEDs
+ *  @hw: pointer to hardware structure
+ *  @index: led number to turn on
+ *
+ *  Turns on the software controllable LEDs.  Thin wrapper dispatching to
+ *  hw->mac.ops.led_on (IXGBE_NOT_IMPLEMENTED is the fallback status).
+ **/
+s32 ixgbe_led_on(struct ixgbe_hw *hw, u32 index)
+{
+       return ixgbe_call_func(hw, hw->mac.ops.led_on, (hw, index),
+                              IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_led_off - Turn off LEDs
+ *  @hw: pointer to hardware structure
+ *  @index: led number to turn off
+ *
+ *  Turns off the software controllable LEDs.  Thin wrapper dispatching to
+ *  hw->mac.ops.led_off (IXGBE_NOT_IMPLEMENTED is the fallback status).
+ **/
+s32 ixgbe_led_off(struct ixgbe_hw *hw, u32 index)
+{
+       return ixgbe_call_func(hw, hw->mac.ops.led_off, (hw, index),
+                              IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_blink_led_start - Blink LEDs
+ *  @hw: pointer to hardware structure
+ *  @index: led number to blink
+ *
+ *  Blink LED based on index.  Thin wrapper dispatching to
+ *  hw->mac.ops.blink_led_start (IXGBE_NOT_IMPLEMENTED is the fallback).
+ **/
+s32 ixgbe_blink_led_start(struct ixgbe_hw *hw, u32 index)
+{
+       return ixgbe_call_func(hw, hw->mac.ops.blink_led_start, (hw, index),
+                              IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_blink_led_stop - Stop blinking LEDs
+ *  @hw: pointer to hardware structure
+ *  @index: led number to stop blinking
+ *
+ *  Stop blinking LED based on index.
+ **/
+s32 ixgbe_blink_led_stop(struct ixgbe_hw *hw, u32 index)
+{
+       return ixgbe_call_func(hw, hw->mac.ops.blink_led_stop, (hw, index),
+                              IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_init_eeprom_params - Initialize EEPROM parameters
+ *  @hw: pointer to hardware structure
+ *
+ *  Initializes the EEPROM parameters ixgbe_eeprom_info within the
+ *  ixgbe_hw struct in order to set up EEPROM access.  Dispatches to
+ *  hw->eeprom.ops.init_params via ixgbe_call_func.
+ **/
+s32 ixgbe_init_eeprom_params(struct ixgbe_hw *hw)
+{
+       return ixgbe_call_func(hw, hw->eeprom.ops.init_params, (hw),
+                              IXGBE_NOT_IMPLEMENTED);
+}
+
+
+/**
+ *  ixgbe_write_eeprom - Write word to EEPROM
+ *  @hw: pointer to hardware structure
+ *  @offset: offset within the EEPROM to be written to
+ *  @data: 16 bit word to be written to the EEPROM
+ *
+ *  Writes 16 bit value to EEPROM. If ixgbe_eeprom_update_checksum is not
+ *  called after this function, the EEPROM will most likely contain an
+ *  invalid checksum.  Dispatches to hw->eeprom.ops.write.
+ **/
+s32 ixgbe_write_eeprom(struct ixgbe_hw *hw, u16 offset, u16 data)
+{
+       return ixgbe_call_func(hw, hw->eeprom.ops.write, (hw, offset, data),
+                              IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_write_eeprom_buffer - Write word(s) to EEPROM
+ *  @hw: pointer to hardware structure
+ *  @offset: offset within the EEPROM to be written to
+ *  @words: number of words
+ *  @data: 16 bit word(s) to be written to the EEPROM
+ *
+ *  Writes 16 bit word(s) to EEPROM. If ixgbe_eeprom_update_checksum is not
+ *  called after this function, the EEPROM will most likely contain an
+ *  invalid checksum.  Dispatches to hw->eeprom.ops.write_buffer.
+ **/
+s32 ixgbe_write_eeprom_buffer(struct ixgbe_hw *hw, u16 offset, u16 words,
+                              u16 *data)
+{
+       return ixgbe_call_func(hw, hw->eeprom.ops.write_buffer,
+                              (hw, offset, words, data),
+                              IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_read_eeprom - Read word from EEPROM
+ *  @hw: pointer to hardware structure
+ *  @offset: offset within the EEPROM to be read
+ *  @data: output, 16 bit value read from EEPROM
+ *
+ *  Reads 16 bit value from EEPROM.  Dispatches to hw->eeprom.ops.read.
+ **/
+s32 ixgbe_read_eeprom(struct ixgbe_hw *hw, u16 offset, u16 *data)
+{
+       return ixgbe_call_func(hw, hw->eeprom.ops.read, (hw, offset, data),
+                              IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_read_eeprom_buffer - Read word(s) from EEPROM
+ *  @hw: pointer to hardware structure
+ *  @offset: offset within the EEPROM to be read
+ *  @words: number of words
+ *  @data: output, 16 bit word(s) read from EEPROM
+ *
+ *  Reads 16 bit word(s) from EEPROM.  Dispatches to
+ *  hw->eeprom.ops.read_buffer.
+ **/
+s32 ixgbe_read_eeprom_buffer(struct ixgbe_hw *hw, u16 offset,
+                             u16 words, u16 *data)
+{
+       return ixgbe_call_func(hw, hw->eeprom.ops.read_buffer,
+                              (hw, offset, words, data),
+                              IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_validate_eeprom_checksum - Validate EEPROM checksum
+ *  @hw: pointer to hardware structure
+ *  @checksum_val: output, the calculated checksum
+ *
+ *  Performs checksum calculation and validates the EEPROM checksum.
+ *  Dispatches to hw->eeprom.ops.validate_checksum.
+ **/
+s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val)
+{
+       return ixgbe_call_func(hw, hw->eeprom.ops.validate_checksum,
+                              (hw, checksum_val), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_update_eeprom_checksum - Updates the EEPROM checksum
+ *  @hw: pointer to hardware structure
+ *
+ *  Dispatches to hw->eeprom.ops.update_checksum via ixgbe_call_func.
+ **/
+s32 ixgbe_update_eeprom_checksum(struct ixgbe_hw *hw)
+{
+       return ixgbe_call_func(hw, hw->eeprom.ops.update_checksum, (hw),
+                              IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_insert_mac_addr - Find a RAR for this mac address
+ *  @hw: pointer to hardware structure
+ *  @addr: Address to put into receive address register
+ *  @vmdq: VMDq pool to assign
+ *
+ *  Puts an ethernet address into a receive address register, or
+ *  finds the rar that it is already in; adds to the pool list
+ **/
+s32 ixgbe_insert_mac_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
+{
+       return ixgbe_call_func(hw, hw->mac.ops.insert_mac_addr,
+                              (hw, addr, vmdq),
+                              IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_set_rar - Set Rx address register
+ *  @hw: pointer to hardware structure
+ *  @index: Receive address register to write
+ *  @addr: Address to put into receive address register
+ *  @vmdq: VMDq "set"
+ *  @enable_addr: set flag that address is active
+ *
+ *  Puts an ethernet address into a receive address register.
+ *  Dispatches to hw->mac.ops.set_rar via ixgbe_call_func.
+ **/
+s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
+                  u32 enable_addr)
+{
+       return ixgbe_call_func(hw, hw->mac.ops.set_rar, (hw, index, addr, vmdq,
+                              enable_addr), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_clear_rar - Clear Rx address register
+ *  @hw: pointer to hardware structure
+ *  @index: Receive address register to clear
+ *
+ *  Clears the ethernet address out of the given receive address register.
+ **/
+s32 ixgbe_clear_rar(struct ixgbe_hw *hw, u32 index)
+{
+       return ixgbe_call_func(hw, hw->mac.ops.clear_rar, (hw, index),
+                              IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_set_vmdq - Associate a VMDq index with a receive address
+ *  @hw: pointer to hardware structure
+ *  @rar: receive address register index to associate with VMDq index
+ *  @vmdq: VMDq set or pool index
+ *
+ *  Dispatches to hw->mac.ops.set_vmdq via ixgbe_call_func.
+ **/
+s32 ixgbe_set_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+{
+       return ixgbe_call_func(hw, hw->mac.ops.set_vmdq, (hw, rar, vmdq),
+                              IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_clear_vmdq - Disassociate a VMDq index from a receive address
+ *  @hw: pointer to hardware structure
+ *  @rar: receive address register index to disassociate from the VMDq index
+ *  @vmdq: VMDq set or pool index
+ **/
+s32 ixgbe_clear_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+{
+       return ixgbe_call_func(hw, hw->mac.ops.clear_vmdq, (hw, rar, vmdq),
+                              IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_init_rx_addrs - Initializes receive address filters.
+ *  @hw: pointer to hardware structure
+ *
+ *  Places the MAC address in receive address register 0 and clears the rest
+ *  of the receive address registers. Clears the multicast table. Assumes
+ *  the receiver is in reset when the routine is called.
+ *  Dispatches to hw->mac.ops.init_rx_addrs via ixgbe_call_func.
+ **/
+s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw)
+{
+       return ixgbe_call_func(hw, hw->mac.ops.init_rx_addrs, (hw),
+                              IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_get_num_rx_addrs - Returns the number of RAR entries.
+ *  @hw: pointer to hardware structure
+ *
+ *  Simple accessor: returns hw->mac.num_rar_entries directly, with no
+ *  ops-table dispatch.
+ **/
+u32 ixgbe_get_num_rx_addrs(struct ixgbe_hw *hw)
+{
+       return hw->mac.num_rar_entries;
+}
+
+/**
+ *  ixgbe_update_uc_addr_list - Updates the MAC's list of secondary addresses
+ *  @hw: pointer to hardware structure
+ *  @addr_list: the list of new secondary (unicast) addresses
+ *  @addr_count: number of addresses
+ *  @func: iterator function to walk the address list
+ *
+ *  The given list replaces any existing list. Clears the secondary addrs from
+ *  receive address registers. Uses unused receive address registers for the
+ *  first secondary addresses, and falls back to promiscuous mode as needed.
+ **/
+s32 ixgbe_update_uc_addr_list(struct ixgbe_hw *hw, u8 *addr_list,
+                              u32 addr_count, ixgbe_mc_addr_itr func)
+{
+       return ixgbe_call_func(hw, hw->mac.ops.update_uc_addr_list, (hw,
+                              addr_list, addr_count, func),
+                              IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_update_mc_addr_list - Updates the MAC's list of multicast addresses
+ *  @hw: pointer to hardware structure
+ *  @mc_addr_list: the list of new multicast addresses
+ *  @mc_addr_count: number of addresses
+ *  @func: iterator function to walk the multicast address list
+ *  @clear: whether to clear the existing multicast list first (assumption
+ *          from the parameter name -- confirm in the implementation)
+ *
+ *  The given list replaces any existing list. Clears the MC addrs from receive
+ *  address registers and the multicast table. Uses unused receive address
+ *  registers for the first multicast addresses, and hashes the rest into the
+ *  multicast table.
+ **/
+s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list,
+                              u32 mc_addr_count, ixgbe_mc_addr_itr func,
+                              bool clear)
+{
+       return ixgbe_call_func(hw, hw->mac.ops.update_mc_addr_list, (hw,
+                              mc_addr_list, mc_addr_count, func, clear),
+                              IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_enable_mc - Enable multicast address in RAR
+ *  @hw: pointer to hardware structure
+ *
+ *  Enables multicast address in RAR and the use of the multicast hash table.
+ *  Dispatches to hw->mac.ops.enable_mc via ixgbe_call_func.
+ **/
+s32 ixgbe_enable_mc(struct ixgbe_hw *hw)
+{
+       return ixgbe_call_func(hw, hw->mac.ops.enable_mc, (hw),
+                              IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_disable_mc - Disable multicast address in RAR
+ *  @hw: pointer to hardware structure
+ *
+ *  Disables multicast address in RAR and the use of the multicast hash table.
+ *  Dispatches to hw->mac.ops.disable_mc via ixgbe_call_func.
+ **/
+s32 ixgbe_disable_mc(struct ixgbe_hw *hw)
+{
+       return ixgbe_call_func(hw, hw->mac.ops.disable_mc, (hw),
+                              IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_clear_vfta - Clear VLAN filter table
+ *  @hw: pointer to hardware structure
+ *
+ *  Clears the VLAN filter table, and the VMDq index associated with the filter
+ **/
+s32 ixgbe_clear_vfta(struct ixgbe_hw *hw)
+{
+       return ixgbe_call_func(hw, hw->mac.ops.clear_vfta, (hw),
+                              IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_set_vfta - Set VLAN filter table
+ *  @hw: pointer to hardware structure
+ *  @vlan: VLAN id to write to VLAN filter
+ *  @vind: VMDq output index that maps queue to VLAN id in VFTA
+ *  @vlan_on: boolean flag to turn on/off VLAN in VFTA
+ *
+ *  Turn on/off specified VLAN in the VLAN filter table.
+ *  Dispatches to hw->mac.ops.set_vfta via ixgbe_call_func.
+ **/
+s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on)
+{
+       return ixgbe_call_func(hw, hw->mac.ops.set_vfta, (hw, vlan, vind,
+                              vlan_on), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_fc_enable - Enable flow control
+ *  @hw: pointer to hardware structure
+ *  @packetbuf_num: packet buffer number (0-7)
+ *
+ *  Configures the flow control settings based on SW configuration.
+ *  Dispatches to hw->mac.ops.fc_enable via ixgbe_call_func.
+ **/
+s32 ixgbe_fc_enable(struct ixgbe_hw *hw, s32 packetbuf_num)
+{
+       return ixgbe_call_func(hw, hw->mac.ops.fc_enable, (hw, packetbuf_num),
+                              IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ * ixgbe_set_fw_drv_ver - Try to send the driver version number to FW
+ * @hw: pointer to hardware structure
+ * @maj: driver major number to be sent to firmware
+ * @min: driver minor number to be sent to firmware
+ * @build: driver build number to be sent to firmware
+ * @ver: driver version number to be sent to firmware
+ **/
+s32 ixgbe_set_fw_drv_ver(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build,
+                         u8 ver)
+{
+       return ixgbe_call_func(hw, hw->mac.ops.set_fw_drv_ver, (hw, maj, min,
+                              build, ver), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_read_analog_reg8 - Reads 8 bit analog register
+ *  @hw: pointer to hardware structure
+ *  @reg: analog register to read
+ *  @val: output, the value read
+ *
+ *  Performs read operation from the analog register specified.
+ **/
+s32 ixgbe_read_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 *val)
+{
+       return ixgbe_call_func(hw, hw->mac.ops.read_analog_reg8, (hw, reg,
+                              val), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_write_analog_reg8 - Writes 8 bit analog register
+ *  @hw: pointer to hardware structure
+ *  @reg: analog register to write
+ *  @val: value to write
+ *
+ *  Performs write operation to Atlas analog register specified.
+ *  Dispatches to hw->mac.ops.write_analog_reg8 via ixgbe_call_func.
+ **/
+s32 ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val)
+{
+       return ixgbe_call_func(hw, hw->mac.ops.write_analog_reg8, (hw, reg,
+                              val), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_init_uta_tables - Initializes Unicast Table Arrays.
+ *  @hw: pointer to hardware structure
+ *
+ *  Initializes the Unicast Table Arrays to zero on device load.  This
+ *  is part of the Rx init addr execution path.
+ *  Dispatches to hw->mac.ops.init_uta_tables via ixgbe_call_func.
+ **/
+s32 ixgbe_init_uta_tables(struct ixgbe_hw *hw)
+{
+       return ixgbe_call_func(hw, hw->mac.ops.init_uta_tables, (hw),
+                              IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_read_i2c_byte - Reads 8 bit word over I2C at specified device address
+ *  @hw: pointer to hardware structure
+ *  @byte_offset: byte offset to read
+ *  @dev_addr: I2C address of the target device
+ *  @data: output, the value read
+ *
+ *  Performs byte read operation from SFP module's EEPROM over I2C interface.
+ **/
+s32 ixgbe_read_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr,
+                        u8 *data)
+{
+       return ixgbe_call_func(hw, hw->phy.ops.read_i2c_byte, (hw, byte_offset,
+                              dev_addr, data), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_write_i2c_byte - Writes 8 bit word over I2C
+ *  @hw: pointer to hardware structure
+ *  @byte_offset: byte offset to write
+ *  @dev_addr: I2C address of the target device
+ *  @data: value to write
+ *
+ *  Performs byte write operation to SFP module's EEPROM over I2C interface
+ *  at a specified device address.
+ **/
+s32 ixgbe_write_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr,
+                         u8 data)
+{
+       return ixgbe_call_func(hw, hw->phy.ops.write_i2c_byte, (hw, byte_offset,
+                              dev_addr, data), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_write_i2c_eeprom - Writes 8 bit EEPROM word over I2C interface
+ *  @hw: pointer to hardware structure
+ *  @byte_offset: EEPROM byte offset to write
+ *  @eeprom_data: value to write
+ *
+ *  Performs byte write operation to SFP module's EEPROM over I2C interface.
+ *  Dispatches to hw->phy.ops.write_i2c_eeprom via ixgbe_call_func.
+ **/
+s32 ixgbe_write_i2c_eeprom(struct ixgbe_hw *hw,
+                           u8 byte_offset, u8 eeprom_data)
+{
+       return ixgbe_call_func(hw, hw->phy.ops.write_i2c_eeprom,
+                              (hw, byte_offset, eeprom_data),
+                              IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_read_i2c_eeprom - Reads 8 bit EEPROM word over I2C interface
+ *  @hw: pointer to hardware structure
+ *  @byte_offset: EEPROM byte offset to read
+ *  @eeprom_data: output, the value read
+ *
+ *  Performs byte read operation from SFP module's EEPROM over I2C interface.
+ **/
+s32 ixgbe_read_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 *eeprom_data)
+{
+       return ixgbe_call_func(hw, hw->phy.ops.read_i2c_eeprom,
+                             (hw, byte_offset, eeprom_data),
+                             IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_get_supported_physical_layer - Returns physical layer type
+ *  @hw: pointer to hardware structure
+ *
+ *  Determines physical layer capabilities of the current configuration.
+ *  Note: the fallback value here is IXGBE_PHYSICAL_LAYER_UNKNOWN, not
+ *  IXGBE_NOT_IMPLEMENTED, since the return type is a capability mask.
+ **/
+u32 ixgbe_get_supported_physical_layer(struct ixgbe_hw *hw)
+{
+       return ixgbe_call_func(hw, hw->mac.ops.get_supported_physical_layer,
+                              (hw), IXGBE_PHYSICAL_LAYER_UNKNOWN);
+}
+
+/**
+ *  ixgbe_enable_rx_dma - Enables Rx DMA unit, dependent on device specifics
+ *  @hw: pointer to hardware structure
+ *  @regval: bitfield to write to the Rx DMA register
+ *
+ *  Enables the Rx DMA unit of the device.
+ **/
+s32 ixgbe_enable_rx_dma(struct ixgbe_hw *hw, u32 regval)
+{
+       return ixgbe_call_func(hw, hw->mac.ops.enable_rx_dma,
+                              (hw, regval), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_acquire_swfw_semaphore - Acquire SWFW semaphore
+ *  @hw: pointer to hardware structure
+ *  @mask: Mask to specify which semaphore to acquire
+ *
+ *  Acquires the SWFW semaphore through SW_FW_SYNC register for the specified
+ *  function (CSR, PHY0, PHY1, EEPROM, Flash).
+ *  Pair each successful acquire with ixgbe_release_swfw_semaphore().
+ **/
+s32 ixgbe_acquire_swfw_semaphore(struct ixgbe_hw *hw, u16 mask)
+{
+       return ixgbe_call_func(hw, hw->mac.ops.acquire_swfw_sync,
+                              (hw, mask), IXGBE_NOT_IMPLEMENTED);
+}
+
+/**
+ *  ixgbe_release_swfw_semaphore - Release SWFW semaphore
+ *  @hw: pointer to hardware structure
+ *  @mask: Mask to specify which semaphore to release
+ *
+ *  Releases the SWFW semaphore through SW_FW_SYNC register for the specified
+ *  function (CSR, PHY0, PHY1, EEPROM, Flash).
+ *  No-op when the MAC does not provide a release_swfw_sync op.
+ **/
+void ixgbe_release_swfw_semaphore(struct ixgbe_hw *hw, u16 mask)
+{
+       if (hw->mac.ops.release_swfw_sync)
+               hw->mac.ops.release_swfw_sync(hw, mask);
+}
+
diff --git a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_api.h b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_api.h
new file mode 100644 (file)
index 0000000..c41dd36
--- /dev/null
@@ -0,0 +1,168 @@
+/******************************************************************************
+
+  Copyright (c) 2001-2010, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#ifndef _IXGBE_API_H_
+#define _IXGBE_API_H_
+
+#include "ixgbe_type.h"
+
+s32 ixgbe_init_shared_code(struct ixgbe_hw *hw);
+
+s32 ixgbe_set_mac_type(struct ixgbe_hw *hw);
+s32 ixgbe_init_hw(struct ixgbe_hw *hw);
+s32 ixgbe_reset_hw(struct ixgbe_hw *hw);
+s32 ixgbe_start_hw(struct ixgbe_hw *hw);
+void ixgbe_enable_relaxed_ordering(struct ixgbe_hw *hw);
+s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw);
+enum ixgbe_media_type ixgbe_get_media_type(struct ixgbe_hw *hw);
+s32 ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr);
+s32 ixgbe_get_bus_info(struct ixgbe_hw *hw);
+u32 ixgbe_get_num_of_tx_queues(struct ixgbe_hw *hw);
+u32 ixgbe_get_num_of_rx_queues(struct ixgbe_hw *hw);
+s32 ixgbe_stop_adapter(struct ixgbe_hw *hw);
+s32 ixgbe_read_pba_num(struct ixgbe_hw *hw, u32 *pba_num);
+s32 ixgbe_read_pba_string(struct ixgbe_hw *hw, u8 *pba_num, u32 pba_num_size);
+
+s32 ixgbe_identify_phy(struct ixgbe_hw *hw);
+s32 ixgbe_reset_phy(struct ixgbe_hw *hw);
+s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
+                       u16 *phy_data);
+s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
+                        u16 phy_data);
+
+s32 ixgbe_setup_phy_link(struct ixgbe_hw *hw);
+s32 ixgbe_check_phy_link(struct ixgbe_hw *hw,
+                         ixgbe_link_speed *speed,
+                         bool *link_up);
+s32 ixgbe_setup_phy_link_speed(struct ixgbe_hw *hw,
+                               ixgbe_link_speed speed,
+                               bool autoneg,
+                               bool autoneg_wait_to_complete);
+void ixgbe_disable_tx_laser(struct ixgbe_hw *hw);
+void ixgbe_enable_tx_laser(struct ixgbe_hw *hw);
+void ixgbe_flap_tx_laser(struct ixgbe_hw *hw);
+s32 ixgbe_setup_link(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+                           bool autoneg, bool autoneg_wait_to_complete);
+s32 ixgbe_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+                     bool *link_up, bool link_up_wait_to_complete);
+s32 ixgbe_get_link_capabilities(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+                            bool *autoneg);
+s32 ixgbe_led_on(struct ixgbe_hw *hw, u32 index);
+s32 ixgbe_led_off(struct ixgbe_hw *hw, u32 index);
+s32 ixgbe_blink_led_start(struct ixgbe_hw *hw, u32 index);
+s32 ixgbe_blink_led_stop(struct ixgbe_hw *hw, u32 index);
+
+s32 ixgbe_init_eeprom_params(struct ixgbe_hw *hw);
+s32 ixgbe_write_eeprom(struct ixgbe_hw *hw, u16 offset, u16 data);
+s32 ixgbe_write_eeprom_buffer(struct ixgbe_hw *hw, u16 offset,
+                              u16 words, u16 *data);
+s32 ixgbe_read_eeprom(struct ixgbe_hw *hw, u16 offset, u16 *data);
+s32 ixgbe_read_eeprom_buffer(struct ixgbe_hw *hw, u16 offset,
+                             u16 words, u16 *data);
+
+s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val);
+s32 ixgbe_update_eeprom_checksum(struct ixgbe_hw *hw);
+
+s32 ixgbe_insert_mac_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq);
+s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
+                  u32 enable_addr);
+s32 ixgbe_clear_rar(struct ixgbe_hw *hw, u32 index);
+s32 ixgbe_set_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
+s32 ixgbe_clear_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
+s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw);
+u32 ixgbe_get_num_rx_addrs(struct ixgbe_hw *hw);
+s32 ixgbe_update_uc_addr_list(struct ixgbe_hw *hw, u8 *addr_list,
+                              u32 addr_count, ixgbe_mc_addr_itr func);
+s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list,
+                              u32 mc_addr_count, ixgbe_mc_addr_itr func,
+                              bool clear);
+void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr_list, u32 vmdq);
+s32 ixgbe_enable_mc(struct ixgbe_hw *hw);
+s32 ixgbe_disable_mc(struct ixgbe_hw *hw);
+s32 ixgbe_clear_vfta(struct ixgbe_hw *hw);
+s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan,
+                   u32 vind, bool vlan_on);
+
+s32 ixgbe_fc_enable(struct ixgbe_hw *hw, s32 packetbuf_num);
+s32 ixgbe_set_fw_drv_ver(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build,
+                         u8 ver);
+void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr);
+s32 ixgbe_get_phy_firmware_version(struct ixgbe_hw *hw,
+                                   u16 *firmware_version);
+s32 ixgbe_read_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 *val);
+s32 ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val);
+s32 ixgbe_init_uta_tables(struct ixgbe_hw *hw);
+s32 ixgbe_read_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 *eeprom_data);
+u32 ixgbe_get_supported_physical_layer(struct ixgbe_hw *hw);
+s32 ixgbe_enable_rx_dma(struct ixgbe_hw *hw, u32 regval);
+s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
+s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl);
+s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl);
+s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
+                                          union ixgbe_atr_hash_dword input,
+                                         union ixgbe_atr_hash_dword common,
+                                          u8 queue);
+s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
+                                   union ixgbe_atr_input *input_mask);
+s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
+                                         union ixgbe_atr_input *input,
+                                         u16 soft_id, u8 queue);
+s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
+                                         union ixgbe_atr_input *input,
+                                         u16 soft_id);
+s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
+                                        union ixgbe_atr_input *input,
+                                        union ixgbe_atr_input *mask,
+                                        u16 soft_id,
+                                        u8 queue);
+void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
+                                         union ixgbe_atr_input *mask);
+u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
+                                     union ixgbe_atr_hash_dword common);
+s32 ixgbe_read_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr,
+                        u8 *data);
+s32 ixgbe_write_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr,
+                         u8 data);
+s32 ixgbe_write_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 eeprom_data);
+s32 ixgbe_get_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr);
+s32 ixgbe_set_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr);
+s32 ixgbe_get_device_caps(struct ixgbe_hw *hw, u16 *device_caps);
+s32 ixgbe_acquire_swfw_semaphore(struct ixgbe_hw *hw, u16 mask);
+void ixgbe_release_swfw_semaphore(struct ixgbe_hw *hw, u16 mask);
+s32 ixgbe_get_wwn_prefix(struct ixgbe_hw *hw, u16 *wwnn_prefix,
+                         u16 *wwpn_prefix);
+s32 ixgbe_get_fcoe_boot_status(struct ixgbe_hw *hw, u16 *bs);
+
+
+#endif /* _IXGBE_API_H_ */
diff --git a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_common.c b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_common.c
new file mode 100644 (file)
index 0000000..e612f6a
--- /dev/null
@@ -0,0 +1,4049 @@
+/******************************************************************************
+
+  Copyright (c) 2001-2010, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#include "ixgbe_common.h"
+#include "ixgbe_phy.h"
+#include "ixgbe_api.h"
+
+static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
+static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
+static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
+static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw);
+static void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
+static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
+                                        u16 count);
+static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
+static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
+static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
+static void ixgbe_release_eeprom(struct ixgbe_hw *hw);
+
+static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
+static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
+                                        u16 *san_mac_offset);
+static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw);
+static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw);
+static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw);
+static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw);
+static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
+                             u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm);
+static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num);
+static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
+                                             u16 words, u16 *data);
+static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
+                                              u16 words, u16 *data);
+static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
+                                                 u16 offset);
+
+
+s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan);
+
+/**
+ *  ixgbe_init_ops_generic - Inits function ptrs
+ *  @hw: pointer to the hardware structure
+ *
+ *  Initialize the function pointers.
+ **/
+s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw)
+{
+       struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+       struct ixgbe_mac_info *mac = &hw->mac;
+       u32 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+
+       DEBUGFUNC("ixgbe_init_ops_generic");
+
+       /* EEPROM */
+       eeprom->ops.init_params = &ixgbe_init_eeprom_params_generic;
+       /* If EEPROM is valid (bit 8 = 1), use EERD otherwise use bit bang */
+       if (eec & IXGBE_EEC_PRES) {
+               eeprom->ops.read = &ixgbe_read_eerd_generic;
+               eeprom->ops.read_buffer = &ixgbe_read_eerd_buffer_generic;
+       } else {
+               eeprom->ops.read = &ixgbe_read_eeprom_bit_bang_generic;
+               eeprom->ops.read_buffer =
+                                &ixgbe_read_eeprom_buffer_bit_bang_generic;
+       }
+       eeprom->ops.write = &ixgbe_write_eeprom_generic;
+       eeprom->ops.write_buffer = &ixgbe_write_eeprom_buffer_bit_bang_generic;
+       eeprom->ops.validate_checksum =
+                                     &ixgbe_validate_eeprom_checksum_generic;
+       eeprom->ops.update_checksum = &ixgbe_update_eeprom_checksum_generic;
+       eeprom->ops.calc_checksum = &ixgbe_calc_eeprom_checksum_generic;
+
+       /* MAC */
+       mac->ops.init_hw = &ixgbe_init_hw_generic;
+       mac->ops.reset_hw = NULL;
+       mac->ops.start_hw = &ixgbe_start_hw_generic;
+       mac->ops.clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic;
+       mac->ops.get_media_type = NULL;
+       mac->ops.get_supported_physical_layer = NULL;
+       mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_generic;
+       mac->ops.get_mac_addr = &ixgbe_get_mac_addr_generic;
+       mac->ops.stop_adapter = &ixgbe_stop_adapter_generic;
+       mac->ops.get_bus_info = &ixgbe_get_bus_info_generic;
+       mac->ops.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie;
+       mac->ops.acquire_swfw_sync = &ixgbe_acquire_swfw_sync;
+       mac->ops.release_swfw_sync = &ixgbe_release_swfw_sync;
+
+       /* LEDs */
+       mac->ops.led_on = &ixgbe_led_on_generic;
+       mac->ops.led_off = &ixgbe_led_off_generic;
+       mac->ops.blink_led_start = &ixgbe_blink_led_start_generic;
+       mac->ops.blink_led_stop = &ixgbe_blink_led_stop_generic;
+
+       /* RAR, Multicast, VLAN */
+       mac->ops.set_rar = &ixgbe_set_rar_generic;
+       mac->ops.clear_rar = &ixgbe_clear_rar_generic;
+       mac->ops.insert_mac_addr = NULL;
+       mac->ops.set_vmdq = NULL;
+       mac->ops.clear_vmdq = NULL;
+       mac->ops.init_rx_addrs = &ixgbe_init_rx_addrs_generic;
+       mac->ops.update_uc_addr_list = &ixgbe_update_uc_addr_list_generic;
+       mac->ops.update_mc_addr_list = &ixgbe_update_mc_addr_list_generic;
+       mac->ops.enable_mc = &ixgbe_enable_mc_generic;
+       mac->ops.disable_mc = &ixgbe_disable_mc_generic;
+       mac->ops.clear_vfta = NULL;
+       mac->ops.set_vfta = NULL;
+       mac->ops.init_uta_tables = NULL;
+
+       /* Flow Control */
+       mac->ops.fc_enable = &ixgbe_fc_enable_generic;
+
+       /* Link */
+       mac->ops.get_link_capabilities = NULL;
+       mac->ops.setup_link = NULL;
+       mac->ops.check_link = NULL;
+
+       return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
+ *  @hw: pointer to hardware structure
+ *
+ *  Starts the hardware by filling the bus info structure and media type, clears
+ *  all on chip counters, initializes receive address registers, multicast
+ *  table, VLAN filter table, calls routine to set up link and flow control
+ *  settings, and leaves transmit and receive units disabled and uninitialized
+ **/
+s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
+{
+       u32 ctrl_ext;
+
+       DEBUGFUNC("ixgbe_start_hw_generic");
+
+       /* Set the media type */
+       hw->phy.media_type = hw->mac.ops.get_media_type(hw);
+
+       /* PHY ops initialization must be done in reset_hw() */
+
+       /* Clear the VLAN filter table */
+       hw->mac.ops.clear_vfta(hw);
+
+       /* Clear statistics registers */
+       hw->mac.ops.clear_hw_cntrs(hw);
+
+       /* Set No Snoop Disable */
+       ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
+       ctrl_ext |= IXGBE_CTRL_EXT_NS_DIS;
+       IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
+       IXGBE_WRITE_FLUSH(hw);
+
+       /* Setup flow control */
+       ixgbe_setup_fc(hw, 0);
+
+       /* Clear adapter stopped flag */
+       hw->adapter_stopped = FALSE;
+
+       return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_start_hw_gen2 - Init sequence for common device family
+ *  @hw: pointer to hw structure
+ *
+ * Performs the init sequence common to the second generation
+ * of 10 GbE devices.
+ * Devices in the second generation:
+ *     82599
+ *     X540
+ **/
+s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
+{
+       u32 i;
+       u32 regval;
+
+       /* Clear the rate limiters */
+       for (i = 0; i < hw->mac.max_tx_queues; i++) {
+               IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
+               IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
+       }
+       IXGBE_WRITE_FLUSH(hw);
+
+       /* Disable relaxed ordering */
+       for (i = 0; i < hw->mac.max_tx_queues; i++) {
+               regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
+               regval &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
+               IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
+       }
+
+       for (i = 0; i < hw->mac.max_rx_queues; i++) {
+               regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
+               regval &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
+                                       IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
+               IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
+       }
+
+       return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_init_hw_generic - Generic hardware initialization
+ *  @hw: pointer to hardware structure
+ *
+ *  Initialize the hardware by resetting the hardware, filling the bus info
+ *  structure and media type, clears all on chip counters, initializes receive
+ *  address registers, multicast table, VLAN filter table, calls routine to set
+ *  up link and flow control settings, and leaves transmit and receive units
+ *  disabled and uninitialized
+ **/
+s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw)
+{
+       s32 status;
+
+       DEBUGFUNC("ixgbe_init_hw_generic");
+
+       /* Reset the hardware */
+       status = hw->mac.ops.reset_hw(hw);
+
+       if (status == IXGBE_SUCCESS) {
+               /* Start the HW */
+               status = hw->mac.ops.start_hw(hw);
+       }
+
+       return status;
+}
+
+/**
+ *  ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters
+ *  @hw: pointer to hardware structure
+ *
+ *  Clears all hardware statistics counters by reading them from the hardware
+ *  Statistics counters are clear on read.
+ **/
+s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
+{
+       u16 i = 0;
+
+       DEBUGFUNC("ixgbe_clear_hw_cntrs_generic");
+
+       IXGBE_READ_REG(hw, IXGBE_CRCERRS);
+       IXGBE_READ_REG(hw, IXGBE_ILLERRC);
+       IXGBE_READ_REG(hw, IXGBE_ERRBC);
+       IXGBE_READ_REG(hw, IXGBE_MSPDC);
+       for (i = 0; i < 8; i++)
+               IXGBE_READ_REG(hw, IXGBE_MPC(i));
+
+       IXGBE_READ_REG(hw, IXGBE_MLFC);
+       IXGBE_READ_REG(hw, IXGBE_MRFC);
+       IXGBE_READ_REG(hw, IXGBE_RLEC);
+       IXGBE_READ_REG(hw, IXGBE_LXONTXC);
+       IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
+       if (hw->mac.type >= ixgbe_mac_82599EB) {
+               IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
+               IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
+       } else {
+               IXGBE_READ_REG(hw, IXGBE_LXONRXC);
+               IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
+       }
+
+       for (i = 0; i < 8; i++) {
+               IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
+               IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
+               if (hw->mac.type >= ixgbe_mac_82599EB) {
+                       IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
+                       IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
+               } else {
+                       IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
+                       IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
+               }
+       }
+       if (hw->mac.type >= ixgbe_mac_82599EB)
+               for (i = 0; i < 8; i++)
+                       IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
+       IXGBE_READ_REG(hw, IXGBE_PRC64);
+       IXGBE_READ_REG(hw, IXGBE_PRC127);
+       IXGBE_READ_REG(hw, IXGBE_PRC255);
+       IXGBE_READ_REG(hw, IXGBE_PRC511);
+       IXGBE_READ_REG(hw, IXGBE_PRC1023);
+       IXGBE_READ_REG(hw, IXGBE_PRC1522);
+       IXGBE_READ_REG(hw, IXGBE_GPRC);
+       IXGBE_READ_REG(hw, IXGBE_BPRC);
+       IXGBE_READ_REG(hw, IXGBE_MPRC);
+       IXGBE_READ_REG(hw, IXGBE_GPTC);
+       IXGBE_READ_REG(hw, IXGBE_GORCL);
+       IXGBE_READ_REG(hw, IXGBE_GORCH);
+       IXGBE_READ_REG(hw, IXGBE_GOTCL);
+       IXGBE_READ_REG(hw, IXGBE_GOTCH);
+       if (hw->mac.type == ixgbe_mac_82598EB)
+               for (i = 0; i < 8; i++)
+                       IXGBE_READ_REG(hw, IXGBE_RNBC(i));
+       IXGBE_READ_REG(hw, IXGBE_RUC);
+       IXGBE_READ_REG(hw, IXGBE_RFC);
+       IXGBE_READ_REG(hw, IXGBE_ROC);
+       IXGBE_READ_REG(hw, IXGBE_RJC);
+       IXGBE_READ_REG(hw, IXGBE_MNGPRC);
+       IXGBE_READ_REG(hw, IXGBE_MNGPDC);
+       IXGBE_READ_REG(hw, IXGBE_MNGPTC);
+       IXGBE_READ_REG(hw, IXGBE_TORL);
+       IXGBE_READ_REG(hw, IXGBE_TORH);
+       IXGBE_READ_REG(hw, IXGBE_TPR);
+       IXGBE_READ_REG(hw, IXGBE_TPT);
+       IXGBE_READ_REG(hw, IXGBE_PTC64);
+       IXGBE_READ_REG(hw, IXGBE_PTC127);
+       IXGBE_READ_REG(hw, IXGBE_PTC255);
+       IXGBE_READ_REG(hw, IXGBE_PTC511);
+       IXGBE_READ_REG(hw, IXGBE_PTC1023);
+       IXGBE_READ_REG(hw, IXGBE_PTC1522);
+       IXGBE_READ_REG(hw, IXGBE_MPTC);
+       IXGBE_READ_REG(hw, IXGBE_BPTC);
+       for (i = 0; i < 16; i++) {
+               IXGBE_READ_REG(hw, IXGBE_QPRC(i));
+               IXGBE_READ_REG(hw, IXGBE_QPTC(i));
+               if (hw->mac.type >= ixgbe_mac_82599EB) {
+                       IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
+                       IXGBE_READ_REG(hw, IXGBE_QBRC_H(i));
+                       IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
+                       IXGBE_READ_REG(hw, IXGBE_QBTC_H(i));
+                       IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
+               } else {
+                       IXGBE_READ_REG(hw, IXGBE_QBRC(i));
+                       IXGBE_READ_REG(hw, IXGBE_QBTC(i));
+               }
+       }
+
+       if (hw->mac.type == ixgbe_mac_X540) {
+               if (hw->phy.id == 0)
+                       ixgbe_identify_phy(hw);
+               hw->phy.ops.read_reg(hw, 0x3, IXGBE_PCRC8ECL, &i);
+               hw->phy.ops.read_reg(hw, 0x3, IXGBE_PCRC8ECH, &i);
+               hw->phy.ops.read_reg(hw, 0x3, IXGBE_LDPCECL, &i);
+               hw->phy.ops.read_reg(hw, 0x3, IXGBE_LDPCECH, &i);
+       }
+
+       return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_read_pba_string_generic - Reads part number string from EEPROM
+ *  @hw: pointer to hardware structure
+ *  @pba_num: stores the part number string from the EEPROM
+ *  @pba_num_size: part number string buffer length
+ *
+ *  Reads the part number string from the EEPROM.
+ **/
+s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
+                                  u32 pba_num_size)
+{
+       s32 ret_val;
+       u16 data;
+       u16 pba_ptr;
+       u16 offset;
+       u16 length;
+
+       DEBUGFUNC("ixgbe_read_pba_string_generic");
+
+       if (pba_num == NULL) {
+               DEBUGOUT("PBA string buffer was null\n");
+               return IXGBE_ERR_INVALID_ARGUMENT;
+       }
+
+       ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
+       if (ret_val) {
+               DEBUGOUT("NVM Read Error\n");
+               return ret_val;
+       }
+
+       ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr);
+       if (ret_val) {
+               DEBUGOUT("NVM Read Error\n");
+               return ret_val;
+       }
+
+       /*
+        * if data is not ptr guard the PBA must be in legacy format which
+        * means pba_ptr is actually our second data word for the PBA number
+        * and we can decode it into an ascii string
+        */
+       if (data != IXGBE_PBANUM_PTR_GUARD) {
+               DEBUGOUT("NVM PBA number is not stored as string\n");
+
+               /* we will need 11 characters to store the PBA */
+               if (pba_num_size < 11) {
+                       DEBUGOUT("PBA string buffer too small\n");
+                       return IXGBE_ERR_NO_SPACE;
+               }
+
+               /* extract hex string from data and pba_ptr */
+               pba_num[0] = (data >> 12) & 0xF;
+               pba_num[1] = (data >> 8) & 0xF;
+               pba_num[2] = (data >> 4) & 0xF;
+               pba_num[3] = data & 0xF;
+               pba_num[4] = (pba_ptr >> 12) & 0xF;
+               pba_num[5] = (pba_ptr >> 8) & 0xF;
+               pba_num[6] = '-';
+               pba_num[7] = 0;
+               pba_num[8] = (pba_ptr >> 4) & 0xF;
+               pba_num[9] = pba_ptr & 0xF;
+
+               /* put a null character on the end of our string */
+               pba_num[10] = '\0';
+
+               /* switch all the data but the '-' to hex char */
+               for (offset = 0; offset < 10; offset++) {
+                       if (pba_num[offset] < 0xA)
+                               pba_num[offset] += '0';
+                       else if (pba_num[offset] < 0x10)
+                               pba_num[offset] += 'A' - 0xA;
+               }
+
+               return IXGBE_SUCCESS;
+       }
+
+       ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length);
+       if (ret_val) {
+               DEBUGOUT("NVM Read Error\n");
+               return ret_val;
+       }
+
+       if (length == 0xFFFF || length == 0) {
+               DEBUGOUT("NVM PBA number section invalid length\n");
+               return IXGBE_ERR_PBA_SECTION;
+       }
+
+       /* check if pba_num buffer is big enough */
+       if (pba_num_size  < (((u32)length * 2) - 1)) {
+               DEBUGOUT("PBA string buffer too small\n");
+               return IXGBE_ERR_NO_SPACE;
+       }
+
+       /* trim pba length from start of string */
+       pba_ptr++;
+       length--;
+
+       for (offset = 0; offset < length; offset++) {
+               ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data);
+               if (ret_val) {
+                       DEBUGOUT("NVM Read Error\n");
+                       return ret_val;
+               }
+               pba_num[offset * 2] = (u8)(data >> 8);
+               pba_num[(offset * 2) + 1] = (u8)(data & 0xFF);
+       }
+       pba_num[offset * 2] = '\0';
+
+       return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_read_pba_num_generic - Reads part number from EEPROM
+ *  @hw: pointer to hardware structure
+ *  @pba_num: stores the part number from the EEPROM
+ *
+ *  Reads the part number from the EEPROM.
+ **/
+s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num)
+{
+       s32 ret_val;
+       u16 data;
+
+       DEBUGFUNC("ixgbe_read_pba_num_generic");
+
+       ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
+       if (ret_val) {
+               DEBUGOUT("NVM Read Error\n");
+               return ret_val;
+       } else if (data == IXGBE_PBANUM_PTR_GUARD) {
+               DEBUGOUT("NVM Not supported\n");
+               return IXGBE_NOT_IMPLEMENTED;
+       }
+       *pba_num = (u32)(data << 16);
+
+       ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &data);
+       if (ret_val) {
+               DEBUGOUT("NVM Read Error\n");
+               return ret_val;
+       }
+       *pba_num |= data;
+
+       return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_get_mac_addr_generic - Generic get MAC address
+ *  @hw: pointer to hardware structure
+ *  @mac_addr: Adapter MAC address
+ *
+ *  Reads the adapter's MAC address from first Receive Address Register (RAR0)
+ *  A reset of the adapter must be performed prior to calling this function
+ *  in order for the MAC address to have been loaded from the EEPROM into RAR0
+ **/
+s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr)
+{
+       u32 rar_high;
+       u32 rar_low;
+       u16 i;
+
+       DEBUGFUNC("ixgbe_get_mac_addr_generic");
+
+       rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0));
+       rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0));
+
+       for (i = 0; i < 4; i++)
+               mac_addr[i] = (u8)(rar_low >> (i*8));
+
+       for (i = 0; i < 2; i++)
+               mac_addr[i+4] = (u8)(rar_high >> (i*8));
+
+       return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_get_bus_info_generic - Generic set PCI bus info
+ *  @hw: pointer to hardware structure
+ *
+ *  Sets the PCI bus info (speed, width, type) within the ixgbe_hw structure
+ **/
+s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
+{
+       struct ixgbe_mac_info *mac = &hw->mac;
+       u16 link_status;
+
+       DEBUGFUNC("ixgbe_get_bus_info_generic");
+
+       hw->bus.type = ixgbe_bus_type_pci_express;
+
+       /* Get the negotiated link width and speed from PCI config space */
+       link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS);
+
+       switch (link_status & IXGBE_PCI_LINK_WIDTH) {
+       case IXGBE_PCI_LINK_WIDTH_1:
+               hw->bus.width = ixgbe_bus_width_pcie_x1;
+               break;
+       case IXGBE_PCI_LINK_WIDTH_2:
+               hw->bus.width = ixgbe_bus_width_pcie_x2;
+               break;
+       case IXGBE_PCI_LINK_WIDTH_4:
+               hw->bus.width = ixgbe_bus_width_pcie_x4;
+               break;
+       case IXGBE_PCI_LINK_WIDTH_8:
+               hw->bus.width = ixgbe_bus_width_pcie_x8;
+               break;
+       default:
+               hw->bus.width = ixgbe_bus_width_unknown;
+               break;
+       }
+
+       switch (link_status & IXGBE_PCI_LINK_SPEED) {
+       case IXGBE_PCI_LINK_SPEED_2500:
+               hw->bus.speed = ixgbe_bus_speed_2500;
+               break;
+       case IXGBE_PCI_LINK_SPEED_5000:
+               hw->bus.speed = ixgbe_bus_speed_5000;
+               break;
+       default:
+               hw->bus.speed = ixgbe_bus_speed_unknown;
+               break;
+       }
+
+       mac->ops.set_lan_id(hw);
+
+       return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
+ *  @hw: pointer to the HW structure
+ *
+ *  Determines the LAN function id by reading memory-mapped registers
+ *  and swaps the port value if requested.
+ **/
+void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
+{
+       struct ixgbe_bus_info *bus = &hw->bus;
+       u32 reg;
+
+       DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie");
+
+       reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
+       bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT;
+       bus->lan_id = bus->func;
+
+       /* check for a port swap */
+       reg = IXGBE_READ_REG(hw, IXGBE_FACTPS);
+       if (reg & IXGBE_FACTPS_LFS)
+               bus->func ^= 0x1;
+}
+
+/**
+ *  ixgbe_stop_adapter_generic - Generic stop Tx/Rx units
+ *  @hw: pointer to hardware structure
+ *
+ *  Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
+ *  disables transmit and receive units. The adapter_stopped flag is used by
+ *  the shared code and drivers to determine if the adapter is in a stopped
+ *  state and should not touch the hardware.
+ **/
+s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
+{
+       u32 reg_val;
+       u16 i;
+
+       DEBUGFUNC("ixgbe_stop_adapter_generic");
+
+       /*
+        * Set the adapter_stopped flag so other driver functions stop touching
+        * the hardware
+        */
+       hw->adapter_stopped = TRUE;
+
+       /* Disable the receive unit */
+       IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, 0);
+
+       /* Clear interrupt mask to stop interrupts from being generated */
+       IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
+
+       /* Clear any pending interrupts, flush previous writes */
+       IXGBE_READ_REG(hw, IXGBE_EICR);
+
+       /* Disable the transmit unit.  Each queue must be disabled. */
+       for (i = 0; i < hw->mac.max_tx_queues; i++)
+               IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), IXGBE_TXDCTL_SWFLSH);
+
+       /* Disable the receive unit by stopping each queue */
+       for (i = 0; i < hw->mac.max_rx_queues; i++) {
+               reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
+               reg_val &= ~IXGBE_RXDCTL_ENABLE;
+               reg_val |= IXGBE_RXDCTL_SWFLSH;
+               IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val);
+       }
+
+       /* flush all queues disables */
+       IXGBE_WRITE_FLUSH(hw);
+       msec_delay(2);
+
+       /*
+        * Prevent the PCI-E bus from hanging by disabling PCI-E master
+        * access and verify no pending requests
+        */
+       return ixgbe_disable_pcie_master(hw);
+}
+
+/**
+ *  ixgbe_led_on_generic - Turns on the software controllable LEDs.
+ *  @hw: pointer to hardware structure
+ *  @index: led number to turn on
+ **/
+s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
+{
+       u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+
+       DEBUGFUNC("ixgbe_led_on_generic");
+
+       /* To turn on the LED, set mode to ON. */
+       led_reg &= ~IXGBE_LED_MODE_MASK(index);
+       led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index);
+       IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
+       IXGBE_WRITE_FLUSH(hw);
+
+       return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_led_off_generic - Turns off the software controllable LEDs.
+ *  @hw: pointer to hardware structure
+ *  @index: led number to turn off
+ **/
+s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
+{
+       u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+
+       DEBUGFUNC("ixgbe_led_off_generic");
+
+       /* To turn off the LED, set mode to OFF. */
+       led_reg &= ~IXGBE_LED_MODE_MASK(index);
+       led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index);
+       IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
+       IXGBE_WRITE_FLUSH(hw);
+
+       return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_init_eeprom_params_generic - Initialize EEPROM params
+ *  @hw: pointer to hardware structure
+ *
+ *  Initializes the EEPROM parameters ixgbe_eeprom_info within the
+ *  ixgbe_hw struct in order to set up EEPROM access.
+ **/
+s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
+{
+       struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+       u32 eec;
+       u16 eeprom_size;
+
+       DEBUGFUNC("ixgbe_init_eeprom_params_generic");
+
+       if (eeprom->type == ixgbe_eeprom_uninitialized) {
+               eeprom->type = ixgbe_eeprom_none;
+               /* Set default semaphore delay to 10ms which is a well
+                * tested value */
+               eeprom->semaphore_delay = 10;
+               /* Clear EEPROM page size, it will be initialized as needed */
+               eeprom->word_page_size = 0;
+
+               /*
+                * Check for EEPROM present first.
+                * If not present leave as none
+                */
+               eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+               if (eec & IXGBE_EEC_PRES) {
+                       eeprom->type = ixgbe_eeprom_spi;
+
+                       /*
+                        * SPI EEPROM is assumed here.  This code would need to
+                        * change if a future EEPROM is not SPI.
+                        */
+                       eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
+                                           IXGBE_EEC_SIZE_SHIFT);
+                       eeprom->word_size = 1 << (eeprom_size +
+                                            IXGBE_EEPROM_WORD_SIZE_SHIFT);
+               }
+
+               if (eec & IXGBE_EEC_ADDR_SIZE)
+                       eeprom->address_bits = 16;
+               else
+                       eeprom->address_bits = 8;
+               DEBUGOUT3("Eeprom params: type = %d, size = %d, address bits: "
+                         "%d\n", eeprom->type, eeprom->word_size,
+                         eeprom->address_bits);
+       }
+
+       return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_write_eeprom_buffer_bit_bang_generic - Write EEPROM using bit-bang
+ *  @hw: pointer to hardware structure
+ *  @offset: offset within the EEPROM to write
+ *  @words: number of word(s)
+ *  @data: 16 bit word(s) to write to EEPROM
+ *
+ *  Writes 16 bit word(s) to EEPROM through bit-bang method
+ **/
+s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+                                               u16 words, u16 *data)
+{
+       s32 status = IXGBE_SUCCESS;
+       u16 i, count;
+
+       DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang_generic");
+
+       hw->eeprom.ops.init_params(hw);
+
+       if (words == 0) {
+               status = IXGBE_ERR_INVALID_ARGUMENT;
+               goto out;
+       }
+
+       if (offset + words > hw->eeprom.word_size) {
+               status = IXGBE_ERR_EEPROM;
+               goto out;
+       }
+
+       /*
+        * The EEPROM page size cannot be queried from the chip. We do lazy
+        * initialization. It is worth doing when we write a large buffer.
+        */
+       if ((hw->eeprom.word_page_size == 0) &&
+           (words > IXGBE_EEPROM_PAGE_SIZE_MAX))
+               ixgbe_detect_eeprom_page_size_generic(hw, offset);
+
+       /*
+        * We cannot hold synchronization semaphores for too long
+        * to avoid other entity starvation. However it is more efficient
+        * to read in bursts than synchronizing access for each word.
+        */
+       for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
+               count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
+                       IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
+               status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset + i,
+                                                           count, &data[i]);
+
+               if (status != IXGBE_SUCCESS)
+                       break;
+       }
+
+out:
+       return status;
+}
+
+/**
+ *  ixgbe_write_eeprom_buffer_bit_bang - Writes 16 bit word(s) to EEPROM
+ *  @hw: pointer to hardware structure
+ *  @offset: offset within the EEPROM to be written to
+ *  @words: number of word(s)
+ *  @data: 16 bit word(s) to be written to the EEPROM
+ *
+ *  If ixgbe_eeprom_update_checksum is not called after this function, the
+ *  EEPROM will most likely contain an invalid checksum.
+ **/
+static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
+                                              u16 words, u16 *data)
+{
+       s32 status;
+       u16 word;
+       u16 page_size;
+       u16 i;
+       u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI;
+
+       DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang");
+
+       /* Prepare the EEPROM for writing  */
+       status = ixgbe_acquire_eeprom(hw);
+
+       if (status == IXGBE_SUCCESS) {
+               if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
+                       ixgbe_release_eeprom(hw);
+                       status = IXGBE_ERR_EEPROM;
+               }
+       }
+
+       if (status == IXGBE_SUCCESS) {
+               for (i = 0; i < words; i++) {
+                       ixgbe_standby_eeprom(hw);
+
+                       /*  Send the WRITE ENABLE command (8 bit opcode )  */
+                       ixgbe_shift_out_eeprom_bits(hw,
+                                                  IXGBE_EEPROM_WREN_OPCODE_SPI,
+                                                  IXGBE_EEPROM_OPCODE_BITS);
+
+                       ixgbe_standby_eeprom(hw);
+
+                       /*
+                        * Some SPI eeproms use the 8th address bit embedded
+                        * in the opcode
+                        */
+                       if ((hw->eeprom.address_bits == 8) &&
+                           ((offset + i) >= 128))
+                               write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
+
+                       /* Send the Write command (8-bit opcode + addr) */
+                       ixgbe_shift_out_eeprom_bits(hw, write_opcode,
+                                                   IXGBE_EEPROM_OPCODE_BITS);
+                       ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
+                                                   hw->eeprom.address_bits);
+
+                       page_size = hw->eeprom.word_page_size;
+
+                       /* Send the data in burst via SPI*/
+                       do {
+                               word = data[i];
+                               word = (word >> 8) | (word << 8);
+                               ixgbe_shift_out_eeprom_bits(hw, word, 16);
+
+                               if (page_size == 0)
+                                       break;
+
+                               /* do not wrap around page */
+                               if (((offset + i) & (page_size - 1)) ==
+                                   (page_size - 1))
+                                       break;
+                       } while (++i < words);
+
+                       ixgbe_standby_eeprom(hw);
+                       msec_delay(10);
+               }
+               /* Done with writing - release the EEPROM */
+               ixgbe_release_eeprom(hw);
+       }
+
+       return status;
+}
+
+/**
+ *  ixgbe_write_eeprom_generic - Writes 16 bit value to EEPROM
+ *  @hw: pointer to hardware structure
+ *  @offset: offset within the EEPROM to be written to
+ *  @data: 16 bit word to be written to the EEPROM
+ *
+ *  If ixgbe_eeprom_update_checksum is not called after this function, the
+ *  EEPROM will most likely contain an invalid checksum.
+ **/
+s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
+{
+       s32 status;
+
+       DEBUGFUNC("ixgbe_write_eeprom_generic");
+
+       hw->eeprom.ops.init_params(hw);
+
+       if (offset >= hw->eeprom.word_size) {
+               status = IXGBE_ERR_EEPROM;
+               goto out;
+       }
+
+       status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data);
+
+out:
+       return status;
+}
+
+/**
+ *  ixgbe_read_eeprom_buffer_bit_bang_generic - Read EEPROM using bit-bang
+ *  @hw: pointer to hardware structure
+ *  @offset: offset within the EEPROM to be read
+ *  @data: read 16 bit words(s) from EEPROM
+ *  @words: number of word(s)
+ *
+ *  Reads 16 bit word(s) from EEPROM through bit-bang method
+ **/
+s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+                                              u16 words, u16 *data)
+{
+       s32 status = IXGBE_SUCCESS;
+       u16 i, count;
+
+       DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang_generic");
+
+       hw->eeprom.ops.init_params(hw);
+
+       if (words == 0) {
+               status = IXGBE_ERR_INVALID_ARGUMENT;
+               goto out;
+       }
+
+       if (offset + words > hw->eeprom.word_size) {
+               status = IXGBE_ERR_EEPROM;
+               goto out;
+       }
+
+       /*
+        * We cannot hold synchronization semaphores for too long
+        * to avoid other entity starvation. However it is more efficient
+        * to read in bursts than synchronizing access for each word.
+        */
+       for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) {
+               count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ?
+                       IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i);
+
+               status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i,
+                                                          count, &data[i]);
+
+               if (status != IXGBE_SUCCESS)
+                       break;
+       }
+
+out:
+       return status;
+}
+
+/**
+ *  ixgbe_read_eeprom_buffer_bit_bang - Read EEPROM using bit-bang
+ *  @hw: pointer to hardware structure
+ *  @offset: offset within the EEPROM to be read
+ *  @words: number of word(s)
+ *  @data: read 16 bit word(s) from EEPROM
+ *
+ *  Reads 16 bit word(s) from EEPROM through bit-bang method
+ **/
+static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
+                                             u16 words, u16 *data)
+{
+       s32 status;
+       u16 word_in;
+       u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI;
+       u16 i;
+
+       DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang");
+
+       /* Prepare the EEPROM for reading  */
+       status = ixgbe_acquire_eeprom(hw);
+
+       if (status == IXGBE_SUCCESS) {
+               if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) {
+                       ixgbe_release_eeprom(hw);
+                       status = IXGBE_ERR_EEPROM;
+               }
+       }
+
+       if (status == IXGBE_SUCCESS) {
+               for (i = 0; i < words; i++) {
+                       ixgbe_standby_eeprom(hw);
+                       /*
+                        * Some SPI eeproms use the 8th address bit embedded
+                        * in the opcode
+                        */
+                       if ((hw->eeprom.address_bits == 8) &&
+                           ((offset + i) >= 128))
+                               read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
+
+                       /* Send the READ command (opcode + addr) */
+                       ixgbe_shift_out_eeprom_bits(hw, read_opcode,
+                                                   IXGBE_EEPROM_OPCODE_BITS);
+                       ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
+                                                   hw->eeprom.address_bits);
+
+                       /* Read the data. */
+                       word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
+                       data[i] = (word_in >> 8) | (word_in << 8);
+               }
+
+               /* End this read operation */
+               ixgbe_release_eeprom(hw);
+       }
+
+       return status;
+}
+
+/**
+ *  ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang
+ *  @hw: pointer to hardware structure
+ *  @offset: offset within the EEPROM to be read
+ *  @data: read 16 bit value from EEPROM
+ *
+ *  Reads 16 bit value from EEPROM through bit-bang method
+ **/
+s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+                                       u16 *data)
+{
+       s32 status;
+
+       DEBUGFUNC("ixgbe_read_eeprom_bit_bang_generic");
+
+       hw->eeprom.ops.init_params(hw);
+
+       if (offset >= hw->eeprom.word_size) {
+               status = IXGBE_ERR_EEPROM;
+               goto out;
+       }
+
+       status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
+
+out:
+       return status;
+}
+
+/**
+ *  ixgbe_read_eerd_buffer_generic - Read EEPROM word(s) using EERD
+ *  @hw: pointer to hardware structure
+ *  @offset: offset of word in the EEPROM to read
+ *  @words: number of word(s)
+ *  @data: 16 bit word(s) from the EEPROM
+ *
+ *  Reads a 16 bit word(s) from the EEPROM using the EERD register.
+ **/
+s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
+                                   u16 words, u16 *data)
+{
+       u32 eerd;
+       s32 status = IXGBE_SUCCESS;
+       u32 i;
+
+       DEBUGFUNC("ixgbe_read_eerd_buffer_generic");
+
+       hw->eeprom.ops.init_params(hw);
+
+       if (words == 0) {
+               status = IXGBE_ERR_INVALID_ARGUMENT;
+               goto out;
+       }
+
+       if (offset >= hw->eeprom.word_size) {
+               status = IXGBE_ERR_EEPROM;
+               goto out;
+       }
+
+       for (i = 0; i < words; i++) {
+               eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) +
+                      IXGBE_EEPROM_RW_REG_START;
+
+               IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
+               status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);
+
+               if (status == IXGBE_SUCCESS) {
+                       data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
+                                IXGBE_EEPROM_RW_REG_DATA);
+               } else {
+                       DEBUGOUT("Eeprom read timed out\n");
+                       goto out;
+               }
+       }
+out:
+       return status;
+}
+
+/**
+ *  ixgbe_detect_eeprom_page_size_generic - Detect EEPROM page size
+ *  @hw: pointer to hardware structure
+ *  @offset: offset within the EEPROM to be used as a scratch pad
+ *
+ *  Discover EEPROM page size by writing marching data at given offset.
+ *  This function is called only when we are writing a new large buffer
+ *  at given offset so the data would be overwritten anyway.
+ **/
+static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
+                                                 u16 offset)
+{
+       u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX];
+       s32 status = IXGBE_SUCCESS;
+       u16 i;
+
+       DEBUGFUNC("ixgbe_detect_eeprom_page_size_generic");
+
+       for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++)
+               data[i] = i;
+
+       hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX;
+       status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset,
+                                            IXGBE_EEPROM_PAGE_SIZE_MAX, data);
+       hw->eeprom.word_page_size = 0;
+       if (status != IXGBE_SUCCESS)
+               goto out;
+
+       status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
+       if (status != IXGBE_SUCCESS)
+               goto out;
+
+       /*
+        * When writing in burst more than the actual page size
+        * EEPROM address wraps around current page.
+        */
+       hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0];
+
+       DEBUGOUT1("Detected EEPROM page size = %d words.",
+                 hw->eeprom.word_page_size);
+out:
+       return status;
+}
+
+/**
+ *  ixgbe_read_eerd_generic - Read EEPROM word using EERD
+ *  @hw: pointer to hardware structure
+ *  @offset: offset of  word in the EEPROM to read
+ *  @data: word read from the EEPROM
+ *
+ *  Reads a 16 bit word from the EEPROM using the EERD register.
+ **/
+s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
+{
+       return ixgbe_read_eerd_buffer_generic(hw, offset, 1, data);
+}
+
+/**
+ *  ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR
+ *  @hw: pointer to hardware structure
+ *  @offset: offset of  word in the EEPROM to write
+ *  @words: number of word(s)
+ *  @data: word(s) write to the EEPROM
+ *
+ *  Write a 16 bit word(s) to the EEPROM using the EEWR register.
+ **/
+s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
+                                    u16 words, u16 *data)
+{
+       u32 eewr;
+       s32 status = IXGBE_SUCCESS;
+       u16 i;
+
+       DEBUGFUNC("ixgbe_write_eewr_generic");
+
+       hw->eeprom.ops.init_params(hw);
+
+       if (words == 0) {
+               status = IXGBE_ERR_INVALID_ARGUMENT;
+               goto out;
+       }
+
+       if (offset >= hw->eeprom.word_size) {
+               status = IXGBE_ERR_EEPROM;
+               goto out;
+       }
+
+       for (i = 0; i < words; i++) {
+               eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
+                      (data[i] << IXGBE_EEPROM_RW_REG_DATA) |
+                      IXGBE_EEPROM_RW_REG_START;
+
+               status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
+               if (status != IXGBE_SUCCESS) {
+                       DEBUGOUT("Eeprom write EEWR timed out\n");
+                       goto out;
+               }
+
+               IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);
+
+               status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
+               if (status != IXGBE_SUCCESS) {
+                       DEBUGOUT("Eeprom write EEWR timed out\n");
+                       goto out;
+               }
+       }
+
+out:
+       return status;
+}
+
+/**
+ *  ixgbe_write_eewr_generic - Write EEPROM word using EEWR
+ *  @hw: pointer to hardware structure
+ *  @offset: offset of  word in the EEPROM to write
+ *  @data: word write to the EEPROM
+ *
+ *  Write a 16 bit word to the EEPROM using the EEWR register.
+ **/
+s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
+{
+       return ixgbe_write_eewr_buffer_generic(hw, offset, 1, &data);
+}
+
+/**
+ *  ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status
+ *  @hw: pointer to hardware structure
+ *  @ee_reg: EEPROM flag for polling
+ *
+ *  Polls the status bit (bit 1) of the EERD or EEWR to determine when the
+ *  read or write is done respectively.
+ **/
+s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
+{
+       u32 i;
+       u32 reg;
+       s32 status = IXGBE_ERR_EEPROM;
+
+       DEBUGFUNC("ixgbe_poll_eerd_eewr_done");
+
+       for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) {
+               if (ee_reg == IXGBE_NVM_POLL_READ)
+                       reg = IXGBE_READ_REG(hw, IXGBE_EERD);
+               else
+                       reg = IXGBE_READ_REG(hw, IXGBE_EEWR);
+
+               if (reg & IXGBE_EEPROM_RW_REG_DONE) {
+                       status = IXGBE_SUCCESS;
+                       break;
+               }
+               usec_delay(5);
+       }
+       return status;
+}
+
+/**
+ *  ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang
+ *  @hw: pointer to hardware structure
+ *
+ *  Prepares EEPROM for access using bit-bang method. This function should
+ *  be called before issuing a command to the EEPROM.
+ **/
+static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
+{
+       s32 status = IXGBE_SUCCESS;
+       u32 eec;
+       u32 i;
+
+       DEBUGFUNC("ixgbe_acquire_eeprom");
+
+       if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)
+           != IXGBE_SUCCESS)
+               status = IXGBE_ERR_SWFW_SYNC;
+
+       if (status == IXGBE_SUCCESS) {
+               eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+
+               /* Request EEPROM Access */
+               eec |= IXGBE_EEC_REQ;
+               IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+
+               for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
+                       eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+                       if (eec & IXGBE_EEC_GNT)
+                               break;
+                       usec_delay(5);
+               }
+
+               /* Release if grant not acquired */
+               if (!(eec & IXGBE_EEC_GNT)) {
+                       eec &= ~IXGBE_EEC_REQ;
+                       IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+                       DEBUGOUT("Could not acquire EEPROM grant\n");
+
+                       hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+                       status = IXGBE_ERR_EEPROM;
+               }
+
+               /* Setup EEPROM for Read/Write */
+               if (status == IXGBE_SUCCESS) {
+                       /* Clear CS and SK */
+                       eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
+                       IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+                       IXGBE_WRITE_FLUSH(hw);
+                       usec_delay(1);
+               }
+       }
+       return status;
+}
+
+/**
+ *  ixgbe_get_eeprom_semaphore - Get hardware semaphore
+ *  @hw: pointer to hardware structure
+ *
+ *  Sets the hardware semaphores so EEPROM access can occur for bit-bang method
+ **/
+static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
+{
+       s32 status = IXGBE_ERR_EEPROM;
+       u32 timeout = 2000;
+       u32 i;
+       u32 swsm;
+
+       DEBUGFUNC("ixgbe_get_eeprom_semaphore");
+
+
+       /* Get SMBI software semaphore between device drivers first */
+       for (i = 0; i < timeout; i++) {
+               /*
+                * If the SMBI bit is 0 when we read it, then the bit will be
+                * set and we have the semaphore
+                */
+               swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+               if (!(swsm & IXGBE_SWSM_SMBI)) {
+                       status = IXGBE_SUCCESS;
+                       break;
+               }
+               usec_delay(50);
+       }
+
+       if (i == timeout) {
+               DEBUGOUT("Driver can't access the Eeprom - SMBI Semaphore "
+                        "not granted.\n");
+               /*
+                * this release is particularly important because our attempts
+                * above to get the semaphore may have succeeded, and if there
+                * was a timeout, we should unconditionally clear the semaphore
+                * bits to free the driver to make progress
+                */
+               ixgbe_release_eeprom_semaphore(hw);
+
+               usec_delay(50);
+               /*
+                * one last try
+                * If the SMBI bit is 0 when we read it, then the bit will be
+                * set and we have the semaphore
+                */
+               swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+               if (!(swsm & IXGBE_SWSM_SMBI))
+                       status = IXGBE_SUCCESS;
+       }
+
+       /* Now get the semaphore between SW/FW through the SWESMBI bit */
+       if (status == IXGBE_SUCCESS) {
+               for (i = 0; i < timeout; i++) {
+                       swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+
+                       /* Set the SW EEPROM semaphore bit to request access */
+                       swsm |= IXGBE_SWSM_SWESMBI;
+                       IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
+
+                       /*
+                        * If we set the bit successfully then we got the
+                        * semaphore.
+                        */
+                       swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+                       if (swsm & IXGBE_SWSM_SWESMBI)
+                               break;
+
+                       usec_delay(50);
+               }
+
+               /*
+                * Release semaphores and return error if SW EEPROM semaphore
+                * was not granted because we don't have access to the EEPROM
+                */
+               if (i >= timeout) {
+                       DEBUGOUT("SWESMBI Software EEPROM semaphore "
+                                "not granted.\n");
+                       ixgbe_release_eeprom_semaphore(hw);
+                       status = IXGBE_ERR_EEPROM;
+               }
+       } else {
+               DEBUGOUT("Software semaphore SMBI between device drivers "
+                        "not granted.\n");
+       }
+
+       return status;
+}
+
+/**
+ *  ixgbe_release_eeprom_semaphore - Release hardware semaphore
+ *  @hw: pointer to hardware structure
+ *
+ *  This function clears hardware semaphore bits.
+ **/
+static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
+{
+       u32 swsm;
+
+       DEBUGFUNC("ixgbe_release_eeprom_semaphore");
+
+       swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+
+       /* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */
+       swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI);
+       IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
+       IXGBE_WRITE_FLUSH(hw);
+}
+
+/**
+ *  ixgbe_ready_eeprom - Polls for EEPROM ready
+ *  @hw: pointer to hardware structure
+ **/
+static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
+{
+       s32 status = IXGBE_SUCCESS;
+       u16 i;
+       u8 spi_stat_reg;
+
+       DEBUGFUNC("ixgbe_ready_eeprom");
+
+       /*
+        * Read "Status Register" repeatedly until the LSB is cleared.  The
+        * EEPROM will signal that the command has been completed by clearing
+        * bit 0 of the internal status register.  If it's not cleared within
+        * 5 milliseconds, then error out.
+        */
+       for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
+               ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
+                                           IXGBE_EEPROM_OPCODE_BITS);
+               spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8);
+               if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
+                       break;
+
+               usec_delay(5);
+               ixgbe_standby_eeprom(hw);
+       };
+
+       /*
+        * On some parts, SPI write time could vary from 0-20mSec on 3.3V
+        * devices (and only 0-5mSec on 5V devices)
+        */
+       if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
+               DEBUGOUT("SPI EEPROM Status error\n");
+               status = IXGBE_ERR_EEPROM;
+       }
+
+       return status;
+}
+
+/**
+ *  ixgbe_standby_eeprom - Returns EEPROM to a "standby" state
+ *  @hw: pointer to hardware structure
+ **/
+static void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
+{
+       u32 eec;
+
+       DEBUGFUNC("ixgbe_standby_eeprom");
+
+       eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+
+       /* Toggle CS to flush commands */
+       eec |= IXGBE_EEC_CS;
+       IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+       IXGBE_WRITE_FLUSH(hw);
+       usec_delay(1);
+       eec &= ~IXGBE_EEC_CS;
+       IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+       IXGBE_WRITE_FLUSH(hw);
+       usec_delay(1);
+}
+
+/**
+ *  ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM.
+ *  @hw: pointer to hardware structure
+ *  @data: data to send to the EEPROM
+ *  @count: number of bits to shift out
+ **/
+static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
+                                        u16 count)
+{
+       u32 eec;
+       u32 mask;
+       u32 i;
+
+       DEBUGFUNC("ixgbe_shift_out_eeprom_bits");
+
+       eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+
+       /*
+        * Mask is used to shift "count" bits of "data" out to the EEPROM
+        * one bit at a time.  Determine the starting bit based on count
+        */
+       mask = 0x01 << (count - 1);
+
+       for (i = 0; i < count; i++) {
+               /*
+                * A "1" is shifted out to the EEPROM by setting bit "DI" to a
+                * "1", and then raising and then lowering the clock (the SK
+                * bit controls the clock input to the EEPROM).  A "0" is
+                * shifted out to the EEPROM by setting "DI" to "0" and then
+                * raising and then lowering the clock.
+                */
+               if (data & mask)
+                       eec |= IXGBE_EEC_DI;
+               else
+                       eec &= ~IXGBE_EEC_DI;
+
+               IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+               IXGBE_WRITE_FLUSH(hw);
+
+               usec_delay(1);
+
+               ixgbe_raise_eeprom_clk(hw, &eec);
+               ixgbe_lower_eeprom_clk(hw, &eec);
+
+               /*
+                * Shift mask to signify next bit of data to shift in to the
+                * EEPROM
+                */
+               mask = mask >> 1;
+       };
+
+       /* We leave the "DI" bit set to "0" when we leave this routine. */
+       eec &= ~IXGBE_EEC_DI;
+       IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+       IXGBE_WRITE_FLUSH(hw);
+}
+
+/**
+ *  ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM
+ *  @hw: pointer to hardware structure
+ **/
+static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count)
+{
+       u32 eec;
+       u32 i;
+       u16 data = 0;
+
+       DEBUGFUNC("ixgbe_shift_in_eeprom_bits");
+
+       /*
+        * In order to read a register from the EEPROM, we need to shift
+        * 'count' bits in from the EEPROM. Bits are "shifted in" by raising
+        * the clock input to the EEPROM (setting the SK bit), and then reading
+        * the value of the "DO" bit.  During this "shifting in" process the
+        * "DI" bit should always be clear.
+        */
+       eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+
+       eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI);
+
+       for (i = 0; i < count; i++) {
+               data = data << 1;
+               ixgbe_raise_eeprom_clk(hw, &eec);
+
+               eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+
+               eec &= ~(IXGBE_EEC_DI);
+               if (eec & IXGBE_EEC_DO)
+                       data |= 1;
+
+               ixgbe_lower_eeprom_clk(hw, &eec);
+       }
+
+       return data;
+}
+
+/**
+ *  ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input.
+ *  @hw: pointer to hardware structure
+ *  @eec: EEC register's current value
+ **/
+static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
+{
+       DEBUGFUNC("ixgbe_raise_eeprom_clk");
+
+       /*
+        * Raise the clock input to the EEPROM
+        * (setting the SK bit), then delay
+        */
+       *eec = *eec | IXGBE_EEC_SK;
+       IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
+       IXGBE_WRITE_FLUSH(hw);
+       usec_delay(1);
+}
+
+/**
+ *  ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input.
+ *  @hw: pointer to hardware structure
+ *  @eecd: EECD's current value
+ **/
+static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec)
+{
+       DEBUGFUNC("ixgbe_lower_eeprom_clk");
+
+       /*
+        * Lower the clock input to the EEPROM (clearing the SK bit), then
+        * delay
+        */
+       *eec = *eec & ~IXGBE_EEC_SK;
+       IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec);
+       IXGBE_WRITE_FLUSH(hw);
+       usec_delay(1);
+}
+
+/**
+ *  ixgbe_release_eeprom - Release EEPROM, release semaphores
+ *  @hw: pointer to hardware structure
+ **/
+static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
+{
+       u32 eec;
+
+       DEBUGFUNC("ixgbe_release_eeprom");
+
+       eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+
+       eec |= IXGBE_EEC_CS;  /* Pull CS high */
+       eec &= ~IXGBE_EEC_SK; /* Lower SCK */
+
+       IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+       IXGBE_WRITE_FLUSH(hw);
+
+       usec_delay(1);
+
+       /* Stop requesting EEPROM access */
+       eec &= ~IXGBE_EEC_REQ;
+       IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+
+       hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+
+       /* Delay before attempt to obtain semaphore again to allow FW access */
+       msec_delay(hw->eeprom.semaphore_delay);
+}
+
+/**
+ *  ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum
+ *  @hw: pointer to hardware structure
+ **/
+u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
+{
+       u16 i;
+       u16 j;
+       u16 checksum = 0;
+       u16 length = 0;
+       u16 pointer = 0;
+       u16 word = 0;
+
+       DEBUGFUNC("ixgbe_calc_eeprom_checksum_generic");
+
+       /* Include 0x0-0x3F in the checksum */
+       for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
+               if (hw->eeprom.ops.read(hw, i, &word) != IXGBE_SUCCESS) {
+                       DEBUGOUT("EEPROM read failed\n");
+                       break;
+               }
+               checksum += word;
+       }
+
+       /* Include all data from pointers except for the fw pointer */
+       for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
+               hw->eeprom.ops.read(hw, i, &pointer);
+
+               /* Make sure the pointer seems valid */
+               if (pointer != 0xFFFF && pointer != 0) {
+                       hw->eeprom.ops.read(hw, pointer, &length);
+
+                       if (length != 0xFFFF && length != 0) {
+                               for (j = pointer+1; j <= pointer+length; j++) {
+                                       hw->eeprom.ops.read(hw, j, &word);
+                                       checksum += word;
+                               }
+                       }
+               }
+       }
+
+       checksum = (u16)IXGBE_EEPROM_SUM - checksum;
+
+       return checksum;
+}
+
+/**
+ *  ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum
+ *  @hw: pointer to hardware structure
+ *  @checksum_val: calculated checksum
+ *
+ *  Performs checksum calculation and validates the EEPROM checksum.  If the
+ *  caller does not need checksum_val, the value can be NULL.
+ **/
+s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
+                                           u16 *checksum_val)
+{
+       s32 status;
+       u16 checksum;
+       u16 read_checksum = 0;
+
+       DEBUGFUNC("ixgbe_validate_eeprom_checksum_generic");
+
+       /*
+        * Read the first word from the EEPROM. If this times out or fails, do
+        * not continue or we could be in for a very long wait while every
+        * EEPROM read fails
+        */
+       status = hw->eeprom.ops.read(hw, 0, &checksum);
+
+       if (status == IXGBE_SUCCESS) {
+               checksum = hw->eeprom.ops.calc_checksum(hw);
+
+               hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
+
+               /*
+                * Verify read checksum from EEPROM is the same as
+                * calculated checksum
+                */
+               if (read_checksum != checksum)
+                       status = IXGBE_ERR_EEPROM_CHECKSUM;
+
+               /* If the user cares, return the calculated checksum */
+               if (checksum_val)
+                       *checksum_val = checksum;
+       } else {
+               DEBUGOUT("EEPROM read failed\n");
+       }
+
+       return status;
+}
+
+/**
+ *  ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum
+ *  @hw: pointer to hardware structure
+ **/
+s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
+{
+       s32 status;
+       u16 checksum;
+
+       DEBUGFUNC("ixgbe_update_eeprom_checksum_generic");
+
+       /*
+        * Read the first word from the EEPROM. If this times out or fails, do
+        * not continue or we could be in for a very long wait while every
+        * EEPROM read fails
+        */
+       status = hw->eeprom.ops.read(hw, 0, &checksum);
+
+       if (status == IXGBE_SUCCESS) {
+               checksum = hw->eeprom.ops.calc_checksum(hw);
+               status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM,
+                                             checksum);
+       } else {
+               DEBUGOUT("EEPROM read failed\n");
+       }
+
+       return status;
+}
+
+/**
+ *  ixgbe_validate_mac_addr - Validate MAC address
+ *  @mac_addr: pointer to MAC address.
+ *
+ *  Tests a MAC address to ensure it is a valid Individual Address
+ **/
+s32 ixgbe_validate_mac_addr(u8 *mac_addr)
+{
+       s32 status = IXGBE_SUCCESS;
+
+       DEBUGFUNC("ixgbe_validate_mac_addr");
+
+       /* Make sure it is not a multicast address */
+       if (IXGBE_IS_MULTICAST(mac_addr)) {
+               DEBUGOUT("MAC address is multicast\n");
+               status = IXGBE_ERR_INVALID_MAC_ADDR;
+       /* Not a broadcast address */
+       } else if (IXGBE_IS_BROADCAST(mac_addr)) {
+               DEBUGOUT("MAC address is broadcast\n");
+               status = IXGBE_ERR_INVALID_MAC_ADDR;
+       /* Reject the zero address */
+       } else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 &&
+                  mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) {
+               DEBUGOUT("MAC address is all zeros\n");
+               status = IXGBE_ERR_INVALID_MAC_ADDR;
+       }
+       return status;
+}
+
+/**
+ *  ixgbe_set_rar_generic - Set Rx address register
+ *  @hw: pointer to hardware structure
+ *  @index: Receive address register to write
+ *  @addr: Address to put into receive address register
+ *  @vmdq: VMDq "set" or "pool" index
+ *  @enable_addr: set flag that address is active
+ *
+ *  Puts an ethernet address into a receive address register.
+ **/
+s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
+                          u32 enable_addr)
+{
+       u32 rar_low, rar_high;
+       u32 rar_entries = hw->mac.num_rar_entries;
+
+       DEBUGFUNC("ixgbe_set_rar_generic");
+
+       /* Make sure we are using a valid rar index range */
+       if (index >= rar_entries) {
+               DEBUGOUT1("RAR index %d is out of range.\n", index);
+               return IXGBE_ERR_INVALID_ARGUMENT;
+       }
+
+       /* setup VMDq pool selection before this RAR gets enabled */
+       hw->mac.ops.set_vmdq(hw, index, vmdq);
+
+       /*
+        * HW expects these in little endian so we reverse the byte
+        * order from network order (big endian) to little endian
+        */
+       rar_low = ((u32)addr[0] |
+                  ((u32)addr[1] << 8) |
+                  ((u32)addr[2] << 16) |
+                  ((u32)addr[3] << 24));
+       /*
+        * Some parts put the VMDq setting in the extra RAH bits,
+        * so save everything except the lower 16 bits that hold part
+        * of the address and the address valid bit.
+        */
+       rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
+       rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
+       rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8));
+
+       if (enable_addr != 0)
+               rar_high |= IXGBE_RAH_AV;
+
+       IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
+       IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
+
+       return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_clear_rar_generic - Remove Rx address register
+ *  @hw: pointer to hardware structure
+ *  @index: Receive address register to write
+ *
+ *  Clears an ethernet address from a receive address register.
+ *  Returns IXGBE_ERR_INVALID_ARGUMENT when @index is out of range.
+ **/
+s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
+{
+       u32 rar_high;
+       u32 rar_entries = hw->mac.num_rar_entries;
+
+       DEBUGFUNC("ixgbe_clear_rar_generic");
+
+       /* Make sure we are using a valid rar index range */
+       if (index >= rar_entries) {
+               DEBUGOUT1("RAR index %d is out of range.\n", index);
+               return IXGBE_ERR_INVALID_ARGUMENT;
+       }
+
+       /*
+        * Some parts put the VMDq setting in the extra RAH bits,
+        * so save everything except the lower 16 bits that hold part
+        * of the address and the address valid bit.
+        */
+       rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
+       rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
+
+       /* Zero the low address word; AV was masked out of rar_high above,
+        * so writing RAH back disables the entry */
+       IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
+       IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
+
+       /* clear VMDq pool/queue selection for this RAR */
+       hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
+
+       return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_init_rx_addrs_generic - Initializes receive address filters.
+ *  @hw: pointer to hardware structure
+ *
+ *  Places the MAC address in receive address register 0 and clears the rest
+ *  of the receive address registers. Clears the multicast table. Assumes
+ *  the receiver is in reset when the routine is called.
+ **/
+s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
+{
+       u32 i;
+       u32 rar_entries = hw->mac.num_rar_entries;
+
+       DEBUGFUNC("ixgbe_init_rx_addrs_generic");
+
+       /*
+        * If the current mac address is valid, assume it is a software override
+        * to the permanent address.
+        * Otherwise, use the permanent address from the eeprom.
+        */
+       if (ixgbe_validate_mac_addr(hw->mac.addr) ==
+           IXGBE_ERR_INVALID_MAC_ADDR) {
+               /* Get the MAC address from the RAR0 for later reference */
+               hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
+
+               DEBUGOUT3(" Keeping Current RAR0 Addr =%.2X %.2X %.2X ",
+                         hw->mac.addr[0], hw->mac.addr[1],
+                         hw->mac.addr[2]);
+               DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
+                         hw->mac.addr[4], hw->mac.addr[5]);
+       } else {
+               /* Setup the receive address. */
+               DEBUGOUT("Overriding MAC Address in RAR[0]\n");
+               DEBUGOUT3(" New MAC Addr =%.2X %.2X %.2X ",
+                         hw->mac.addr[0], hw->mac.addr[1],
+                         hw->mac.addr[2]);
+               DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3],
+                         hw->mac.addr[4], hw->mac.addr[5]);
+
+               hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
+
+               /* clear VMDq pool/queue selection for RAR 0 */
+               hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
+       }
+       /* Reset address-filter accounting; RAR[0] is the only entry in use */
+       hw->addr_ctrl.overflow_promisc = 0;
+
+       hw->addr_ctrl.rar_used_count = 1;
+
+       /* Zero out the other receive addresses. */
+       DEBUGOUT1("Clearing RAR[1-%d]\n", rar_entries - 1);
+       for (i = 1; i < rar_entries; i++) {
+               IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
+               IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
+       }
+
+       /* Clear the MTA */
+       hw->addr_ctrl.mta_in_use = 0;
+       IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
+
+       DEBUGOUT(" Clearing MTA\n");
+       for (i = 0; i < hw->mac.mcft_size; i++)
+               IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
+
+       ixgbe_init_uta_tables(hw);
+
+       return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_add_uc_addr - Adds a secondary unicast address.
+ *  @hw: pointer to hardware structure
+ *  @addr: new address
+ *  @vmdq: VMDq "set" or "pool" index
+ *
+ *  Adds it to unused receive address register or goes into promiscuous mode.
+ **/
+void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
+{
+       u32 rar_entries = hw->mac.num_rar_entries;
+       u32 rar;
+
+       DEBUGFUNC("ixgbe_add_uc_addr");
+
+       DEBUGOUT6(" UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n",
+                 addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
+
+       /*
+        * Place this address in the RAR if there is room,
+        * else put the controller into promiscuous mode
+        */
+       if (hw->addr_ctrl.rar_used_count < rar_entries) {
+               rar = hw->addr_ctrl.rar_used_count;
+               hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
+               DEBUGOUT1("Added a secondary address to RAR[%d]\n", rar);
+               hw->addr_ctrl.rar_used_count++;
+       } else {
+               /* No RAR left: record the overflow; the caller uses this
+                * count to decide whether to enable promiscuous mode */
+               hw->addr_ctrl.overflow_promisc++;
+       }
+
+       DEBUGOUT("ixgbe_add_uc_addr Complete\n");
+}
+
+/**
+ *  ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
+ *  @hw: pointer to hardware structure
+ *  @addr_list: the list of new addresses
+ *  @addr_count: number of addresses
+ *  @next: iterator function to walk the address list
+ *
+ *  The given list replaces any existing list.  Clears the secondary addrs from
+ *  receive address registers.  Uses unused receive address registers for the
+ *  first secondary addresses, and falls back to promiscuous mode as needed.
+ *
+ *  Drivers using secondary unicast addresses must set user_set_promisc when
+ *  manually putting the device into promiscuous mode.
+ **/
+s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
+                                      u32 addr_count, ixgbe_mc_addr_itr next)
+{
+       u8 *addr;
+       u32 i;
+       u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc;
+       u32 uc_addr_in_use;
+       u32 fctrl;
+       u32 vmdq;
+
+       DEBUGFUNC("ixgbe_update_uc_addr_list_generic");
+
+       /*
+        * Clear accounting of old secondary address list,
+        * don't count RAR[0]
+        */
+       /* NOTE(review): assumes rar_used_count >= 1 (RAR[0] always in use);
+        * if it were 0 this u32 subtraction would underflow — confirm */
+       uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1;
+       hw->addr_ctrl.rar_used_count -= uc_addr_in_use;
+       hw->addr_ctrl.overflow_promisc = 0;
+
+       /* Zero out the other receive addresses */
+       /* NOTE(review): the loop clears indices 1..uc_addr_in_use, but the
+        * message prints uc_addr_in_use+1 as the upper bound — off by one */
+       DEBUGOUT1("Clearing RAR[1-%d]\n", uc_addr_in_use+1);
+       for (i = 0; i < uc_addr_in_use; i++) {
+               IXGBE_WRITE_REG(hw, IXGBE_RAL(1+i), 0);
+               IXGBE_WRITE_REG(hw, IXGBE_RAH(1+i), 0);
+       }
+
+       /* Add the new addresses */
+       for (i = 0; i < addr_count; i++) {
+               DEBUGOUT(" Adding the secondary addresses:\n");
+               addr = next(hw, &addr_list, &vmdq);
+               ixgbe_add_uc_addr(hw, addr, vmdq);
+       }
+
+       if (hw->addr_ctrl.overflow_promisc) {
+               /* enable promisc if not already in overflow or set by user */
+               if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
+                       DEBUGOUT(" Entering address overflow promisc mode\n");
+                       fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+                       fctrl |= IXGBE_FCTRL_UPE;
+                       IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
+               }
+       } else {
+               /* only disable if set by overflow, not by user */
+               if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) {
+                       DEBUGOUT(" Leaving address overflow promisc mode\n");
+                       fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+                       fctrl &= ~IXGBE_FCTRL_UPE;
+                       IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
+               }
+       }
+
+       DEBUGOUT("ixgbe_update_uc_addr_list_generic Complete\n");
+       return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_mta_vector - Determines bit-vector in multicast table to set
+ *  @hw: pointer to hardware structure
+ *  @mc_addr: the multicast address
+ *
+ *  Extracts 12 bits of the multicast address to select which bit-vector
+ *  to set in the multicast table.  Which of the four possible 12-bit
+ *  windows the hardware checks is controlled by the MO field of
+ *  MCSTCTRL, programmed from mc_filter_type at initialization, so the
+ *  same selection must be mirrored here in software.
+ **/
+static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
+{
+       u32 bit_index = 0;
+
+       DEBUGFUNC("ixgbe_mta_vector");
+
+       switch (hw->mac.mc_filter_type) {
+       case 0:   /* use bits [47:36] of the address */
+               bit_index = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
+               break;
+       case 1:   /* use bits [46:35] of the address */
+               bit_index = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
+               break;
+       case 2:   /* use bits [45:34] of the address */
+               bit_index = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
+               break;
+       case 3:   /* use bits [43:32] of the address */
+               bit_index = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
+               break;
+       default:  /* Invalid mc_filter_type */
+               DEBUGOUT("MC filter type param set incorrectly\n");
+               ASSERT(0);
+               break;
+       }
+
+       /* Mask to 12 bits so the MTA boundary cannot be exceeded */
+       return bit_index & 0xFFF;
+}
+
+/**
+ *  ixgbe_set_mta - Set bit-vector in multicast table
+ *  @hw: pointer to hardware structure
+ *  @mc_addr: the multicast address to hash into the table
+ *
+ *  Sets the bit-vector in the multicast table.  Only the software shadow
+ *  (mta_shadow) is updated here; the caller is responsible for flushing
+ *  the shadow to the MTA registers.
+ **/
+void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
+{
+       u32 vector;
+       u32 vector_bit;
+       u32 vector_reg;
+
+       DEBUGFUNC("ixgbe_set_mta");
+
+       hw->addr_ctrl.mta_in_use++;
+
+       vector = ixgbe_mta_vector(hw, mc_addr);
+       DEBUGOUT1(" bit-vector = 0x%03X\n", vector);
+
+       /*
+        * The MTA is a register array of 128 32-bit registers. It is treated
+        * like an array of 4096 bits.  We want to set bit
+        * BitArray[vector_value]. So we figure out what register the bit is
+        * in, read it, OR in the new bit, then write back the new value.  The
+        * register is determined by the upper 7 bits of the vector value and
+        * the bit within that register are determined by the lower 5 bits of
+        * the value.
+        */
+       vector_reg = (vector >> 5) & 0x7F;
+       vector_bit = vector & 0x1F;
+       hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
+}
+
+/**
+ *  ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
+ *  @hw: pointer to hardware structure
+ *  @mc_addr_list: the list of new multicast addresses
+ *  @mc_addr_count: number of addresses
+ *  @next: iterator function to walk the multicast address list
+ *  @clear: flag, when set clears the table beforehand
+ *
+ *  When the clear flag is set, the given list replaces any existing list.
+ *  Hashes the given addresses into the multicast table.
+ **/
+s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
+                                      u32 mc_addr_count, ixgbe_mc_addr_itr next,
+                                      bool clear)
+{
+       u32 i;
+       u32 vmdq;
+
+       DEBUGFUNC("ixgbe_update_mc_addr_list_generic");
+
+       /*
+        * Set the new number of MC addresses that we are being requested to
+        * use.
+        */
+       hw->addr_ctrl.num_mc_addrs = mc_addr_count;
+       hw->addr_ctrl.mta_in_use = 0;
+
+       /* Clear mta_shadow */
+       /* With clear==FALSE, previously-set shadow bits are retained and
+        * rewritten below along with the new addresses */
+       if (clear) {
+               DEBUGOUT(" Clearing MTA\n");
+               memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
+       }
+
+       /* Update mta_shadow */
+       for (i = 0; i < mc_addr_count; i++) {
+               DEBUGOUT(" Adding the multicast addresses:\n");
+               ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq));
+       }
+
+       /* Enable mta */
+       for (i = 0; i < hw->mac.mcft_size; i++)
+               IXGBE_WRITE_REG_ARRAY(hw, IXGBE_MTA(0), i,
+                                     hw->mac.mta_shadow[i]);
+
+       /* Only enable the hash filter if something was actually added */
+       if (hw->addr_ctrl.mta_in_use > 0)
+               IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
+                               IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
+
+       DEBUGOUT("ixgbe_update_mc_addr_list_generic Complete\n");
+       return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_enable_mc_generic - Enable multicast address in RAR
+ *  @hw: pointer to hardware structure
+ *
+ *  Enables multicast address in RAR and the use of the multicast hash table.
+ **/
+s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
+{
+       struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
+
+       DEBUGFUNC("ixgbe_enable_mc_generic");
+
+       /* Re-enable the hash filter only if addresses are actually in use */
+       if (a->mta_in_use > 0)
+               IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
+                               hw->mac.mc_filter_type);
+
+       return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_disable_mc_generic - Disable multicast address in RAR
+ *  @hw: pointer to hardware structure
+ *
+ *  Disables multicast address in RAR and the use of the multicast hash table.
+ **/
+s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
+{
+       struct ixgbe_addr_filter_info *a = &hw->addr_ctrl;
+
+       DEBUGFUNC("ixgbe_disable_mc_generic");
+
+       /* Writing MCSTCTRL without the MFE bit turns the hash filter off
+        * while preserving the configured filter type (MO field) */
+       if (a->mta_in_use > 0)
+               IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
+
+       return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_fc_enable_generic - Enable flow control
+ *  @hw: pointer to hardware structure
+ *  @packetbuf_num: packet buffer number (0-7)
+ *
+ *  Enable flow control according to the current settings.
+ *  Returns IXGBE_ERR_CONFIG on an invalid fc.current_mode, or the
+ *  autoneg error when flow control negotiation fails.
+ **/
+s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
+{
+       s32 ret_val = IXGBE_SUCCESS;
+       u32 mflcn_reg, fccfg_reg;
+       u32 reg;
+       u32 fcrtl, fcrth;
+
+       DEBUGFUNC("ixgbe_fc_enable_generic");
+
+       /* Negotiate the fc mode to use */
+       ret_val = ixgbe_fc_autoneg(hw);
+       if (ret_val == IXGBE_ERR_FLOW_CONTROL)
+               goto out;
+
+       /* Disable any previous flow control settings */
+       mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
+       mflcn_reg &= ~(IXGBE_MFLCN_RFCE | IXGBE_MFLCN_RPFCE);
+
+       fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
+       fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);
+
+       /*
+        * The possible values of fc.current_mode are:
+        * 0: Flow control is completely disabled
+        * 1: Rx flow control is enabled (we can receive pause frames,
+        *    but not send pause frames).
+        * 2: Tx flow control is enabled (we can send pause frames but
+        *    we do not support receiving pause frames).
+        * 3: Both Rx and Tx flow control (symmetric) are enabled.
+        * other: Invalid.
+        */
+       switch (hw->fc.current_mode) {
+       case ixgbe_fc_none:
+               /*
+                * Flow control is disabled by software override or autoneg.
+                * The code below will actually disable it in the HW.
+                */
+               break;
+       case ixgbe_fc_rx_pause:
+               /*
+                * Rx Flow control is enabled and Tx Flow control is
+                * disabled by software override. Since there really
+                * isn't a way to advertise that we are capable of RX
+                * Pause ONLY, we will advertise that we support both
+                * symmetric and asymmetric Rx PAUSE.  Later, we will
+                * disable the adapter's ability to send PAUSE frames.
+                */
+               mflcn_reg |= IXGBE_MFLCN_RFCE;
+               break;
+       case ixgbe_fc_tx_pause:
+               /*
+                * Tx Flow control is enabled, and Rx Flow control is
+                * disabled by software override.
+                */
+               fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
+               break;
+       case ixgbe_fc_full:
+               /* Flow control (both Rx and Tx) is enabled by SW override. */
+               mflcn_reg |= IXGBE_MFLCN_RFCE;
+               fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
+               break;
+       default:
+               DEBUGOUT("Flow control param set incorrectly\n");
+               ret_val = IXGBE_ERR_CONFIG;
+               goto out;
+               break;
+       }
+
+       /* Set 802.3x based flow control settings. */
+       mflcn_reg |= IXGBE_MFLCN_DPF;
+       IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
+       IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
+
+       /* NOTE(review): assumes water marks are in KB units, shifted here
+        * to the byte granularity the FCRTH/FCRTL registers expect — confirm */
+       fcrth = hw->fc.high_water[packetbuf_num] << 10;
+       fcrtl = hw->fc.low_water << 10;
+
+       /* NOTE(review): this bit test relies on ixgbe_fc_full having the
+        * ixgbe_fc_tx_pause bit set in the enum encoding — confirm */
+       if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
+               fcrth |= IXGBE_FCRTH_FCEN;
+               if (hw->fc.send_xon)
+                       fcrtl |= IXGBE_FCRTL_XONE;
+       }
+
+       IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num), fcrth);
+       IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(packetbuf_num), fcrtl);
+
+       /* Configure pause time (2 TCs per register) */
+       reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2));
+       if ((packetbuf_num & 1) == 0)
+               reg = (reg & 0xFFFF0000) | hw->fc.pause_time;
+       else
+               reg = (reg & 0x0000FFFF) | (hw->fc.pause_time << 16);
+       IXGBE_WRITE_REG(hw, IXGBE_FCTTV(packetbuf_num / 2), reg);
+
+       IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1));
+
+out:
+       return ret_val;
+}
+
+/**
+ *  ixgbe_fc_autoneg - Configure flow control
+ *  @hw: pointer to hardware structure
+ *
+ *  Compares our advertised flow control capabilities to those advertised by
+ *  our link partner, and determines the proper flow control mode to use.
+ **/
+s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw)
+{
+       /* Default to "not negotiated" so unsupported media types fall
+        * through to the requested_mode fallback at out: */
+       s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
+       ixgbe_link_speed speed;
+       bool link_up;
+
+       DEBUGFUNC("ixgbe_fc_autoneg");
+
+       if (hw->fc.disable_fc_autoneg)
+               goto out;
+
+       /*
+        * AN should have completed when the cable was plugged in.
+        * Look for reasons to bail out.  Bail out if:
+        * - FC autoneg is disabled, or if
+        * - link is not up.
+        *
+        * Since we're being called from an LSC, link is already known to be up.
+        * So use link_up_wait_to_complete=FALSE.
+        */
+       hw->mac.ops.check_link(hw, &speed, &link_up, FALSE);
+       if (!link_up) {
+               ret_val = IXGBE_ERR_FLOW_CONTROL;
+               goto out;
+       }
+
+       switch (hw->phy.media_type) {
+       /* Autoneg flow control on fiber adapters */
+       case ixgbe_media_type_fiber:
+               if (speed == IXGBE_LINK_SPEED_1GB_FULL)
+                       ret_val = ixgbe_fc_autoneg_fiber(hw);
+               break;
+
+       /* Autoneg flow control on backplane adapters */
+       case ixgbe_media_type_backplane:
+               ret_val = ixgbe_fc_autoneg_backplane(hw);
+               break;
+
+       /* Autoneg flow control on copper adapters */
+       case ixgbe_media_type_copper:
+               if (ixgbe_device_supports_autoneg_fc(hw) == IXGBE_SUCCESS)
+                       ret_val = ixgbe_fc_autoneg_copper(hw);
+               break;
+
+       default:
+               break;
+       }
+
+out:
+       /* On failure, revert to the user-requested mode */
+       if (ret_val == IXGBE_SUCCESS) {
+               hw->fc.fc_was_autonegged = TRUE;
+       } else {
+               hw->fc.fc_was_autonegged = FALSE;
+               hw->fc.current_mode = hw->fc.requested_mode;
+       }
+       return ret_val;
+}
+
+/**
+ *  ixgbe_fc_autoneg_fiber - Enable flow control on 1 gig fiber
+ *  @hw: pointer to hardware structure
+ *
+ *  Enable flow control according on 1 gig fiber.
+ *  Returns IXGBE_ERR_FC_NOT_NEGOTIATED when AN did not complete cleanly.
+ **/
+static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
+{
+       u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
+       s32 ret_val;
+
+       /*
+        * On multispeed fiber at 1g, bail out if
+        * - link is up but AN did not complete, or if
+        * - link is up and AN completed but timed out
+        */
+
+       /* The !! normalizes the masked status bits to 0/1 for the compare */
+       linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
+       if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
+           (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) {
+               ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
+               goto out;
+       }
+
+       /* Resolve fc mode from our 1G PCS advertisement vs the partner's */
+       pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
+       pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
+
+       ret_val =  ixgbe_negotiate_fc(hw, pcs_anadv_reg,
+                              pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE,
+                              IXGBE_PCS1GANA_ASM_PAUSE,
+                              IXGBE_PCS1GANA_SYM_PAUSE,
+                              IXGBE_PCS1GANA_ASM_PAUSE);
+
+out:
+       return ret_val;
+}
+
+/**
+ *  ixgbe_fc_autoneg_backplane - Enable flow control IEEE clause 37
+ *  @hw: pointer to hardware structure
+ *
+ *  Enable flow control according to IEEE clause 37.
+ *  Returns IXGBE_ERR_FC_NOT_NEGOTIATED when backplane AN is unusable.
+ **/
+static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
+{
+       u32 links2, anlp1_reg, autoc_reg, links;
+       s32 ret_val;
+
+       /*
+        * On backplane, bail out if
+        * - backplane autoneg was not completed, or if
+        * - we are 82599 and link partner is not AN enabled
+        */
+       links = IXGBE_READ_REG(hw, IXGBE_LINKS);
+       if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) {
+               /* Fall back to the user-requested mode */
+               hw->fc.fc_was_autonegged = FALSE;
+               hw->fc.current_mode = hw->fc.requested_mode;
+               ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
+               goto out;
+       }
+
+       if (hw->mac.type == ixgbe_mac_82599EB) {
+               links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
+               if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) {
+                       hw->fc.fc_was_autonegged = FALSE;
+                       hw->fc.current_mode = hw->fc.requested_mode;
+                       ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
+                       goto out;
+               }
+       }
+       /*
+        * Read the 10g AN autoc and LP ability registers and resolve
+        * local flow control settings accordingly
+        */
+       autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+       anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1);
+
+       ret_val = ixgbe_negotiate_fc(hw, autoc_reg,
+               anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE,
+               IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE);
+
+out:
+       return ret_val;
+}
+
+/**
+ *  ixgbe_fc_autoneg_copper - Enable flow control IEEE clause 37
+ *  @hw: pointer to hardware structure
+ *
+ *  Enable flow control according to IEEE clause 37.
+ **/
+static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
+{
+       u16 technology_ability_reg = 0;
+       u16 lp_technology_ability_reg = 0;
+
+       /* NOTE(review): the return codes of these PHY reads are ignored;
+        * on failure the ability values stay 0 and ixgbe_negotiate_fc
+        * reports not-negotiated — confirm this is the intended behavior */
+       hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
+                            IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+                            &technology_ability_reg);
+       hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_LP,
+                            IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+                            &lp_technology_ability_reg);
+
+       return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg,
+                                 (u32)lp_technology_ability_reg,
+                                 IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE,
+                                 IXGBE_TAF_SYM_PAUSE, IXGBE_TAF_ASM_PAUSE);
+}
+
+/**
+ *  ixgbe_negotiate_fc - Negotiate flow control
+ *  @hw: pointer to hardware structure
+ *  @adv_reg: flow control advertised settings
+ *  @lp_reg: link partner's flow control settings
+ *  @adv_sym: symmetric pause bit in advertisement
+ *  @adv_asm: asymmetric pause bit in advertisement
+ *  @lp_sym: symmetric pause bit in link partner advertisement
+ *  @lp_asm: asymmetric pause bit in link partner advertisement
+ *
+ *  Resolves hw->fc.current_mode from the intersection of our advertised
+ *  sym/asm pause bits and the link partner's.
+ **/
+static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
+                             u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
+{
+       bool loc_sym, loc_asm, rem_sym, rem_asm;
+
+       /* An empty advertisement on either side means nothing negotiated */
+       if (!adv_reg || !lp_reg)
+               return IXGBE_ERR_FC_NOT_NEGOTIATED;
+
+       loc_sym = (adv_reg & adv_sym) != 0;
+       loc_asm = (adv_reg & adv_asm) != 0;
+       rem_sym = (lp_reg & lp_sym) != 0;
+       rem_asm = (lp_reg & lp_asm) != 0;
+
+       if (loc_sym && rem_sym) {
+               /*
+                * Both ends support symmetric pause.  If the user selected
+                * Rx-only we had to advertise FULL (Rx-only cannot be
+                * advertised), so drop back to rx_pause here to turn off
+                * the transmission of PAUSE frames.
+                */
+               if (hw->fc.requested_mode == ixgbe_fc_full) {
+                       hw->fc.current_mode = ixgbe_fc_full;
+                       DEBUGOUT("Flow Control = FULL.\n");
+               } else {
+                       hw->fc.current_mode = ixgbe_fc_rx_pause;
+                       DEBUGOUT("Flow Control=RX PAUSE frames only\n");
+               }
+       } else if (!loc_sym && loc_asm && rem_sym && rem_asm) {
+               hw->fc.current_mode = ixgbe_fc_tx_pause;
+               DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
+       } else if (loc_sym && loc_asm && !rem_sym && rem_asm) {
+               hw->fc.current_mode = ixgbe_fc_rx_pause;
+               DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
+       } else {
+               hw->fc.current_mode = ixgbe_fc_none;
+               DEBUGOUT("Flow Control = NONE.\n");
+       }
+       return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_setup_fc - Set up flow control
+ *  @hw: pointer to hardware structure
+ *  @packetbuf_num: packet buffer number (0-7)
+ *
+ *  Called at init time to set up flow control.
+ *  Returns IXGBE_ERR_INVALID_LINK_SETTINGS on bad buffer number, bad water
+ *  marks, or strict-IEEE conflicts, IXGBE_ERR_CONFIG on a bad mode.
+ **/
+static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num)
+{
+       s32 ret_val = IXGBE_SUCCESS;
+       u32 reg = 0, reg_bp = 0;
+       u16 reg_cu = 0;
+
+       DEBUGFUNC("ixgbe_setup_fc");
+
+       /* Validate the packetbuf configuration */
+       if (packetbuf_num < 0 || packetbuf_num > 7) {
+               DEBUGOUT1("Invalid packet buffer number [%d], expected range "
+                         "is 0-7\n", packetbuf_num);
+               ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+               goto out;
+       }
+
+       /*
+        * Validate the water mark configuration.  Zero water marks are invalid
+        * because it causes the controller to just blast out fc packets.
+        */
+       if (!hw->fc.low_water ||
+           !hw->fc.high_water[packetbuf_num] ||
+           !hw->fc.pause_time) {
+               DEBUGOUT("Invalid water mark configuration\n");
+               ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+               goto out;
+       }
+
+       /*
+        * Validate the requested mode.  Strict IEEE mode does not allow
+        * ixgbe_fc_rx_pause because it will cause us to fail at UNH.
+        */
+       if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
+               DEBUGOUT("ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
+               ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+               goto out;
+       }
+
+       /*
+        * 10gig parts do not have a word in the EEPROM to determine the
+        * default flow control setting, so we explicitly set it to full.
+        */
+       if (hw->fc.requested_mode == ixgbe_fc_default)
+               hw->fc.requested_mode = ixgbe_fc_full;
+
+       /*
+        * Set up the 1G and 10G flow control advertisement registers so the
+        * HW will be able to do fc autoneg once the cable is plugged in.  If
+        * we link at 10G, the 1G advertisement is harmless and vice versa.
+        */
+
+       /* reg = 1G PCS advertisement, reg_bp = backplane AUTOC,
+        * reg_cu = copper PHY AN advertisement */
+       switch (hw->phy.media_type) {
+       case ixgbe_media_type_fiber:
+       case ixgbe_media_type_backplane:
+               reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
+               reg_bp = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+               break;
+
+       case ixgbe_media_type_copper:
+               hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
+                                       IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg_cu);
+               break;
+
+       default:
+               /* other media types need no advertisement registers read */
+               ;
+       }
+
+       /*
+        * The possible values of fc.requested_mode are:
+        * 0: Flow control is completely disabled
+        * 1: Rx flow control is enabled (we can receive pause frames,
+        *    but not send pause frames).
+        * 2: Tx flow control is enabled (we can send pause frames but
+        *    we do not support receiving pause frames).
+        * 3: Both Rx and Tx flow control (symmetric) are enabled.
+        * other: Invalid.
+        */
+       switch (hw->fc.requested_mode) {
+       case ixgbe_fc_none:
+               /* Flow control completely disabled by software override. */
+               reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
+               if (hw->phy.media_type == ixgbe_media_type_backplane)
+                       reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
+                                   IXGBE_AUTOC_ASM_PAUSE);
+               else if (hw->phy.media_type == ixgbe_media_type_copper)
+                       reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
+               break;
+       case ixgbe_fc_rx_pause:
+               /*
+                * Rx Flow control is enabled and Tx Flow control is
+                * disabled by software override. Since there really
+                * isn't a way to advertise that we are capable of RX
+                * Pause ONLY, we will advertise that we support both
+                * symmetric and asymmetric Rx PAUSE.  Later, we will
+                * disable the adapter's ability to send PAUSE frames.
+                */
+               reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
+               if (hw->phy.media_type == ixgbe_media_type_backplane)
+                       reg_bp |= (IXGBE_AUTOC_SYM_PAUSE |
+                                  IXGBE_AUTOC_ASM_PAUSE);
+               else if (hw->phy.media_type == ixgbe_media_type_copper)
+                       reg_cu |= (IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
+               break;
+       case ixgbe_fc_tx_pause:
+               /*
+                * Tx Flow control is enabled, and Rx Flow control is
+                * disabled by software override.
+                */
+               reg |= (IXGBE_PCS1GANA_ASM_PAUSE);
+               reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE);
+               if (hw->phy.media_type == ixgbe_media_type_backplane) {
+                       reg_bp |= (IXGBE_AUTOC_ASM_PAUSE);
+                       reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE);
+               } else if (hw->phy.media_type == ixgbe_media_type_copper) {
+                       reg_cu |= (IXGBE_TAF_ASM_PAUSE);
+                       reg_cu &= ~(IXGBE_TAF_SYM_PAUSE);
+               }
+               break;
+       case ixgbe_fc_full:
+               /* Flow control (both Rx and Tx) is enabled by SW override. */
+               reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
+               if (hw->phy.media_type == ixgbe_media_type_backplane)
+                       reg_bp |= (IXGBE_AUTOC_SYM_PAUSE |
+                                  IXGBE_AUTOC_ASM_PAUSE);
+               else if (hw->phy.media_type == ixgbe_media_type_copper)
+                       reg_cu |= (IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
+               break;
+       default:
+               DEBUGOUT("Flow control param set incorrectly\n");
+               ret_val = IXGBE_ERR_CONFIG;
+               goto out;
+               break;
+       }
+
+       if (hw->mac.type != ixgbe_mac_X540) {
+               /*
+                * Enable auto-negotiation between the MAC & PHY;
+                * the MAC will advertise clause 37 flow control.
+                */
+               IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
+               reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
+
+               /* Disable AN timeout */
+               if (hw->fc.strict_ieee)
+                       reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
+
+               IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
+               DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg);
+       }
+
+       /*
+        * AUTOC restart handles negotiation of 1G and 10G on backplane
+        * and copper. There is no need to set the PCS1GCTL register.
+        *
+        */
+       if (hw->phy.media_type == ixgbe_media_type_backplane) {
+               reg_bp |= IXGBE_AUTOC_AN_RESTART;
+               IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp);
+       } else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
+                   (ixgbe_device_supports_autoneg_fc(hw) == IXGBE_SUCCESS)) {
+               hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT,
+                                     IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg_cu);
+       }
+
+       /* NOTE(review): 'reg' was overwritten with PCS1GLCTL above (on
+        * non-X540 parts), so this message labelled IXGBE_AUTOC may be
+        * misleading — confirm intent */
+       DEBUGOUT1("Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
+out:
+       return ret_val;
+}
+
+/**
+ *  ixgbe_disable_pcie_master - Disable PCI-express master access
+ *  @hw: pointer to hardware structure
+ *
+ *  Disables PCI-Express master access and verifies there are no pending
+ *  requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable
+ *  bit hasn't caused the master requests to be disabled, else IXGBE_SUCCESS
+ *  is returned signifying master requests disabled.
+ *
+ *  Side effect: when the GIO status bit will not clear, sets
+ *  IXGBE_FLAGS_DOUBLE_RESET_REQUIRED in hw->mac.flags so the reset
+ *  routine performs two consecutive CTRL.RST cycles.
+ **/
+s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
+{
+       s32 status = IXGBE_SUCCESS;
+       u32 i;
+
+       DEBUGFUNC("ixgbe_disable_pcie_master");
+
+       /* Always set this bit to ensure any future transactions are blocked */
+       IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);
+
+       /* Exit early if master requests are already blocked */
+       if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
+               goto out;
+
+       /* Poll for the master request bit to clear, 100 usec per attempt */
+       for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
+               usec_delay(100);
+               if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
+                       goto out;
+       }
+
+       /*
+        * Two consecutive resets are required via CTRL.RST per datasheet
+        * 5.2.5.3.2 Master Disable.  We set a flag to inform the reset routine
+        * of this need.  The first reset prevents new master requests from
+        * being issued by our device.  We then must wait 1usec or more for any
+        * remaining completions from the PCIe bus to trickle in, and then reset
+        * again to clear out any effects they may have had on our device.
+        */
+       DEBUGOUT("GIO Master Disable bit didn't clear - requesting resets\n");
+       hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
+
+       /*
+        * Before proceeding, make sure that the PCIe block does not have
+        * transactions pending.  This reads PCI config space, not a CSR.
+        */
+       for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
+               usec_delay(100);
+               if (!(IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS) &
+                       IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
+                       goto out;
+       }
+
+       /* Both poll windows expired - report the failure to the caller */
+       DEBUGOUT("PCIe transaction pending bit also did not clear.\n");
+       status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
+
+out:
+       return status;
+}
+
+/**
+ *  ixgbe_acquire_swfw_sync - Acquire SWFW semaphore
+ *  @hw: pointer to hardware structure
+ *  @mask: Mask to specify which semaphore to acquire
+ *
+ *  Acquires the SWFW semaphore through the GSSR register for the specified
+ *  function (CSR, PHY0, PHY1, EEPROM, Flash)
+ *
+ *  Returns IXGBE_SUCCESS once the requested GSSR bits are owned, or
+ *  IXGBE_ERR_SWFW_SYNC if the EEPROM semaphore cannot be taken or the
+ *  resource stays busy for the whole retry window (200 tries * 5 ms).
+ **/
+s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
+{
+       u32 gssr;
+       u32 swmask = mask;              /* software-owned request bits */
+       u32 fwmask = mask << 5;         /* matching firmware-owned bits */
+       s32 timeout = 200;
+
+       DEBUGFUNC("ixgbe_acquire_swfw_sync");
+
+       while (timeout) {
+               /*
+                * SW EEPROM semaphore bit is used for access to all
+                * SW_FW_SYNC/GSSR bits (not just EEPROM)
+                */
+               if (ixgbe_get_eeprom_semaphore(hw))
+                       return IXGBE_ERR_SWFW_SYNC;
+
+               gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
+               if (!(gssr & (fwmask | swmask)))
+                       break;  /* resource free - claim it below */
+
+               /*
+                * Firmware currently using resource (fwmask) or other software
+                * thread currently using resource (swmask)
+                */
+               ixgbe_release_eeprom_semaphore(hw);
+               msec_delay(5);
+               timeout--;
+       }
+
+       if (!timeout) {
+               DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
+               return IXGBE_ERR_SWFW_SYNC;
+       }
+
+       /* Still holding the EEPROM semaphore here - mark the bits as ours */
+       gssr |= swmask;
+       IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
+
+       ixgbe_release_eeprom_semaphore(hw);
+       return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_release_swfw_sync - Release SWFW semaphore
+ *  @hw: pointer to hardware structure
+ *  @mask: Mask to specify which semaphore to release
+ *
+ *  Releases the SWFW semaphore through the GSSR register for the specified
+ *  function (CSR, PHY0, PHY1, EEPROM, Flash).  The EEPROM semaphore guards
+ *  the GSSR read-modify-write cycle.
+ **/
+void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask)
+{
+       u32 gssr_val;
+       u32 swmask = mask;
+
+       DEBUGFUNC("ixgbe_release_swfw_sync");
+
+       /* Take the EEPROM semaphore before touching GSSR */
+       ixgbe_get_eeprom_semaphore(hw);
+
+       /* Drop only the software-owned bits we were asked to release */
+       gssr_val = IXGBE_READ_REG(hw, IXGBE_GSSR) & ~swmask;
+       IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr_val);
+
+       ixgbe_release_eeprom_semaphore(hw);
+}
+
+/**
+ *  ixgbe_enable_rx_dma_generic - Enable the Rx DMA unit
+ *  @hw: pointer to hardware structure
+ *  @regval: register value to write to RXCTRL
+ *
+ *  Writes @regval straight into the RXCTRL register to enable the
+ *  Rx DMA unit.  Always succeeds.
+ **/
+s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
+{
+       DEBUGFUNC("ixgbe_enable_rx_dma_generic");
+
+       IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
+
+       return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_blink_led_start_generic - Blink LED based on index.
+ *  @hw: pointer to hardware structure
+ *  @index: led number to blink
+ *
+ *  The LED auto-blink feature requires link to be up; when it is not,
+ *  link is forced via AUTOC.FLU before programming LEDCTL.
+ **/
+s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
+{
+       ixgbe_link_speed link_speed = 0;
+       bool link_up = 0;
+       u32 autoc_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+       u32 ledctl_val = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+
+       DEBUGFUNC("ixgbe_blink_led_start_generic");
+
+       /* Force link up (FLU + AN restart) if it is currently down */
+       hw->mac.ops.check_link(hw, &link_speed, &link_up, FALSE);
+       if (!link_up) {
+               autoc_val |= (IXGBE_AUTOC_AN_RESTART | IXGBE_AUTOC_FLU);
+               IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_val);
+               IXGBE_WRITE_FLUSH(hw);
+               msec_delay(10);
+       }
+
+       /* Switch this LED's mode field to blink and latch the change */
+       ledctl_val = (ledctl_val & ~IXGBE_LED_MODE_MASK(index)) |
+                    IXGBE_LED_BLINK(index);
+       IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, ledctl_val);
+       IXGBE_WRITE_FLUSH(hw);
+
+       return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_blink_led_stop_generic - Stop blinking LED based on index.
+ *  @hw: pointer to hardware structure
+ *  @index: led number to stop blinking
+ *
+ *  Drops the forced-link-up override used for blinking and restores the
+ *  LED to the link/activity mode.
+ **/
+s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
+{
+       u32 autoc_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+       u32 ledctl_val = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+
+       DEBUGFUNC("ixgbe_blink_led_stop_generic");
+
+       /* Clear forced link-up and restart auto-negotiation */
+       autoc_val = (autoc_val & ~IXGBE_AUTOC_FLU) | IXGBE_AUTOC_AN_RESTART;
+       IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_val);
+
+       /* Clear blink mode and select link/activity for this LED */
+       ledctl_val &= ~(IXGBE_LED_MODE_MASK(index) | IXGBE_LED_BLINK(index));
+       ledctl_val |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
+       IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, ledctl_val);
+       IXGBE_WRITE_FLUSH(hw);
+
+       return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM
+ *  @hw: pointer to hardware structure
+ *  @san_mac_offset: SAN MAC address offset
+ *
+ *  Reads the EEPROM word that points at the SAN MAC address block and
+ *  stores it in *@san_mac_offset.  Callers decide whether the pointer
+ *  (0 / 0xFFFF meaning absent) is usable.  Used by both the get and set
+ *  mac_addr routines.
+ **/
+static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
+                                        u16 *san_mac_offset)
+{
+       DEBUGFUNC("ixgbe_get_san_mac_addr_offset");
+
+       /* Fetch the pointer word; no validation is done here */
+       hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR, san_mac_offset);
+
+       return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM
+ *  @hw: pointer to hardware structure
+ *  @san_mac_addr: storage for the 6-byte SAN MAC address
+ *
+ *  Reads the SAN MAC address from the EEPROM, if it's available.  This is
+ *  per-port, so set_lan_id() must be called before reading the addresses.
+ *  set_lan_id() is called by identify_sfp(), but this cannot be relied
+ *  upon for non-SFP connections, so we must call it here.
+ *
+ *  Always returns IXGBE_SUCCESS; when no SAN address block exists the
+ *  output buffer is filled with 0xFF instead.
+ **/
+s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
+{
+       u16 san_mac_data, san_mac_offset;
+       u8 i;
+
+       DEBUGFUNC("ixgbe_get_san_mac_addr_generic");
+
+       /*
+        * First read the EEPROM pointer to see if the MAC addresses are
+        * available.  If they're not, no point in calling set_lan_id() here.
+        */
+       ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
+
+       if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) {
+               /*
+                * No addresses available in this EEPROM.  It's not an
+                * error though, so just wipe the local address and return.
+                */
+               for (i = 0; i < 6; i++)
+                       san_mac_addr[i] = 0xFF;
+
+               goto san_mac_addr_out;
+       }
+
+       /* make sure we know which port we need to program */
+       hw->mac.ops.set_lan_id(hw);
+
+       /* apply the port offset to the address offset */
+       if (hw->bus.func)
+               san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET;
+       else
+               san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET;
+
+       /* each EEPROM word holds two address bytes, low byte first */
+       for (i = 0; i < 3; i++) {
+               hw->eeprom.ops.read(hw, san_mac_offset, &san_mac_data);
+               san_mac_addr[i * 2] = (u8)(san_mac_data);
+               san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8);
+               san_mac_offset++;
+       }
+
+san_mac_addr_out:
+       return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_set_san_mac_addr_generic - Write the SAN MAC address to the EEPROM
+ *  @hw: pointer to hardware structure
+ *  @san_mac_addr: 6-byte SAN MAC address to store
+ *
+ *  Write a SAN MAC address to the EEPROM for the current port.
+ *
+ *  Returns IXGBE_SUCCESS on success, or IXGBE_ERR_NO_SAN_ADDR_PTR when the
+ *  EEPROM carries no SAN MAC address pointer.
+ **/
+s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
+{
+       s32 status = IXGBE_SUCCESS;
+       u16 san_mac_data, san_mac_offset;
+       u8 i;
+
+       DEBUGFUNC("ixgbe_set_san_mac_addr_generic");
+
+       /* Look for SAN mac address pointer.  If not defined, return */
+       ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
+
+       if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) {
+               status = IXGBE_ERR_NO_SAN_ADDR_PTR;
+               goto san_mac_addr_out;
+       }
+
+       /* Make sure we know which port we need to write */
+       hw->mac.ops.set_lan_id(hw);
+
+       /* Apply the port offset to the address offset */
+       if (hw->bus.func)
+               san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET;
+       else
+               san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET;
+
+       /* Pack two address bytes per EEPROM word, low byte first */
+       for (i = 0; i < 3; i++) {
+               san_mac_data = (u16)((u16)(san_mac_addr[i * 2 + 1]) << 8);
+               san_mac_data |= (u16)(san_mac_addr[i * 2]);
+               hw->eeprom.ops.write(hw, san_mac_offset, san_mac_data);
+               san_mac_offset++;
+       }
+
+san_mac_addr_out:
+       return status;
+}
+
+/**
+ *  ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count
+ *  @hw: pointer to hardware structure
+ *
+ *  Read PCIe configuration space, and get the MSI-X vector count from
+ *  the capabilities table.  When hw->mac.msix_vectors_from_pcie is not
+ *  set, a fixed count of 64 is reported instead.
+ **/
+u32 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
+{
+       u32 vectors = 64;
+
+       DEBUGFUNC("ixgbe_get_pcie_msix_count_generic");
+       if (hw->mac.msix_vectors_from_pcie) {
+               vectors = IXGBE_READ_PCIE_WORD(hw,
+                                              IXGBE_PCIE_MSIX_82599_CAPS) &
+                         IXGBE_PCIE_MSIX_TBL_SZ_MASK;
+
+               /* HW reports the table size zero-based; convert to a count */
+               vectors++;
+       }
+
+       return vectors;
+}
+
+/**
+ *  ixgbe_insert_mac_addr_generic - Find a RAR for this mac address
+ *  @hw: pointer to hardware structure
+ *  @addr: Address to put into receive address register
+ *  @vmdq: VMDq pool to assign
+ *
+ *  Puts an ethernet address into a receive address register, or
+ *  finds the rar that it is already in; adds to the pool list
+ *
+ *  Returns the RAR index used, or IXGBE_ERR_INVALID_MAC_ADDR when no
+ *  RAR slot is available.
+ **/
+s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
+{
+       static const u32 NO_EMPTY_RAR_FOUND = 0xFFFFFFFF;
+       u32 first_empty_rar = NO_EMPTY_RAR_FOUND;
+       u32 rar;
+       u32 rar_low, rar_high;
+       u32 addr_low, addr_high;
+
+       DEBUGFUNC("ixgbe_insert_mac_addr_generic");
+
+       /* swap bytes for HW little endian: RAL holds bytes 0-3, RAH 4-5 */
+       addr_low  = addr[0] | (addr[1] << 8)
+                           | (addr[2] << 16)
+                           | (addr[3] << 24);
+       addr_high = addr[4] | (addr[5] << 8);
+
+       /*
+        * Either find the mac_id in rar or find the first empty space.
+        * rar_highwater points to just after the highest currently used
+        * rar in order to shorten the search.  It grows when we add a new
+        * rar to the top.
+        */
+       for (rar = 0; rar < hw->mac.rar_highwater; rar++) {
+               rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));
+
+               /* remember the first disabled (AV bit clear) slot we pass */
+               if (((IXGBE_RAH_AV & rar_high) == 0)
+                   && first_empty_rar == NO_EMPTY_RAR_FOUND) {
+                       first_empty_rar = rar;
+               } else if ((rar_high & 0xFFFF) == addr_high) {
+                       /* high halves match - compare the low 4 bytes too */
+                       rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(rar));
+                       if (rar_low == addr_low)
+                               break;    /* found it already in the rars */
+               }
+       }
+
+       if (rar < hw->mac.rar_highwater) {
+               /* already there so just add to the pool bits */
+               ixgbe_set_vmdq(hw, rar, vmdq);
+       } else if (first_empty_rar != NO_EMPTY_RAR_FOUND) {
+               /* stick it into first empty RAR slot we found */
+               rar = first_empty_rar;
+               ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
+       } else if (rar == hw->mac.rar_highwater) {
+               /* add it to the top of the list and inc the highwater mark */
+               ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
+               hw->mac.rar_highwater++;
+       } else if (rar >= hw->mac.num_rar_entries) {
+               return IXGBE_ERR_INVALID_MAC_ADDR;
+       }
+
+       /*
+        * If we found rar[0], make sure the default pool bit (we use pool 0)
+        * remains cleared to be sure default pool packets will get delivered
+        */
+       if (rar == 0)
+               ixgbe_clear_vmdq(hw, rar, 0);
+
+       return rar;
+}
+
+/**
+ *  ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address
+ *  @hw: pointer to hardware struct
+ *  @rar: receive address register index to disassociate
+ *  @vmdq: VMDq pool index to remove from the rar, or IXGBE_CLEAR_VMDQ_ALL
+ *
+ *  Clears the pool bit(s) for @rar in the MPSAR pair; when the last pool
+ *  bit goes away (and rar != 0) the RAR itself is cleared as well.
+ *  Returns IXGBE_ERR_INVALID_ARGUMENT for an out-of-range @rar.
+ **/
+s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+{
+       u32 mpsar_lo, mpsar_hi;
+       u32 rar_entries = hw->mac.num_rar_entries;
+
+       DEBUGFUNC("ixgbe_clear_vmdq_generic");
+
+       /* Make sure we are using a valid rar index range */
+       if (rar >= rar_entries) {
+               DEBUGOUT1("RAR index %d is out of range.\n", rar);
+               return IXGBE_ERR_INVALID_ARGUMENT;
+       }
+
+       /* MPSAR_LO tracks pools 0-31, MPSAR_HI pools 32-63 */
+       mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
+       mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
+
+       /* nothing associated with this rar - nothing to do */
+       if (!mpsar_lo && !mpsar_hi)
+               goto done;
+
+       if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
+               /* wipe both halves, keeping the locals in sync for the
+                * last-pool check below */
+               if (mpsar_lo) {
+                       IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
+                       mpsar_lo = 0;
+               }
+               if (mpsar_hi) {
+                       IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
+                       mpsar_hi = 0;
+               }
+       } else if (vmdq < 32) {
+               mpsar_lo &= ~(1 << vmdq);
+               IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
+       } else {
+               mpsar_hi &= ~(1 << (vmdq - 32));
+               IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
+       }
+
+       /* was that the last pool using this rar? */
+       if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
+               hw->mac.ops.clear_rar(hw, rar);
+done:
+       return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address
+ *  @hw: pointer to hardware struct
+ *  @rar: receive address register index to associate with a VMDq index
+ *  @vmdq: VMDq pool index
+ *
+ *  Sets the pool bit for @rar in the MPSAR pair (LO covers pools 0-31,
+ *  HI covers 32-63).  Returns IXGBE_ERR_INVALID_ARGUMENT for an
+ *  out-of-range @rar.
+ **/
+s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
+{
+       u32 pool_bits;
+
+       DEBUGFUNC("ixgbe_set_vmdq_generic");
+
+       /* Reject rar indices beyond what this MAC supports */
+       if (rar >= hw->mac.num_rar_entries) {
+               DEBUGOUT1("RAR index %d is out of range.\n", rar);
+               return IXGBE_ERR_INVALID_ARGUMENT;
+       }
+
+       if (vmdq < 32) {
+               pool_bits = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
+               pool_bits |= 1 << vmdq;
+               IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), pool_bits);
+       } else {
+               pool_bits = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
+               pool_bits |= 1 << (vmdq - 32);
+               IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), pool_bits);
+       }
+       return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array
+ *  @hw: pointer to hardware structure
+ *
+ *  Zeroes all 128 UTA registers.  Always succeeds.
+ **/
+s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
+{
+       int reg;
+
+       DEBUGFUNC("ixgbe_init_uta_tables_generic");
+       DEBUGOUT(" Clearing UTA\n");
+
+       for (reg = 0; reg < 128; reg++)
+               IXGBE_WRITE_REG(hw, IXGBE_UTA(reg), 0);
+
+       return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_find_vlvf_slot - find the vlanid or the first empty slot
+ *  @hw: pointer to hardware structure
+ *  @vlan: VLAN id to write to VLAN filter
+ *
+ *  Returns the VLVF index where this VLAN id should be placed: the entry
+ *  already holding @vlan, else the first empty entry, else
+ *  IXGBE_ERR_NO_SPACE when the table is full.
+ **/
+s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan)
+{
+       u32 bits = 0;
+       u32 first_empty_slot = 0;
+       s32 regindex;
+
+       DEBUGFUNC("ixgbe_find_vlvf_slot");
+
+       /* short cut the special case */
+       if (vlan == 0)
+               return 0;
+
+       /*
+        * Search for the vlan id in the VLVF entries. Save off the first
+        * empty slot found along the way.  Entry 0 is reserved for VLAN 0,
+        * so the scan starts at 1 and slot number 0 can double as the
+        * "no empty slot seen yet" sentinel.
+        */
+       for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) {
+               bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
+               if (!bits && !(first_empty_slot))
+                       first_empty_slot = regindex;
+               else if ((bits & 0x0FFF) == vlan)
+                       break;
+       }
+
+       /*
+        * If regindex is less than IXGBE_VLVF_ENTRIES, then we found the vlan
+        * in the VLVF. Else use the first empty VLVF register for this
+        * vlan id.
+        */
+       if (regindex >= IXGBE_VLVF_ENTRIES) {
+               if (first_empty_slot)
+                       regindex = first_empty_slot;
+               else {
+                       DEBUGOUT("No space in VLVF.\n");
+                       regindex = IXGBE_ERR_NO_SPACE;
+               }
+       }
+
+       return regindex;
+}
+
+/**
+ *  ixgbe_set_vfta_generic - Set VLAN filter table
+ *  @hw: pointer to hardware structure
+ *  @vlan: VLAN id to write to VLAN filter
+ *  @vind: VMDq output index that maps queue to VLAN id in VFVFB
+ *  @vlan_on: boolean flag to turn on/off VLAN in VFVF
+ *
+ *  Turn on/off specified VLAN in the VLAN filter table.
+ *
+ *  Returns IXGBE_ERR_PARAM for a VLAN id above 4095, a negative
+ *  ixgbe_find_vlvf_slot() error when the VLVF is full, else
+ *  IXGBE_SUCCESS.
+ **/
+s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+                           bool vlan_on)
+{
+       s32 regindex;
+       u32 bitindex;
+       u32 vfta;
+       u32 bits;
+       u32 vt;
+       u32 targetbit;
+       bool vfta_changed = FALSE;
+
+       DEBUGFUNC("ixgbe_set_vfta_generic");
+
+       if (vlan > 4095)
+               return IXGBE_ERR_PARAM;
+
+       /*
+        * this is a 2 part operation - first the VFTA, then the
+        * VLVF and VLVFB if VT Mode is set
+        * We don't write the VFTA until we know the VLVF part succeeded.
+        */
+
+       /* Part 1
+        * The VFTA is a bitstring made up of 128 32-bit registers
+        * that enable the particular VLAN id, much like the MTA:
+        *    bits[11-5]: which register
+        *    bits[4-0]:  which bit in the register
+        */
+       regindex = (vlan >> 5) & 0x7F;
+       bitindex = vlan & 0x1F;
+       targetbit = (1 << bitindex);
+       vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
+
+       /* only mark the VFTA word dirty if the target bit actually flips */
+       if (vlan_on) {
+               if (!(vfta & targetbit)) {
+                       vfta |= targetbit;
+                       vfta_changed = TRUE;
+               }
+       } else {
+               if ((vfta & targetbit)) {
+                       vfta &= ~targetbit;
+                       vfta_changed = TRUE;
+               }
+       }
+
+       /* Part 2
+        * If VT Mode is set
+        *   Either vlan_on
+        *     make sure the vlan is in VLVF
+        *     set the vind bit in the matching VLVFB
+        *   Or !vlan_on
+        *     clear the pool bit and possibly the vind
+        */
+       vt = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
+       if (vt & IXGBE_VT_CTL_VT_ENABLE) {
+               s32 vlvf_index;
+
+               /* locate (or allocate) the VLVF entry for this vlan */
+               vlvf_index = ixgbe_find_vlvf_slot(hw, vlan);
+               if (vlvf_index < 0)
+                       return vlvf_index;
+
+               /* each VLVF entry owns a pair of VLVFB pool-enable words:
+                * index*2 covers pools 0-31, index*2+1 covers 32-63 */
+               if (vlan_on) {
+                       /* set the pool bit */
+                       if (vind < 32) {
+                               bits = IXGBE_READ_REG(hw,
+                                               IXGBE_VLVFB(vlvf_index*2));
+                               bits |= (1 << vind);
+                               IXGBE_WRITE_REG(hw,
+                                               IXGBE_VLVFB(vlvf_index*2),
+                                               bits);
+                       } else {
+                               bits = IXGBE_READ_REG(hw,
+                                               IXGBE_VLVFB((vlvf_index*2)+1));
+                               bits |= (1 << (vind-32));
+                               IXGBE_WRITE_REG(hw,
+                                               IXGBE_VLVFB((vlvf_index*2)+1),
+                                               bits);
+                       }
+               } else {
+                       /* clear the pool bit, then fold in the other half so
+                        * "bits" reflects every pool still using this vlan */
+                       if (vind < 32) {
+                               bits = IXGBE_READ_REG(hw,
+                                               IXGBE_VLVFB(vlvf_index*2));
+                               bits &= ~(1 << vind);
+                               IXGBE_WRITE_REG(hw,
+                                               IXGBE_VLVFB(vlvf_index*2),
+                                               bits);
+                               bits |= IXGBE_READ_REG(hw,
+                                               IXGBE_VLVFB((vlvf_index*2)+1));
+                       } else {
+                               bits = IXGBE_READ_REG(hw,
+                                               IXGBE_VLVFB((vlvf_index*2)+1));
+                               bits &= ~(1 << (vind-32));
+                               IXGBE_WRITE_REG(hw,
+                                               IXGBE_VLVFB((vlvf_index*2)+1),
+                                               bits);
+                               bits |= IXGBE_READ_REG(hw,
+                                               IXGBE_VLVFB(vlvf_index*2));
+                       }
+               }
+
+               /*
+                * If there are still bits set in the VLVFB registers
+                * for the VLAN ID indicated we need to see if the
+                * caller is requesting that we clear the VFTA entry bit.
+                * If the caller has requested that we clear the VFTA
+                * entry bit but there are still pools/VFs using this VLAN
+                * ID entry then ignore the request.  We're not worried
+                * about the case where we're turning the VFTA VLAN ID
+                * entry bit on, only when requested to turn it off as
+                * there may be multiple pools and/or VFs using the
+                * VLAN ID entry.  In that case we cannot clear the
+                * VFTA bit until all pools/VFs using that VLAN ID have also
+                * been cleared.  This will be indicated by "bits" being
+                * zero.
+                */
+               if (bits) {
+                       IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index),
+                                       (IXGBE_VLVF_VIEN | vlan));
+                       if (!vlan_on) {
+                               /* someone wants to clear the vfta entry
+                                * but some pools/VFs are still using it.
+                                * Ignore it. */
+                               vfta_changed = FALSE;
+                       }
+               }
+               else
+                       IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
+       }
+
+       /* commit the VFTA word only when the VLVF part allowed it */
+       if (vfta_changed)
+               IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), vfta);
+
+       return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_clear_vfta_generic - Clear VLAN filter table
+ *  @hw: pointer to hardware structure
+ *
+ *  Clears the VLAN filter table, and the VMDq index associated with the
+ *  filter (each VLVF entry plus its pair of VLVFB pool-enable words).
+ **/
+s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
+{
+       u32 idx;
+
+       DEBUGFUNC("ixgbe_clear_vfta_generic");
+
+       /* zero every VFTA filter word */
+       for (idx = 0; idx < hw->mac.vft_size; idx++)
+               IXGBE_WRITE_REG(hw, IXGBE_VFTA(idx), 0);
+
+       /* zero each VLVF entry and both of its VLVFB words */
+       for (idx = 0; idx < IXGBE_VLVF_ENTRIES; idx++) {
+               IXGBE_WRITE_REG(hw, IXGBE_VLVF(idx), 0);
+               IXGBE_WRITE_REG(hw, IXGBE_VLVFB(idx * 2), 0);
+               IXGBE_WRITE_REG(hw, IXGBE_VLVFB(idx * 2 + 1), 0);
+       }
+
+       return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_check_mac_link_generic - Determine link and speed status
+ *  @hw: pointer to hardware structure
+ *  @speed: pointer to link speed
+ *  @link_up: TRUE when link is up
+ *  @link_up_wait_to_complete: bool used to wait for link up or not
+ *
+ *  Reads the links register to determine if link is up and the current speed.
+ *  Side effect: when link is down, hw->fc.current_mode and
+ *  hw->fc.fc_was_autonegged are reset.
+ **/
+s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+                                 bool *link_up, bool link_up_wait_to_complete)
+{
+       u32 links_reg, links_orig;
+       u32 i;
+
+       DEBUGFUNC("ixgbe_check_mac_link_generic");
+
+       /* clear the old state */
+       links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS);
+
+       /* read twice; report when the value changed between reads */
+       links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
+
+       if (links_orig != links_reg) {
+               DEBUGOUT2("LINKS changed from %08X to %08X\n",
+                         links_orig, links_reg);
+       }
+
+       if (link_up_wait_to_complete) {
+               /* poll up to IXGBE_LINK_UP_TIME iterations, 100 ms apart */
+               for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
+                       if (links_reg & IXGBE_LINKS_UP) {
+                               *link_up = TRUE;
+                               break;
+                       } else {
+                               *link_up = FALSE;
+                       }
+                       msec_delay(100);
+                       links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
+               }
+       } else {
+               if (links_reg & IXGBE_LINKS_UP)
+                       *link_up = TRUE;
+               else
+                       *link_up = FALSE;
+       }
+
+       /* decode the speed field using the 82599-family mask definitions */
+       if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
+           IXGBE_LINKS_SPEED_10G_82599)
+               *speed = IXGBE_LINK_SPEED_10GB_FULL;
+       else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
+                IXGBE_LINKS_SPEED_1G_82599)
+               *speed = IXGBE_LINK_SPEED_1GB_FULL;
+       else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
+                IXGBE_LINKS_SPEED_100_82599)
+               *speed = IXGBE_LINK_SPEED_100_FULL;
+       else
+               *speed = IXGBE_LINK_SPEED_UNKNOWN;
+
+       /* if link is down, zero out the current_mode */
+       if (*link_up == FALSE) {
+               hw->fc.current_mode = ixgbe_fc_none;
+               hw->fc.fc_was_autonegged = FALSE;
+       }
+
+       return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from
+ *  the EEPROM
+ *  @hw: pointer to hardware structure
+ *  @wwnn_prefix: the alternative WWNN prefix
+ *  @wwpn_prefix: the alternative WWPN prefix
+ *
+ *  This function will read the EEPROM from the alternative SAN MAC address
+ *  block to check the support for the alternative WWNN/WWPN prefix support.
+ *  Outputs stay 0xFFFF when the block is absent or lacks ALTWWN support.
+ **/
+s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
+                                 u16 *wwpn_prefix)
+{
+       u16 alt_blk_offset;
+       u16 caps_word;
+
+       DEBUGFUNC("ixgbe_get_wwn_prefix_generic");
+
+       /* default both outputs to "no prefix available" */
+       *wwnn_prefix = 0xFFFF;
+       *wwpn_prefix = 0xFFFF;
+
+       /* locate the alternative SAN MAC address block, if any */
+       hw->eeprom.ops.read(hw, IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR,
+                           &alt_blk_offset);
+       if ((alt_blk_offset == 0) || (alt_blk_offset == 0xFFFF))
+               return IXGBE_SUCCESS;
+
+       /* bail out unless the block advertises alternative WWN support */
+       hw->eeprom.ops.read(hw,
+                           alt_blk_offset +
+                           IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET,
+                           &caps_word);
+       if (!(caps_word & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
+               return IXGBE_SUCCESS;
+
+       /* fetch the WWNN and WWPN prefixes */
+       hw->eeprom.ops.read(hw,
+                           alt_blk_offset +
+                           IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET,
+                           wwnn_prefix);
+       hw->eeprom.ops.read(hw,
+                           alt_blk_offset +
+                           IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET,
+                           wwpn_prefix);
+
+       return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_get_fcoe_boot_status_generic - Get FCOE boot status from EEPROM
+ *  @hw: pointer to hardware structure
+ *  @bs: the fcoe boot status
+ *
+ *  This function will read the FCOE boot status from the iSCSI FCOE block.
+ *  On any EEPROM read failure the failing read's status is returned and
+ *  *bs is left as ixgbe_fcoe_bootstatus_unavailable.
+ **/
+s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs)
+{
+       u16 offset, caps, flags;
+       s32 status;
+
+       DEBUGFUNC("ixgbe_get_fcoe_boot_status_generic");
+
+       /* clear output first */
+       *bs = ixgbe_fcoe_bootstatus_unavailable;
+
+       /* check if FCOE IBA block is present */
+       offset = IXGBE_FCOE_IBA_CAPS_BLK_PTR;
+       status = hw->eeprom.ops.read(hw, offset, &caps);
+       if (status != IXGBE_SUCCESS)
+               goto out;
+
+       if (!(caps & IXGBE_FCOE_IBA_CAPS_FCOE))
+               goto out;
+
+       /* check if iSCSI FCOE block is populated */
+       status = hw->eeprom.ops.read(hw, IXGBE_ISCSI_FCOE_BLK_PTR, &offset);
+       if (status != IXGBE_SUCCESS)
+               goto out;
+
+       /* 0 / 0xFFFF pointer means the block is absent */
+       if ((offset == 0) || (offset == 0xFFFF))
+               goto out;
+
+       /* read fcoe flags in iSCSI FCOE block */
+       offset = offset + IXGBE_ISCSI_FCOE_FLAGS_OFFSET;
+       status = hw->eeprom.ops.read(hw, offset, &flags);
+       if (status != IXGBE_SUCCESS)
+               goto out;
+
+       if (flags & IXGBE_ISCSI_FCOE_FLAGS_ENABLE)
+               *bs = ixgbe_fcoe_bootstatus_enabled;
+       else
+               *bs = ixgbe_fcoe_bootstatus_disabled;
+
+out:
+       return status;
+}
+
+/**
+ *  ixgbe_device_supports_autoneg_fc - Check if phy supports autoneg flow
+ *  control
+ *  @hw: pointer to hardware structure
+ *
+ *  There are several phys that do not support autoneg flow control. This
+ *  function check the device id to see if the associated phy supports
+ *  autoneg flow control.  Returns IXGBE_SUCCESS when supported, else
+ *  IXGBE_ERR_FC_NOT_SUPPORTED.
+ **/
+static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
+{
+
+       DEBUGFUNC("ixgbe_device_supports_autoneg_fc");
+
+       /* only the X540T and 82599 T3 LOM device ids are whitelisted here */
+       switch (hw->device_id) {
+       case IXGBE_DEV_ID_X540T:
+               return IXGBE_SUCCESS;
+       case IXGBE_DEV_ID_82599_T3_LOM:
+               return IXGBE_SUCCESS;
+       default:
+               return IXGBE_ERR_FC_NOT_SUPPORTED;
+       }
+}
+
+/**
+ *  ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
+ *  @hw: pointer to hardware structure
+ *  @enable: enable or disable switch for anti-spoofing
+ *  @pf: Physical Function pool - do not enable anti-spoofing for the PF
+ *
+ *  No-op on 82598, which has no PFVFSPOOF registers.
+ **/
+void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf)
+{
+       int j;
+       int pf_target_reg = pf >> 3;    /* 8 pools per PFVFSPOOF register */
+       int pf_target_shift = pf % 8;   /* bit position within that register */
+       u32 pfvfspoof = 0;
+
+       if (hw->mac.type == ixgbe_mac_82598EB)
+               return;
+
+       if (enable)
+               pfvfspoof = IXGBE_SPOOF_MACAS_MASK;
+
+       /*
+        * PFVFSPOOF register array is size 8 with 8 bits assigned to
+        * MAC anti-spoof enables in each register array element.
+        */
+       for (j = 0; j < IXGBE_PFVFSPOOF_REG_COUNT; j++)
+               IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);
+
+       /* If not enabling anti-spoofing then done */
+       if (!enable)
+               return;
+
+       /*
+        * The PF should be allowed to spoof so that it can support
+        * emulation mode NICs.  Reset the bit assigned to the PF
+        */
+       pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(pf_target_reg));
+       pfvfspoof ^= (1 << pf_target_shift);    /* bit was set above; XOR clears it */
+       IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(pf_target_reg), pfvfspoof);
+}
+
+/**
+ *  ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing
+ *  @hw: pointer to hardware structure
+ *  @enable: enable or disable switch for VLAN anti-spoofing
+ *  @vf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing
+ *
+ *  No-op on 82598, which has no PFVFSPOOF registers.
+ **/
+void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
+{
+       int vf_target_reg = vf >> 3;    /* 8 pools per PFVFSPOOF register */
+       int vf_target_shift = vf % 8 + IXGBE_SPOOF_VLANAS_SHIFT;
+       u32 pfvfspoof;
+
+       if (hw->mac.type == ixgbe_mac_82598EB)
+               return;
+
+       /* read-modify-write only the VLANAS bit for this VF pool */
+       pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
+       if (enable)
+               pfvfspoof |= (1 << vf_target_shift);
+       else
+               pfvfspoof &= ~(1 << vf_target_shift);
+       IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
+}
+
+/**
+ *  ixgbe_get_device_caps_generic - Get additional device capabilities
+ *  @hw: pointer to hardware structure
+ *  @device_caps: the EEPROM word with the extra device capabilities
+ *
+ *  This function will read the EEPROM location for the device capabilities,
+ *  and return the word through device_caps.  Always returns IXGBE_SUCCESS;
+ *  the EEPROM read's own status is not checked.
+ **/
+s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps)
+{
+       DEBUGFUNC("ixgbe_get_device_caps_generic");
+
+       hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);
+
+       return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_enable_relaxed_ordering_gen2 - Enable relaxed ordering
+ *  @hw: pointer to hardware structure
+ *
+ *  Sets the PCIe relaxed-ordering enable bits in every per-queue
+ *  Tx and Rx DCA control register.
+ **/
+void ixgbe_enable_relaxed_ordering_gen2(struct ixgbe_hw *hw)
+{
+       u32 regval;
+       u32 i;
+
+       DEBUGFUNC("ixgbe_enable_relaxed_ordering_gen2");
+
+       /* Enable relaxed ordering */
+       for (i = 0; i < hw->mac.max_tx_queues; i++) {
+               regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
+               regval |= IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
+               IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
+       }
+
+       /* descriptor write-back and header relaxed ordering for Rx queues */
+       for (i = 0; i < hw->mac.max_rx_queues; i++) {
+               regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
+               regval |= (IXGBE_DCA_RXCTRL_DESC_WRO_EN |
+                          IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
+               IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
+       }
+
+}
+
+/**
+ *  ixgbe_calculate_checksum - Calculate checksum for buffer
+ *  @buffer: pointer to EEPROM
+ *  @length: size of EEPROM to calculate a checksum for
+ *  Calculates the checksum for some buffer on a specified length.  The
+ *  checksum calculated is returned.  The value is the two's complement of
+ *  the byte sum, so summing the buffer plus the checksum yields 0 (mod 256).
+ *  A NULL buffer returns 0.
+ **/
+static u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
+{
+       u32 i;
+       u8 sum = 0;
+
+       DEBUGFUNC("ixgbe_calculate_checksum");
+
+       if (!buffer)
+               return 0;
+
+       for (i = 0; i < length; i++)
+               sum += buffer[i];
+
+       return (u8) (0 - sum);
+}
+
+/**
+ *  ixgbe_host_interface_command - Issue command to manageability block
+ *  @hw: pointer to the HW structure
+ *  @buffer: contains the command to write and where the return status will
+ *           be placed
+ *  @length: length of buffer, must be multiple of 4 bytes
+ *
+ *  Communicates with the manageability block.  On success return IXGBE_SUCCESS
+ *  else return IXGBE_ERR_HOST_INTERFACE_COMMAND.
+ **/
+static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u8 *buffer,
+                                        u32 length)
+{
+       u32 hicr, i;
+       u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
+       u8 buf_len, dword_len;
+
+       s32 ret_val = IXGBE_SUCCESS;
+
+       DEBUGFUNC("ixgbe_host_interface_command");
+
+       /* length must be non-zero, DWORD aligned and fit in the FLEX_MNG RAM */
+       if (length == 0 || length & 0x3 ||
+           length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
+               DEBUGOUT("Buffer length failure.\n");
+               ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
+               goto out;
+       }
+
+       /* Check that the host interface is enabled. */
+       hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
+       if ((hicr & IXGBE_HICR_EN) == 0) {
+               DEBUGOUT("IXGBE_HOST_EN bit disabled.\n");
+               ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
+               goto out;
+       }
+
+       /* Calculate length in DWORDs */
+       dword_len = length >> 2;
+
+       /*
+        * The device driver writes the relevant command block
+        * into the ram area.
+        */
+       for (i = 0; i < dword_len; i++)
+               IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG,
+                                     i, *((u32 *)buffer + i));
+
+       /* Setting this bit tells the ARC that a new command is pending. */
+       IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);
+
+       /* Poll (1 ms granularity) until firmware clears the command bit. */
+       for (i = 0; i < IXGBE_HI_COMMAND_TIMEOUT; i++) {
+               hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
+               if (!(hicr & IXGBE_HICR_C))
+                       break;
+               msec_delay(1);
+       }
+
+       /* Check command successful completion. */
+       if (i == IXGBE_HI_COMMAND_TIMEOUT ||
+           (!(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV))) {
+               DEBUGOUT("Command has failed with no status valid.\n");
+               ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
+               goto out;
+       }
+
+       /* Calculate header length in DWORDs */
+       dword_len = hdr_size >> 2;
+
+       /* first pull in the header so we know the buffer length */
+       for (i = 0; i < dword_len; i++)
+               *((u32 *)buffer + i) =
+                       IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, i);
+
+       /* If there is any thing in data position pull it in */
+       buf_len = ((struct ixgbe_hic_hdr *)buffer)->buf_len;
+       if (buf_len == 0)
+               goto out;
+
+       if (length < (buf_len + hdr_size)) {
+               DEBUGOUT("Buffer not large enough for reply message.\n");
+               ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
+               goto out;
+       }
+
+       /*
+        * Calculate payload length in DWORDs, rounding up so odd byte
+        * lengths still pull in the final partial DWORD.  (Previously this
+        * used (buf_len + 1) >> 2, which under-rounds for buf_len % 4 >= 2,
+        * and the loop below compared the DWORD index i against the byte
+        * count buf_len, over-reading the FLEX_MNG area.)
+        */
+       dword_len = (buf_len + 3) >> 2;
+
+       /* Pull in the rest of the buffer (i is where we left off)*/
+       for (; i < (hdr_size >> 2) + dword_len; i++)
+               *((u32 *)buffer + i) =
+                       IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, i);
+
+out:
+       return ret_val;
+}
+
+/**
+ *  ixgbe_set_fw_drv_ver_generic - Sends driver version to firmware
+ *  @hw: pointer to the HW structure
+ *  @maj: driver version major number
+ *  @min: driver version minor number
+ *  @build: driver version build number
+ *  @sub: driver version sub build number
+ *
+ *  Sends driver version number to firmware through the manageability
+ *  block.  On success return IXGBE_SUCCESS
+ *  else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring
+ *  semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails.
+ **/
+s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
+                                u8 build, u8 sub)
+{
+       struct ixgbe_hic_drv_info fw_cmd;
+       int i;
+       s32 ret_val = IXGBE_SUCCESS;
+
+       DEBUGFUNC("ixgbe_set_fw_drv_ver_generic");
+
+       /* serialize access to the host interface with firmware */
+       if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM)
+           != IXGBE_SUCCESS) {
+               ret_val = IXGBE_ERR_SWFW_SYNC;
+               goto out;
+       }
+
+       fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
+       fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
+       fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
+       fw_cmd.port_num = (u8)hw->bus.func;
+       fw_cmd.ver_maj = maj;
+       fw_cmd.ver_min = min;
+       fw_cmd.ver_build = build;
+       fw_cmd.ver_sub = sub;
+       /* checksum field must be zero while the checksum is computed */
+       fw_cmd.hdr.checksum = 0;
+       fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd,
+                               (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len));
+       /* NOTE(review): pad/pad2 are zeroed after the checksum is computed;
+        * if FW_CEM_CMD_DRIVER_INFO_LEN covers them, the checksum is taken
+        * over uninitialized bytes - verify against the struct layout. */
+       fw_cmd.pad = 0;
+       fw_cmd.pad2 = 0;
+
+       /* retry only transport failures; a completed command breaks out */
+       for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
+               ret_val = ixgbe_host_interface_command(hw, (u8 *)&fw_cmd,
+                                                      sizeof(fw_cmd));
+               if (ret_val != IXGBE_SUCCESS)
+                       continue;
+
+               if (fw_cmd.hdr.cmd_or_resp.ret_status ==
+                   FW_CEM_RESP_STATUS_SUCCESS)
+                       ret_val = IXGBE_SUCCESS;
+               else
+                       ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
+
+               break;
+       }
+
+       hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
+out:
+       return ret_val;
+}
+
+/**
+ * ixgbe_set_rxpba_generic - Initialize Rx packet buffer
+ * @hw: pointer to hardware structure
+ * @num_pb: number of packet buffers to allocate (0 is treated as 1)
+ * @headroom: reserve n KB of headroom
+ * @strategy: packet buffer allocation strategy (PBA_STRATEGY_WEIGHTED or
+ *            PBA_STRATEGY_EQUAL; any other value leaves Rx sizes untouched)
+ **/
+void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom,
+                             int strategy)
+{
+       u32 pbsize = hw->mac.rx_pb_size;
+       int i = 0;
+       u32 rxpktsize, txpktsize, txpbthresh;
+
+       /* Reserve headroom */
+       pbsize -= headroom;
+
+       if (!num_pb)
+               num_pb = 1;
+
+       /* Divide remaining packet buffer space amongst the number of packet
+        * buffers requested using supplied strategy.
+        */
+       switch (strategy) {
+       case (PBA_STRATEGY_WEIGHTED):
+               /* pba_80_48 strategy weight first half of packet buffer with
+                * 5/8 of the packet buffer space.
+                */
+               rxpktsize = (pbsize * 5 * 2) / (num_pb * 8);
+               pbsize -= rxpktsize * (num_pb / 2);
+               rxpktsize <<= IXGBE_RXPBSIZE_SHIFT;
+               for (; i < (num_pb / 2); i++)
+                       IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
+               /* Fall through to configure remaining packet buffers */
+       case (PBA_STRATEGY_EQUAL):
+               /* split what is left evenly among the remaining buffers */
+               rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT;
+               for (; i < num_pb; i++)
+                       IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
+               break;
+       default:
+               break;
+       }
+
+       /* Only support an equally distributed Tx packet buffer strategy. */
+       txpktsize = IXGBE_TXPBSIZE_MAX / num_pb;
+       txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX;
+       for (i = 0; i < num_pb; i++) {
+               IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize);
+               IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh);
+       }
+
+       /* Clear unused TCs, if any, to zero buffer size*/
+       for (; i < IXGBE_MAX_PB; i++) {
+               IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
+               IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0);
+               IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0);
+       }
+}
+
+/**
+ * ixgbe_clear_tx_pending - Clear pending TX work from the PCIe fifo
+ * @hw: pointer to the hardware structure
+ *
+ * The 82599 and x540 MACs can experience issues if TX work is still pending
+ * when a reset occurs.  This function prevents this by flushing the PCIe
+ * buffers on the system.  No-op unless the MAC flags request a double reset.
+ **/
+void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
+{
+       u32 gcr_ext, hlreg0;
+
+       /*
+        * If double reset is not requested then all transactions should
+        * already be clear and as such there is no work to do
+        */
+       if (!(hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED))
+               return;
+
+       /*
+        * Set loopback enable to prevent any transmits from being sent
+        * should the link come up.  This assumes that the RXCTRL.RXEN bit
+        * has already been cleared.
+        */
+       hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+       IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK);
+
+       /* initiate cleaning flow for buffers in the PCIe transaction layer */
+       gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
+       IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT,
+                       gcr_ext | IXGBE_GCR_EXT_BUFFERS_CLEAR);
+
+       /* Flush all writes and allow 20usec for all transactions to clear */
+       IXGBE_WRITE_FLUSH(hw);
+       usec_delay(20);
+
+       /* restore previous register values */
+       IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
+       IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
+}
diff --git a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_common.h b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_common.h
new file mode 100644 (file)
index 0000000..35f1e33
--- /dev/null
@@ -0,0 +1,135 @@
+/******************************************************************************
+
+  Copyright (c) 2001-2010, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#ifndef _IXGBE_COMMON_H_
+#define _IXGBE_COMMON_H_
+
+#include "ixgbe_type.h"
+/* Write a 64-bit value as two 32-bit register writes (low DWORD first). */
+#define IXGBE_WRITE_REG64(hw, reg, value) \
+       do { \
+               IXGBE_WRITE_REG(hw, reg, (u32) value); \
+               IXGBE_WRITE_REG(hw, reg + 4, (u32) (value >> 32)); \
+       } while (0)
+
+u32 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw);
+
+s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw);
+s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw);
+s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw);
+s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw);
+s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw);
+s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num);
+s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
+                                  u32 pba_num_size);
+s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr);
+s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw);
+void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw);
+s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw);
+
+s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index);
+s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index);
+
+s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw);
+s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
+s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+                                               u16 words, u16 *data);
+s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data);
+s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
+                                   u16 words, u16 *data);
+s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
+s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
+                                    u16 words, u16 *data);
+s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+                                       u16 *data);
+s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
+                                              u16 words, u16 *data);
+u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw);
+s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
+                                           u16 *checksum_val);
+s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw);
+s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg);
+
+s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
+                          u32 enable_addr);
+s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index);
+s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw);
+s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
+                                      u32 mc_addr_count,
+                                      ixgbe_mc_addr_itr func, bool clear);
+s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list,
+                                      u32 addr_count, ixgbe_mc_addr_itr func);
+s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw);
+s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw);
+s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval);
+
+s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packtetbuf_num);
+s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw);
+
+s32 ixgbe_validate_mac_addr(u8 *mac_addr);
+s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask);
+void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask);
+s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw);
+
+s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index);
+s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index);
+
+s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr);
+s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr);
+
+s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
+s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
+s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq);
+s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw);
+s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan,
+                         u32 vind, bool vlan_on);
+s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw);
+
+s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw,
+                               ixgbe_link_speed *speed,
+                               bool *link_up, bool link_up_wait_to_complete);
+
+s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
+                                 u16 *wwpn_prefix);
+
+s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs);
+void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf);
+void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf);
+s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps);
+void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom,
+                             int strategy);
+void ixgbe_enable_relaxed_ordering_gen2(struct ixgbe_hw *hw);
+s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
+                                 u8 build, u8 ver);
+void ixgbe_clear_tx_pending(struct ixgbe_hw *hw);
+#endif /* _IXGBE_COMMON_H_ */
diff --git a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_mbx.c b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_mbx.c
new file mode 100644 (file)
index 0000000..0fe4ca7
--- /dev/null
@@ -0,0 +1,751 @@
+/******************************************************************************
+
+  Copyright (c) 2001-2010, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#include "ixgbe_type.h"
+#include "ixgbe_mbx.h"
+
+/**
+ *  ixgbe_read_mbx - Reads a message from the mailbox
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer
+ *  @mbx_id: id of mailbox to read
+ *
+ *  returns SUCCESS if it successfully read message from buffer;
+ *  IXGBE_ERR_MBX when no MAC-specific read op is installed.
+ **/
+s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+{
+       struct ixgbe_mbx_info *mbx = &hw->mbx;
+       s32 ret_val = IXGBE_ERR_MBX;
+
+       DEBUGFUNC("ixgbe_read_mbx");
+
+       /* limit read to size of mailbox */
+       if (size > mbx->size)
+               size = mbx->size;
+
+       /* delegate to the MAC-specific read op when one is registered */
+       if (mbx->ops.read)
+               ret_val = mbx->ops.read(hw, msg, size, mbx_id);
+
+       return ret_val;
+}
+
+/**
+ *  ixgbe_write_mbx - Write a message to the mailbox
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer
+ *  @mbx_id: id of mailbox to write
+ *
+ *  returns SUCCESS if it successfully copied message into the buffer;
+ *  IXGBE_ERR_MBX when the message is larger than the mailbox.
+ **/
+s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+{
+       struct ixgbe_mbx_info *mbx = &hw->mbx;
+       s32 ret_val = IXGBE_SUCCESS;
+
+       DEBUGFUNC("ixgbe_write_mbx");
+
+       /* oversize messages are rejected without touching the mailbox */
+       if (size > mbx->size)
+               ret_val = IXGBE_ERR_MBX;
+
+       else if (mbx->ops.write)
+               ret_val = mbx->ops.write(hw, msg, size, mbx_id);
+
+       return ret_val;
+}
+
+/**
+ *  ixgbe_check_for_msg - checks to see if someone sent us mail
+ *  @hw: pointer to the HW structure
+ *  @mbx_id: id of mailbox to check
+ *
+ *  returns SUCCESS if the Status bit was found or else ERR_MBX
+ *  (also ERR_MBX when no check_for_msg op is installed)
+ **/
+s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
+{
+       struct ixgbe_mbx_info *mbx = &hw->mbx;
+       s32 ret_val = IXGBE_ERR_MBX;
+
+       DEBUGFUNC("ixgbe_check_for_msg");
+
+       if (mbx->ops.check_for_msg)
+               ret_val = mbx->ops.check_for_msg(hw, mbx_id);
+
+       return ret_val;
+}
+
+/**
+ *  ixgbe_check_for_ack - checks to see if someone sent us ACK
+ *  @hw: pointer to the HW structure
+ *  @mbx_id: id of mailbox to check
+ *
+ *  returns SUCCESS if the Status bit was found or else ERR_MBX
+ *  (also ERR_MBX when no check_for_ack op is installed)
+ **/
+s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
+{
+       struct ixgbe_mbx_info *mbx = &hw->mbx;
+       s32 ret_val = IXGBE_ERR_MBX;
+
+       DEBUGFUNC("ixgbe_check_for_ack");
+
+       if (mbx->ops.check_for_ack)
+               ret_val = mbx->ops.check_for_ack(hw, mbx_id);
+
+       return ret_val;
+}
+
+/**
+ *  ixgbe_check_for_rst - checks to see if other side has reset
+ *  @hw: pointer to the HW structure
+ *  @mbx_id: id of mailbox to check
+ *
+ *  returns SUCCESS if the Status bit was found or else ERR_MBX
+ *  (also ERR_MBX when no check_for_rst op is installed)
+ **/
+s32 ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id)
+{
+       struct ixgbe_mbx_info *mbx = &hw->mbx;
+       s32 ret_val = IXGBE_ERR_MBX;
+
+       DEBUGFUNC("ixgbe_check_for_rst");
+
+       if (mbx->ops.check_for_rst)
+               ret_val = mbx->ops.check_for_rst(hw, mbx_id);
+
+       return ret_val;
+}
+
+/**
+ *  ixgbe_poll_for_msg - Wait for message notification
+ *  @hw: pointer to the HW structure
+ *  @mbx_id: id of mailbox to write
+ *
+ *  returns SUCCESS if it successfully received a message notification.
+ *  Polls up to mbx->timeout times with mbx->usec_delay between attempts;
+ *  a zero timeout or missing check_for_msg op fails immediately.
+ **/
+static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
+{
+       struct ixgbe_mbx_info *mbx = &hw->mbx;
+       int countdown = mbx->timeout;
+
+       DEBUGFUNC("ixgbe_poll_for_msg");
+
+       if (!countdown || !mbx->ops.check_for_msg)
+               goto out;
+
+       /* loop exits with countdown > 0 only when a message was seen */
+       while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) {
+               countdown--;
+               if (!countdown)
+                       break;
+               usec_delay(mbx->usec_delay);
+       }
+
+out:
+       return countdown ? IXGBE_SUCCESS : IXGBE_ERR_MBX;
+}
+
+/**
+ *  ixgbe_poll_for_ack - Wait for message acknowledgement
+ *  @hw: pointer to the HW structure
+ *  @mbx_id: id of mailbox to write
+ *
+ *  returns SUCCESS if it successfully received a message acknowledgement.
+ *  Polls up to mbx->timeout times with mbx->usec_delay between attempts;
+ *  a zero timeout or missing check_for_ack op fails immediately.
+ **/
+static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
+{
+       struct ixgbe_mbx_info *mbx = &hw->mbx;
+       int countdown = mbx->timeout;
+
+       DEBUGFUNC("ixgbe_poll_for_ack");
+
+       if (!countdown || !mbx->ops.check_for_ack)
+               goto out;
+
+       /* loop exits with countdown > 0 only when an ack was seen */
+       while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) {
+               countdown--;
+               if (!countdown)
+                       break;
+               usec_delay(mbx->usec_delay);
+       }
+
+out:
+       return countdown ? IXGBE_SUCCESS : IXGBE_ERR_MBX;
+}
+
+/**
+ *  ixgbe_read_posted_mbx - Wait for message notification and receive message
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer
+ *  @mbx_id: id of mailbox to write
+ *
+ *  returns SUCCESS if it successfully received a message notification and
+ *  copied it into the receive buffer.
+ **/
+s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+{
+       struct ixgbe_mbx_info *mbx = &hw->mbx;
+       s32 ret_val = IXGBE_ERR_MBX;
+
+       DEBUGFUNC("ixgbe_read_posted_mbx");
+
+       /* cannot read at all without a registered read op */
+       if (!mbx->ops.read)
+               goto out;
+
+       ret_val = ixgbe_poll_for_msg(hw, mbx_id);
+
+       /* if ack received read message, otherwise we timed out */
+       if (!ret_val)
+               ret_val = mbx->ops.read(hw, msg, size, mbx_id);
+out:
+       return ret_val;
+}
+
+/**
+ *  ixgbe_write_posted_mbx - Write a message to the mailbox, wait for ack
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer
+ *  @mbx_id: id of mailbox to write
+ *
+ *  returns SUCCESS if it successfully copied message into the buffer and
+ *  received an ack to that message within delay * timeout period
+ **/
+s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
+                           u16 mbx_id)
+{
+       struct ixgbe_mbx_info *mbx = &hw->mbx;
+       s32 ret_val = IXGBE_ERR_MBX;
+
+       DEBUGFUNC("ixgbe_write_posted_mbx");
+
+       /* exit if either we can't write or there isn't a defined timeout */
+       if (!mbx->ops.write || !mbx->timeout)
+               goto out;
+
+       /* send msg */
+       ret_val = mbx->ops.write(hw, msg, size, mbx_id);
+
+       /* if msg sent wait until we receive an ack */
+       if (!ret_val)
+               ret_val = ixgbe_poll_for_ack(hw, mbx_id);
+out:
+       return ret_val;
+}
+
+/**
+ *  ixgbe_init_mbx_ops_generic - Initialize MB function pointers
+ *  @hw: pointer to the HW structure
+ *
+ *  Setups up the mailbox read and write message function pointers.
+ *  Only the posted (wait-for-completion) ops are installed here; the
+ *  raw read/write/check ops are MAC-specific and set elsewhere.
+ **/
+void ixgbe_init_mbx_ops_generic(struct ixgbe_hw *hw)
+{
+       struct ixgbe_mbx_info *mbx = &hw->mbx;
+
+       mbx->ops.read_posted = ixgbe_read_posted_mbx;
+       mbx->ops.write_posted = ixgbe_write_posted_mbx;
+}
+
+/**
+ *  ixgbe_read_v2p_mailbox - read v2p mailbox
+ *  @hw: pointer to the HW structure
+ *
+ *  This function is used to read the v2p mailbox without losing the read to
+ *  clear status bits.  Previously-seen read-to-clear bits are merged back in
+ *  from the software copy kept in hw->mbx.v2p_mailbox, and the copy is
+ *  refreshed with any R2C bits observed in this read.
+ **/
+static u32 ixgbe_read_v2p_mailbox(struct ixgbe_hw *hw)
+{
+       u32 v2p_mailbox = IXGBE_READ_REG(hw, IXGBE_VFMAILBOX);
+
+       v2p_mailbox |= hw->mbx.v2p_mailbox;
+       hw->mbx.v2p_mailbox |= v2p_mailbox & IXGBE_VFMAILBOX_R2C_BITS;
+
+       return v2p_mailbox;
+}
+
+/**
+ *  ixgbe_check_for_bit_vf - Determine if a status bit was set
+ *  @hw: pointer to the HW structure
+ *  @mask: bitmask for bits to be tested and cleared
+ *
+ *  Tests @mask against the (cached) V2P mailbox value; returns SUCCESS if
+ *  any bit in @mask was set.  The tested bits are removed from the cache
+ *  so they are only reported once.
+ **/
+static s32 ixgbe_check_for_bit_vf(struct ixgbe_hw *hw, u32 mask)
+{
+       u32 mailbox_bits = ixgbe_read_v2p_mailbox(hw);
+       s32 rc;
+
+       rc = (mailbox_bits & mask) ? IXGBE_SUCCESS : IXGBE_ERR_MBX;
+
+       /* consume the bits we just tested */
+       hw->mbx.v2p_mailbox &= ~mask;
+
+       return rc;
+}
+
+/**
+ *  ixgbe_check_for_msg_vf - checks to see if the PF has sent mail
+ *  @hw: pointer to the HW structure
+ *  @mbx_id: id of mailbox to check (unused on the VF side)
+ *
+ *  returns SUCCESS if the PF has set the Status bit or else ERR_MBX
+ **/
+static s32 ixgbe_check_for_msg_vf(struct ixgbe_hw *hw, u16 mbx_id)
+{
+       UNREFERENCED_1PARAMETER(mbx_id);
+       DEBUGFUNC("ixgbe_check_for_msg_vf");
+
+       if (!ixgbe_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFSTS)) {
+               /* PF posted a message - count it and report success */
+               hw->mbx.stats.reqs++;
+               return IXGBE_SUCCESS;
+       }
+
+       return IXGBE_ERR_MBX;
+}
+
+/**
+ *  ixgbe_check_for_ack_vf - checks to see if the PF has ACK'd
+ *  @hw: pointer to the HW structure
+ *  @mbx_id: id of mailbox to check (unused on the VF side)
+ *
+ *  returns SUCCESS if the PF has set the ACK bit or else ERR_MBX
+ **/
+static s32 ixgbe_check_for_ack_vf(struct ixgbe_hw *hw, u16 mbx_id)
+{
+       UNREFERENCED_1PARAMETER(mbx_id);
+       DEBUGFUNC("ixgbe_check_for_ack_vf");
+
+       if (!ixgbe_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFACK)) {
+               /* PF acked our last message - count it and report success */
+               hw->mbx.stats.acks++;
+               return IXGBE_SUCCESS;
+       }
+
+       return IXGBE_ERR_MBX;
+}
+
+/**
+ *  ixgbe_check_for_rst_vf - checks to see if the PF has reset
+ *  @hw: pointer to the HW structure
+ *  @mbx_id: id of mailbox to check (unused on the VF side)
+ *
+ *  returns IXGBE_SUCCESS if the PF has set either reset bit (reset
+ *  indication or reset done), otherwise IXGBE_ERR_MBX
+ **/
+static s32 ixgbe_check_for_rst_vf(struct ixgbe_hw *hw, u16 mbx_id)
+{
+       s32 ret_val = IXGBE_ERR_MBX;
+
+       UNREFERENCED_1PARAMETER(mbx_id);
+       DEBUGFUNC("ixgbe_check_for_rst_vf");
+
+       /* either RSTI (reset in progress) or RSTD (reset done) counts */
+       if (!ixgbe_check_for_bit_vf(hw, (IXGBE_VFMAILBOX_RSTD |
+                                        IXGBE_VFMAILBOX_RSTI))) {
+               ret_val = IXGBE_SUCCESS;
+               hw->mbx.stats.rsts++;
+       }
+
+       return ret_val;
+}
+
+/**
+ *  ixgbe_obtain_mbx_lock_vf - obtain mailbox lock
+ *  @hw: pointer to the HW structure
+ *
+ *  return SUCCESS if we obtained the mailbox lock
+ **/
+static s32 ixgbe_obtain_mbx_lock_vf(struct ixgbe_hw *hw)
+{
+       DEBUGFUNC("ixgbe_obtain_mbx_lock_vf");
+
+       /* Take ownership of the buffer */
+       IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_VFU);
+
+       /* read back: if VFU is still set we hold the lock */
+       if (ixgbe_read_v2p_mailbox(hw) & IXGBE_VFMAILBOX_VFU)
+               return IXGBE_SUCCESS;
+
+       return IXGBE_ERR_MBX;
+}
+
+/**
+ *  ixgbe_write_mbx_vf - Write a message to the mailbox
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer (in 32-bit words)
+ *  @mbx_id: id of mailbox to write (unused on the VF side)
+ *
+ *  returns SUCCESS if it successfully copied message into the buffer
+ **/
+static s32 ixgbe_write_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size,
+                              u16 mbx_id)
+{
+       s32 ret_val;
+       u16 i;
+
+       UNREFERENCED_1PARAMETER(mbx_id);
+
+       DEBUGFUNC("ixgbe_write_mbx_vf");
+
+       /* lock the mailbox to prevent pf/vf race condition */
+       ret_val = ixgbe_obtain_mbx_lock_vf(hw);
+       if (ret_val)
+               goto out_no_write;
+
+       /* flush msg and acks as we are overwriting the message buffer */
+       ixgbe_check_for_msg_vf(hw, 0);
+       ixgbe_check_for_ack_vf(hw, 0);
+
+       /* copy the caller specified message to the mailbox memory buffer */
+       for (i = 0; i < size; i++)
+               IXGBE_WRITE_REG_ARRAY(hw, IXGBE_VFMBMEM, i, msg[i]);
+
+       /* update stats */
+       hw->mbx.stats.msgs_tx++;
+
+       /* Drop VFU and interrupt the PF to tell it a message has been sent;
+        * the REQ write must come only after the buffer is fully populated */
+       IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_REQ);
+
+out_no_write:
+       return ret_val;
+}
+
+/**
+ *  ixgbe_read_mbx_vf - Reads a message from the inbox intended for vf
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer (in 32-bit words)
+ *  @mbx_id: id of mailbox to read (unused on the VF side)
+ *
+ *  returns SUCCESS if it successfully read message from buffer
+ **/
+static s32 ixgbe_read_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size,
+                             u16 mbx_id)
+{
+       s32 ret_val = IXGBE_SUCCESS;
+       u16 i;
+
+       DEBUGFUNC("ixgbe_read_mbx_vf");
+       UNREFERENCED_1PARAMETER(mbx_id);
+
+       /* lock the mailbox to prevent pf/vf race condition */
+       ret_val = ixgbe_obtain_mbx_lock_vf(hw);
+       if (ret_val)
+               goto out_no_read;
+
+       /* copy the message from the mailbox memory buffer */
+       for (i = 0; i < size; i++)
+               msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_VFMBMEM, i);
+
+       /* Acknowledge receipt and release mailbox, then we're done;
+        * the ACK write also drops our VFU ownership bit */
+       IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_ACK);
+
+       /* update stats */
+       hw->mbx.stats.msgs_rx++;
+
+out_no_read:
+       return ret_val;
+}
+
+/**
+ *  ixgbe_init_mbx_params_vf - set initial values for vf mailbox
+ *  @hw: pointer to the HW structure
+ *
+ *  Initializes the hw->mbx struct to correct values for vf mailbox
+ */
+void ixgbe_init_mbx_params_vf(struct ixgbe_hw *hw)
+{
+       struct ixgbe_mbx_info *mailbox = &hw->mbx;
+
+       /* begin with communications timed out; the reset_hw call is
+        * expected to program a real timeout before the mailbox is used */
+       mailbox->timeout = 0;
+       mailbox->usec_delay = IXGBE_VF_MBX_INIT_DELAY;
+       mailbox->size = IXGBE_VFMAILBOX_SIZE;
+
+       /* VF-side raw ops plus the generic posted helpers */
+       mailbox->ops.read = ixgbe_read_mbx_vf;
+       mailbox->ops.write = ixgbe_write_mbx_vf;
+       mailbox->ops.read_posted = ixgbe_read_posted_mbx;
+       mailbox->ops.write_posted = ixgbe_write_posted_mbx;
+       mailbox->ops.check_for_msg = ixgbe_check_for_msg_vf;
+       mailbox->ops.check_for_ack = ixgbe_check_for_ack_vf;
+       mailbox->ops.check_for_rst = ixgbe_check_for_rst_vf;
+
+       /* zero all mailbox counters */
+       mailbox->stats.msgs_tx = 0;
+       mailbox->stats.msgs_rx = 0;
+       mailbox->stats.reqs = 0;
+       mailbox->stats.acks = 0;
+       mailbox->stats.rsts = 0;
+}
+
+/**
+ *  ixgbe_check_for_bit_pf - Determine if a VF status bit was set
+ *  @hw: pointer to the HW structure
+ *  @mask: bitmask for bits to be tested and cleared
+ *  @index: MBVFICR register index to inspect
+ *
+ *  returns SUCCESS if any bit in @mask is set in MBVFICR[index]; matched
+ *  bits are written back to the register (which clears them - the
+ *  register is read/write-1-to-clear in the ixgbe shared code).
+ **/
+static s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index)
+{
+       u32 vficr_bits = IXGBE_READ_REG(hw, IXGBE_MBVFICR(index));
+
+       if (!(vficr_bits & mask))
+               return IXGBE_ERR_MBX;
+
+       /* clear the matched bits by writing them back */
+       IXGBE_WRITE_REG(hw, IXGBE_MBVFICR(index), mask);
+       return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_check_for_msg_pf - checks to see if the VF has sent mail
+ *  @hw: pointer to the HW structure
+ *  @vf_number: the VF index
+ *
+ *  returns SUCCESS if the VF has set the Status bit or else ERR_MBX
+ **/
+static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number)
+{
+       s32 reg_index = IXGBE_MBVFICR_INDEX(vf_number);
+       u32 bit_in_reg = vf_number % 16;
+
+       DEBUGFUNC("ixgbe_check_for_msg_pf");
+
+       /* each MBVFICR register carries the REQ bits of 16 VFs */
+       if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFREQ_VF1 << bit_in_reg,
+                                   reg_index)) {
+               hw->mbx.stats.reqs++;
+               return IXGBE_SUCCESS;
+       }
+
+       return IXGBE_ERR_MBX;
+}
+
+/**
+ *  ixgbe_check_for_ack_pf - checks to see if the VF has ACKed
+ *  @hw: pointer to the HW structure
+ *  @vf_number: the VF index
+ *
+ *  returns SUCCESS if the VF has set the ACK bit or else ERR_MBX
+ **/
+static s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number)
+{
+       s32 reg_index = IXGBE_MBVFICR_INDEX(vf_number);
+       u32 bit_in_reg = vf_number % 16;
+
+       DEBUGFUNC("ixgbe_check_for_ack_pf");
+
+       /* each MBVFICR register carries the ACK bits of 16 VFs */
+       if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFACK_VF1 << bit_in_reg,
+                                   reg_index)) {
+               hw->mbx.stats.acks++;
+               return IXGBE_SUCCESS;
+       }
+
+       return IXGBE_ERR_MBX;
+}
+
+/**
+ *  ixgbe_check_for_rst_pf - checks to see if the VF has reset
+ *  @hw: pointer to the HW structure
+ *  @vf_number: the VF index
+ *
+ *  returns SUCCESS if the VF has issued a function-level reset, else ERR_MBX
+ **/
+static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
+{
+       /* VFLRE/VFLREC each cover 32 VFs; pick register and bit position */
+       u32 reg_offset = (vf_number < 32) ? 0 : 1;
+       u32 vf_shift = vf_number % 32;
+       u32 vflre = 0;
+       s32 ret_val = IXGBE_ERR_MBX;
+
+       DEBUGFUNC("ixgbe_check_for_rst_pf");
+
+       /* the VFLR status register differs per MAC generation; unknown MACs
+        * leave vflre at 0 and the function reports ERR_MBX */
+       switch (hw->mac.type) {
+       case ixgbe_mac_82599EB:
+               vflre = IXGBE_READ_REG(hw, IXGBE_VFLRE(reg_offset));
+               break;
+       case ixgbe_mac_X540:
+               vflre = IXGBE_READ_REG(hw, IXGBE_VFLREC(reg_offset));
+               break;
+       default:
+               break;
+       }
+
+       /* NOTE(review): the clear below writes VFLREC even on 82599 where
+        * the status was read from VFLRE - this mirrors Intel shared code;
+        * confirm against the 82599 datasheet */
+       if (vflre & (1 << vf_shift)) {
+               ret_val = IXGBE_SUCCESS;
+               IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), (1 << vf_shift));
+               hw->mbx.stats.rsts++;
+       }
+
+       return ret_val;
+}
+
+/**
+ *  ixgbe_obtain_mbx_lock_pf - obtain mailbox lock
+ *  @hw: pointer to the HW structure
+ *  @vf_number: the VF index
+ *
+ *  return SUCCESS if we obtained the mailbox lock
+ **/
+static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number)
+{
+       u32 mailbox_reg;
+
+       DEBUGFUNC("ixgbe_obtain_mbx_lock_pf");
+
+       /* Take ownership of the buffer */
+       IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_PFU);
+
+       /* read back: if PFU is still set we hold the lock */
+       mailbox_reg = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_number));
+       if (mailbox_reg & IXGBE_PFMAILBOX_PFU)
+               return IXGBE_SUCCESS;
+
+       return IXGBE_ERR_MBX;
+}
+
+/**
+ *  ixgbe_write_mbx_pf - Places a message in the mailbox
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer (in 32-bit words)
+ *  @vf_number: the VF index
+ *
+ *  returns SUCCESS if it successfully copied message into the buffer
+ **/
+static s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
+                              u16 vf_number)
+{
+       s32 ret_val;
+       u16 i;
+
+       DEBUGFUNC("ixgbe_write_mbx_pf");
+
+       /* lock the mailbox to prevent pf/vf race condition */
+       ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number);
+       if (ret_val)
+               goto out_no_write;
+
+       /* flush msg and acks as we are overwriting the message buffer */
+       ixgbe_check_for_msg_pf(hw, vf_number);
+       ixgbe_check_for_ack_pf(hw, vf_number);
+
+       /* copy the caller specified message to the mailbox memory buffer */
+       for (i = 0; i < size; i++)
+               IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i, msg[i]);
+
+       /* Interrupt VF to tell it a message has been sent and release the
+        * buffer; the STS write must follow the buffer copy above */
+       IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_STS);
+
+       /* update stats */
+       hw->mbx.stats.msgs_tx++;
+
+out_no_write:
+       return ret_val;
+
+}
+
+/**
+ *  ixgbe_read_mbx_pf - Read a message from the mailbox
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer (in 32-bit words)
+ *  @vf_number: the VF index
+ *
+ *  This function copies a message from the mailbox buffer to the caller's
+ *  memory buffer.  The presumption is that the caller knows that there was
+ *  a message due to a VF request so no polling for message is needed.
+ **/
+static s32 ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
+                             u16 vf_number)
+{
+       s32 ret_val;
+       u16 i;
+
+       DEBUGFUNC("ixgbe_read_mbx_pf");
+
+       /* lock the mailbox to prevent pf/vf race condition */
+       ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number);
+       if (ret_val)
+               goto out_no_read;
+
+       /* copy the message to the mailbox memory buffer */
+       for (i = 0; i < size; i++)
+               msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i);
+
+       /* Acknowledge the message and release buffer; the ACK write also
+        * drops the PFU ownership bit taken above */
+       IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_ACK);
+
+       /* update stats */
+       hw->mbx.stats.msgs_rx++;
+
+out_no_read:
+       return ret_val;
+}
+
+/**
+ *  ixgbe_init_mbx_params_pf - set initial values for pf mailbox
+ *  @hw: pointer to the HW structure
+ *
+ *  Initializes the hw->mbx struct to correct values for pf mailbox
+ */
+void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
+{
+       struct ixgbe_mbx_info *mailbox = &hw->mbx;
+
+       /* only 82599 and X540 have a PF-side mailbox */
+       if (hw->mac.type != ixgbe_mac_82599EB &&
+           hw->mac.type != ixgbe_mac_X540)
+               return;
+
+       mailbox->timeout = 0;
+       mailbox->usec_delay = 0;
+       mailbox->size = IXGBE_VFMAILBOX_SIZE;
+
+       /* PF-side raw ops plus the generic posted helpers */
+       mailbox->ops.read = ixgbe_read_mbx_pf;
+       mailbox->ops.write = ixgbe_write_mbx_pf;
+       mailbox->ops.read_posted = ixgbe_read_posted_mbx;
+       mailbox->ops.write_posted = ixgbe_write_posted_mbx;
+       mailbox->ops.check_for_msg = ixgbe_check_for_msg_pf;
+       mailbox->ops.check_for_ack = ixgbe_check_for_ack_pf;
+       mailbox->ops.check_for_rst = ixgbe_check_for_rst_pf;
+
+       /* zero all mailbox counters */
+       mailbox->stats.msgs_tx = 0;
+       mailbox->stats.msgs_rx = 0;
+       mailbox->stats.reqs = 0;
+       mailbox->stats.acks = 0;
+       mailbox->stats.rsts = 0;
+}
diff --git a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_mbx.h b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_mbx.h
new file mode 100644 (file)
index 0000000..398d0a3
--- /dev/null
@@ -0,0 +1,112 @@
+/******************************************************************************
+
+  Copyright (c) 2001-2010, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#ifndef _IXGBE_MBX_H_
+#define _IXGBE_MBX_H_
+
+#include "ixgbe_type.h"
+
+#define IXGBE_VFMAILBOX_SIZE        16 /* 16 32 bit words - 64 bytes */
+#define IXGBE_ERR_MBX               -100
+
+#define IXGBE_VFMAILBOX             0x002FC
+#define IXGBE_VFMBMEM               0x00200
+
+/* Define mailbox register bits */
+#define IXGBE_VFMAILBOX_REQ      0x00000001 /* Request for PF Ready bit */
+#define IXGBE_VFMAILBOX_ACK      0x00000002 /* Ack PF message received */
+#define IXGBE_VFMAILBOX_VFU      0x00000004 /* VF owns the mailbox buffer */
+#define IXGBE_VFMAILBOX_PFU      0x00000008 /* PF owns the mailbox buffer */
+#define IXGBE_VFMAILBOX_PFSTS    0x00000010 /* PF wrote a message in the MB */
+#define IXGBE_VFMAILBOX_PFACK    0x00000020 /* PF ack the previous VF msg */
+#define IXGBE_VFMAILBOX_RSTI     0x00000040 /* PF has reset indication */
+#define IXGBE_VFMAILBOX_RSTD     0x00000080 /* PF has indicated reset done */
+#define IXGBE_VFMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */
+
+#define IXGBE_PFMAILBOX_STS   0x00000001 /* Initiate message send to VF */
+#define IXGBE_PFMAILBOX_ACK   0x00000002 /* Ack message recv'd from VF */
+#define IXGBE_PFMAILBOX_VFU   0x00000004 /* VF owns the mailbox buffer */
+#define IXGBE_PFMAILBOX_PFU   0x00000008 /* PF owns the mailbox buffer */
+#define IXGBE_PFMAILBOX_RVFU  0x00000010 /* Reset VFU - used when VF stuck */
+
+#define IXGBE_MBVFICR_VFREQ_MASK 0x0000FFFF /* bits for VF messages */
+#define IXGBE_MBVFICR_VFREQ_VF1  0x00000001 /* bit for VF 1 message */
+#define IXGBE_MBVFICR_VFACK_MASK 0xFFFF0000 /* bits for VF acks */
+#define IXGBE_MBVFICR_VFACK_VF1  0x00010000 /* bit for VF 1 ack */
+
+
+/* If it's a IXGBE_VF_* msg then it originates in the VF and is sent to the
+ * PF.  The reverse is TRUE if it is IXGBE_PF_*.
+ * Message ACK's are the value or'd with 0xF0000000
+ */
+#define IXGBE_VT_MSGTYPE_ACK      0x80000000  /* Messages below or'd with
+                                               * this are the ACK */
+#define IXGBE_VT_MSGTYPE_NACK     0x40000000  /* Messages below or'd with
+                                               * this are the NACK */
+#define IXGBE_VT_MSGTYPE_CTS      0x20000000  /* Indicates that VF is still
+                                                 clear to send requests */
+#define IXGBE_VT_MSGINFO_SHIFT    16
+/* bits 23:16 are used for extra info for certain messages */
+#define IXGBE_VT_MSGINFO_MASK     (0xFF << IXGBE_VT_MSGINFO_SHIFT)
+
+#define IXGBE_VF_RESET            0x01 /* VF requests reset */
+#define IXGBE_VF_SET_MAC_ADDR     0x02 /* VF requests PF to set MAC addr */
+#define IXGBE_VF_SET_MULTICAST    0x03 /* VF requests PF to set MC addr */
+#define IXGBE_VF_SET_VLAN         0x04 /* VF requests PF to set VLAN */
+#define IXGBE_VF_SET_LPE          0x05 /* VF requests PF to set VMOLR.LPE */
+#define IXGBE_VF_SET_MACVLAN      0x06 /* VF requests PF for unicast filter */
+
+/* length of permanent address message returned from PF */
+#define IXGBE_VF_PERMADDR_MSG_LEN 4
+/* word in permanent address message with the current multicast type */
+#define IXGBE_VF_MC_TYPE_WORD     3
+
+#define IXGBE_PF_CONTROL_MSG      0x0100 /* PF control message */
+
+
+#define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */
+#define IXGBE_VF_MBX_INIT_DELAY   500  /* microseconds between retries */
+
+s32 ixgbe_read_mbx(struct ixgbe_hw *, u32 *, u16, u16);
+s32 ixgbe_write_mbx(struct ixgbe_hw *, u32 *, u16, u16);
+s32 ixgbe_read_posted_mbx(struct ixgbe_hw *, u32 *, u16, u16);
+s32 ixgbe_write_posted_mbx(struct ixgbe_hw *, u32 *, u16, u16);
+s32 ixgbe_check_for_msg(struct ixgbe_hw *, u16);
+s32 ixgbe_check_for_ack(struct ixgbe_hw *, u16);
+s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16);
+void ixgbe_init_mbx_ops_generic(struct ixgbe_hw *hw);
+void ixgbe_init_mbx_params_vf(struct ixgbe_hw *);
+void ixgbe_init_mbx_params_pf(struct ixgbe_hw *);
+
+#endif /* _IXGBE_MBX_H_ */
diff --git a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_osdep.h b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_osdep.h
new file mode 100644 (file)
index 0000000..fe7ac49
--- /dev/null
@@ -0,0 +1,145 @@
+/******************************************************************************
+
+  Copyright (c) 2001-2010, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#ifndef _IXGBE_OS_H_
+#define _IXGBE_OS_H_
+
+#include <string.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdarg.h>
+#include <rte_common.h>
+#include <rte_debug.h>
+#include <rte_cycles.h>
+#include <rte_log.h>
+#include <rte_byteorder.h>
+
+#include "../ixgbe_logs.h"
+
+/* Remove some compiler warnings for the files in this dir */
+#ifdef __INTEL_COMPILER
+#pragma warning(disable:2259) /* Conversion may lose significant bits */
+#pragma warning(disable:869)  /* Parameter was never referenced */
+#pragma warning(disable:181)  /* Arg incompatible with format string */
+#pragma warning(disable:1419) /* External declaration in primary source file */
+#pragma warning(disable:111)  /* Statement is unreachable */
+#pragma warning(disable:981)  /* Operands are evaluated in unspecified order */
+#else
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#pragma GCC diagnostic ignored "-Wformat"
+#pragma GCC diagnostic ignored "-Wuninitialized"
+#pragma GCC diagnostic ignored "-Wstrict-aliasing"
+#if (((__GNUC__) >= 4) && ((__GNUC_MINOR__) >= 7))
+#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
+#endif
+#endif
+
+#define ASSERT(x) if(!(x)) rte_panic("IXGBE: x")
+
+#define DELAY(x) rte_delay_us(x)
+#define usec_delay(x) DELAY(x)
+#define msec_delay(x) DELAY(1000*(x))
+
+#define DEBUGFUNC(F)            DEBUGOUT(F);
+#define DEBUGOUT(S, args...)    PMD_DRV_LOG(DEBUG, S, ##args)
+#define DEBUGOUT1(S, args...)   DEBUGOUT(S, ##args)
+#define DEBUGOUT2(S, args...)   DEBUGOUT(S, ##args)
+#define DEBUGOUT3(S, args...)   DEBUGOUT(S, ##args)
+#define DEBUGOUT6(S, args...)   DEBUGOUT(S, ##args)
+#define DEBUGOUT7(S, args...)   DEBUGOUT(S, ##args)
+
+#define FALSE               0
+#define TRUE                1
+
+/* Bunch of defines for shared code bogosity */
+#define UNREFERENCED_PARAMETER(_p)
+#define UNREFERENCED_1PARAMETER(_p)
+#define UNREFERENCED_2PARAMETER(_p, _q)
+#define UNREFERENCED_3PARAMETER(_p, _q, _r)
+#define UNREFERENCED_4PARAMETER(_p, _q, _r, _s)
+
+
+#define IXGBE_NTOHL(_i)        rte_be_to_cpu_32(_i)
+#define IXGBE_NTOHS(_i)        rte_be_to_cpu_16(_i)
+
+typedef uint8_t                u8;
+typedef int8_t         s8;
+typedef uint16_t       u16;
+typedef uint32_t       u32;
+typedef int32_t                s32;
+typedef uint64_t       u64;
+typedef int            bool;
+
+#define mb()   rte_mb()
+#define wmb()  rte_wmb()
+#define rmb()  rte_rmb()
+
+#define prefetch(x) rte_prefetch0(x)
+
+#define IXGBE_PCI_REG(reg) (*((volatile uint32_t *)(reg)))
+
+/* Perform a single volatile 32-bit read from a mapped device register
+ * address (helper behind IXGBE_READ_REG). */
+static inline uint32_t ixgbe_read_addr(volatile void* addr)
+{
+       return IXGBE_PCI_REG(addr);
+}
+
+#define IXGBE_PCI_REG_WRITE(reg, value) do { \
+       IXGBE_PCI_REG((reg)) = (value); \
+} while(0)
+
+#define IXGBE_PCI_REG_ADDR(hw, reg) \
+       ((volatile uint32_t *)((char *)(hw)->hw_addr + (reg)))
+
+#define IXGBE_PCI_REG_ARRAY_ADDR(hw, reg, index) \
+       IXGBE_PCI_REG_ADDR((hw), (reg) + ((index) << 2))
+
+/* Not implemented !! */
+#define IXGBE_READ_PCIE_WORD(hw, reg) 0
+#define IXGBE_WRITE_PCIE_WORD(hw, reg, value) do { } while(0)
+
+#define IXGBE_WRITE_FLUSH(a) IXGBE_READ_REG(a, IXGBE_STATUS)
+
+#define IXGBE_READ_REG(hw, reg) \
+       ixgbe_read_addr(IXGBE_PCI_REG_ADDR((hw), (reg)))
+
+#define IXGBE_WRITE_REG(hw, reg, value) \
+       IXGBE_PCI_REG_WRITE(IXGBE_PCI_REG_ADDR((hw), (reg)), (value))
+
+#define IXGBE_READ_REG_ARRAY(hw, reg, index) \
+       IXGBE_PCI_REG(IXGBE_PCI_REG_ARRAY_ADDR((hw), (reg), (index)))
+
+#define IXGBE_WRITE_REG_ARRAY(hw, reg, index, value) \
+       IXGBE_PCI_REG_WRITE(IXGBE_PCI_REG_ARRAY_ADDR((hw), (reg), (index)), (value))
+
+#endif /* _IXGBE_OS_H_ */
diff --git a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_phy.c b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_phy.c
new file mode 100644 (file)
index 0000000..56565cd
--- /dev/null
@@ -0,0 +1,1843 @@
+/******************************************************************************
+
+  Copyright (c) 2001-2010, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#include "ixgbe_api.h"
+#include "ixgbe_common.h"
+#include "ixgbe_phy.h"
+
+/* Forward declarations of the I2C bit-banging helpers (defined later in
+ * this file) used to talk to SFP module EEPROMs over the I2CCTL register. */
+static void ixgbe_i2c_start(struct ixgbe_hw *hw);
+static void ixgbe_i2c_stop(struct ixgbe_hw *hw);
+static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data);
+static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data);
+static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw);
+static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data);
+static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data);
+static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl);
+static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl);
+static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data);
+static bool ixgbe_get_i2c_data(u32 *i2cctl);
+/* Non-static: installed in phy.ops.i2c_bus_clear and callable externally. */
+void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw);
+
+/**
+ *  ixgbe_init_phy_ops_generic - Inits PHY function ptrs
+ *  @hw: pointer to the hardware structure
+ *
+ *  Initialize the function pointers.  Populates hw->phy.ops with the
+ *  generic implementations from this file and marks the SFP type unknown.
+ *  Always returns IXGBE_SUCCESS.
+ **/
+s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw)
+{
+       struct ixgbe_phy_info *phy = &hw->phy;
+
+       DEBUGFUNC("ixgbe_init_phy_ops_generic");
+
+       /* PHY */
+       phy->ops.identify = &ixgbe_identify_phy_generic;
+       phy->ops.reset = &ixgbe_reset_phy_generic;
+       phy->ops.read_reg = &ixgbe_read_phy_reg_generic;
+       phy->ops.write_reg = &ixgbe_write_phy_reg_generic;
+       phy->ops.setup_link = &ixgbe_setup_phy_link_generic;
+       phy->ops.setup_link_speed = &ixgbe_setup_phy_link_speed_generic;
+       /* No generic link-check; MAC-specific code supplies one if needed. */
+       phy->ops.check_link = NULL;
+       phy->ops.get_firmware_version = ixgbe_get_phy_firmware_version_generic;
+       phy->ops.read_i2c_byte = &ixgbe_read_i2c_byte_generic;
+       phy->ops.write_i2c_byte = &ixgbe_write_i2c_byte_generic;
+       phy->ops.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic;
+       phy->ops.write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic;
+       phy->ops.i2c_bus_clear = &ixgbe_i2c_bus_clear;
+       phy->ops.identify_sfp = &ixgbe_identify_sfp_module_generic;
+       phy->sfp_type = ixgbe_sfp_type_unknown;
+       phy->ops.check_overtemp = &ixgbe_tn_check_overtemp;
+       return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_identify_phy_generic - Get physical layer module
+ *  @hw: pointer to hardware structure
+ *
+ *  Determines the physical layer module found on the current adapter.
+ *  Probes every MDIO address up to IXGBE_MAX_PHY_ADDR; on the first valid
+ *  address it latches hw->phy.addr, reads the PHY id, and derives the PHY
+ *  type.  Returns IXGBE_SUCCESS once a PHY is found (or if one was already
+ *  identified), IXGBE_ERR_PHY_ADDR_INVALID otherwise.
+ **/
+s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
+{
+       s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
+       u32 phy_addr;
+       u16 ext_ability = 0;
+
+       DEBUGFUNC("ixgbe_identify_phy_generic");
+
+       if (hw->phy.type == ixgbe_phy_unknown) {
+               for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) {
+                       if (ixgbe_validate_phy_addr(hw, phy_addr)) {
+                               hw->phy.addr = phy_addr;
+                               ixgbe_get_phy_id(hw);
+                               hw->phy.type =
+                                       ixgbe_get_phy_type_from_id(hw->phy.id);
+
+                               /* Unrecognized id: fall back to the PMA/PMD
+                                * extended-ability bits to at least tell a
+                                * copper PHY from a generic one. */
+                               if (hw->phy.type == ixgbe_phy_unknown) {
+                                       hw->phy.ops.read_reg(hw,
+                                                 IXGBE_MDIO_PHY_EXT_ABILITY,
+                                                 IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+                                                 &ext_ability);
+                                       if (ext_ability &
+                                           (IXGBE_MDIO_PHY_10GBASET_ABILITY |
+                                            IXGBE_MDIO_PHY_1000BASET_ABILITY))
+                                               hw->phy.type =
+                                                        ixgbe_phy_cu_unknown;
+                                       else
+                                               hw->phy.type =
+                                                        ixgbe_phy_generic;
+                               }
+
+                               status = IXGBE_SUCCESS;
+                               break;
+                       }
+               }
+               /* clear value if nothing found */
+               if (status != IXGBE_SUCCESS)
+                       hw->phy.addr = 0;
+       } else {
+               status = IXGBE_SUCCESS;
+       }
+
+       return status;
+}
+
+/**
+ *  ixgbe_validate_phy_addr - Determines phy address is valid
+ *  @hw: pointer to hardware structure
+ *  @phy_addr: MDIO address to probe
+ *
+ *  Side effect: sets hw->phy.addr to @phy_addr before the probe read.
+ *  A PHY is considered present when the id-high register reads neither
+ *  all-ones (no response) nor zero.
+ **/
+bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr)
+{
+       u16 phy_id = 0;
+       bool valid = FALSE;
+
+       DEBUGFUNC("ixgbe_validate_phy_addr");
+
+       hw->phy.addr = phy_addr;
+       hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_HIGH,
+                            IXGBE_MDIO_PMA_PMD_DEV_TYPE, &phy_id);
+
+       if (phy_id != 0xFFFF && phy_id != 0x0)
+               valid = TRUE;
+
+       return valid;
+}
+
+/**
+ *  ixgbe_get_phy_id - Get the phy id
+ *  @hw: pointer to hardware structure
+ *
+ *  Reads the PHY identifier high/low registers over MDIO and combines them:
+ *  hw->phy.id gets the high word plus the masked low bits, hw->phy.revision
+ *  the remaining low bits.  Returns the status of the last register read.
+ **/
+s32 ixgbe_get_phy_id(struct ixgbe_hw *hw)
+{
+       u32 status;
+       u16 phy_id_high = 0;
+       u16 phy_id_low = 0;
+
+       DEBUGFUNC("ixgbe_get_phy_id");
+
+       status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_HIGH,
+                                     IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+                                     &phy_id_high);
+
+       if (status == IXGBE_SUCCESS) {
+               /* Cast before shifting: u16 promotes to (signed) int, so
+                * '(phy_id_high << 16)' overflows int whenever bit 15 of the
+                * id is set; shifting after the u32 cast is well defined. */
+               hw->phy.id = (u32)phy_id_high << 16;
+               status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_LOW,
+                                             IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+                                             &phy_id_low);
+               hw->phy.id |= (u32)(phy_id_low & IXGBE_PHY_REVISION_MASK);
+               hw->phy.revision = (u32)(phy_id_low & ~IXGBE_PHY_REVISION_MASK);
+       }
+       return status;
+}
+
+/**
+ *  ixgbe_get_phy_type_from_id - Get the phy type
+ *  @phy_id: 32-bit PHY identifier (as assembled by ixgbe_get_phy_id)
+ *
+ *  Maps a known PHY id to its ixgbe_phy_type; unknown ids map to
+ *  ixgbe_phy_unknown.
+ **/
+enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
+{
+       enum ixgbe_phy_type phy_type;
+
+       DEBUGFUNC("ixgbe_get_phy_type_from_id");
+
+       switch (phy_id) {
+       case TN1010_PHY_ID:
+               phy_type = ixgbe_phy_tn;
+               break;
+       case X540_PHY_ID:
+               phy_type = ixgbe_phy_aq;
+               break;
+       case QT2022_PHY_ID:
+               phy_type = ixgbe_phy_qt;
+               break;
+       case ATH_PHY_ID:
+               phy_type = ixgbe_phy_nl;
+               break;
+       default:
+               phy_type = ixgbe_phy_unknown;
+               break;
+       }
+
+       DEBUGOUT1("phy type found is %d\n", phy_type);
+       return phy_type;
+}
+
+/**
+ *  ixgbe_reset_phy_generic - Performs a PHY reset
+ *  @hw: pointer to hardware structure
+ *
+ *  Issues a soft reset through the PHY_XS control register and polls for
+ *  the self-clearing reset bit.  Returns IXGBE_SUCCESS, an identify error,
+ *  or IXGBE_ERR_RESET_FAILED on poll timeout.
+ **/
+s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
+{
+       u32 i;
+       u16 ctrl = 0;
+       s32 status = IXGBE_SUCCESS;
+
+       DEBUGFUNC("ixgbe_reset_phy_generic");
+
+       /* Identify first if nothing has identified the PHY yet. */
+       if (hw->phy.type == ixgbe_phy_unknown)
+               status = ixgbe_identify_phy_generic(hw);
+
+       if (status != IXGBE_SUCCESS || hw->phy.type == ixgbe_phy_none)
+               goto out;
+
+       /* Don't reset PHY if it's shut down due to overtemp. */
+       if (!hw->phy.reset_if_overtemp &&
+           (IXGBE_ERR_OVERTEMP == hw->phy.ops.check_overtemp(hw)))
+               goto out;
+
+       /*
+        * Perform soft PHY reset to the PHY_XS.
+        * This will cause a soft reset to the PHY
+        */
+       hw->phy.ops.write_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
+                             IXGBE_MDIO_PHY_XS_DEV_TYPE,
+                             IXGBE_MDIO_PHY_XS_RESET);
+
+       /*
+        * Poll for reset bit to self-clear indicating reset is complete.
+        * Some PHYs could take up to 3 seconds to complete and need about
+        * 1.7 usec delay after the reset is complete.
+        */
+       for (i = 0; i < 30; i++) {
+               /* 30 iterations x 100 ms = the 3 s worst case noted above. */
+               msec_delay(100);
+               hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
+                                    IXGBE_MDIO_PHY_XS_DEV_TYPE, &ctrl);
+               if (!(ctrl & IXGBE_MDIO_PHY_XS_RESET)) {
+                       usec_delay(2);
+                       break;
+               }
+       }
+
+       if (ctrl & IXGBE_MDIO_PHY_XS_RESET) {
+               status = IXGBE_ERR_RESET_FAILED;
+               DEBUGOUT("PHY reset polling failed to complete.\n");
+       }
+
+out:
+       return status;
+}
+
+/**
+ *  ixgbe_read_phy_reg_generic - Reads a value from a specified PHY register
+ *  @hw: pointer to hardware structure
+ *  @reg_addr: 32 bit address of PHY register to read
+ *  @device_type: MDIO device type (MMD) the register belongs to
+ *  @phy_data: Pointer to read data from PHY register
+ *
+ *  Two-phase MDIO access through the MSCA/MSRWD registers: an address
+ *  cycle followed by a read cycle, each polled for the self-clearing
+ *  MDI_COMMAND bit.  The whole sequence runs under the per-LAN PHY
+ *  software/firmware semaphore.  Returns IXGBE_ERR_SWFW_SYNC or
+ *  IXGBE_ERR_PHY on failure.
+ **/
+s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
+                               u32 device_type, u16 *phy_data)
+{
+       u32 command;
+       u32 i;
+       u32 data;
+       s32 status = IXGBE_SUCCESS;
+       u16 gssr;
+
+       DEBUGFUNC("ixgbe_read_phy_reg_generic");
+
+       /* Pick the semaphore matching this port's LAN ID. */
+       if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
+               gssr = IXGBE_GSSR_PHY1_SM;
+       else
+               gssr = IXGBE_GSSR_PHY0_SM;
+
+       if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != IXGBE_SUCCESS)
+               status = IXGBE_ERR_SWFW_SYNC;
+
+       if (status == IXGBE_SUCCESS) {
+               /* Setup and write the address cycle command */
+               command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT)  |
+                          (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+                          (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+                          (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
+
+               IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
+
+               /*
+                * Check every 10 usec to see if the address cycle completed.
+                * The MDI Command bit will clear when the operation is
+                * complete
+                */
+               for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+                       usec_delay(10);
+
+                       command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+
+                       if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
+                               break;
+               }
+
+               if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
+                       DEBUGOUT("PHY address command did not complete.\n");
+                       status = IXGBE_ERR_PHY;
+               }
+
+               if (status == IXGBE_SUCCESS) {
+                       /*
+                        * Address cycle complete, setup and write the read
+                        * command
+                        */
+                       command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT)  |
+                                  (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+                                  (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+                                  (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND));
+
+                       IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
+
+                       /*
+                        * Check every 10 usec to see if the address cycle
+                        * completed. The MDI Command bit will clear when the
+                        * operation is complete
+                        */
+                       for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+                               usec_delay(10);
+
+                               command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+
+                               if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
+                                       break;
+                       }
+
+                       if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
+                               DEBUGOUT("PHY read command didn't complete\n");
+                               status = IXGBE_ERR_PHY;
+                       } else {
+                               /*
+                                * Read operation is complete.  Get the data
+                                * from MSRWD
+                                */
+                               data = IXGBE_READ_REG(hw, IXGBE_MSRWD);
+                               data >>= IXGBE_MSRWD_READ_DATA_SHIFT;
+                               *phy_data = (u16)(data);
+                       }
+               }
+
+               hw->mac.ops.release_swfw_sync(hw, gssr);
+       }
+
+       return status;
+}
+
+/**
+ *  ixgbe_write_phy_reg_generic - Writes a value to specified PHY register
+ *  @hw: pointer to hardware structure
+ *  @reg_addr: 32 bit PHY register to write
+ *  @device_type: 5 bit device type
+ *  @phy_data: Data to write to the PHY register
+ *
+ *  Mirror of ixgbe_read_phy_reg_generic: loads MSRWD with the data, then
+ *  runs an MDIO address cycle followed by a write cycle through MSCA,
+ *  polling the self-clearing MDI_COMMAND bit for each, all under the
+ *  per-LAN PHY semaphore.
+ **/
+s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
+                                u32 device_type, u16 phy_data)
+{
+       u32 command;
+       u32 i;
+       s32 status = IXGBE_SUCCESS;
+       u16 gssr;
+
+       DEBUGFUNC("ixgbe_write_phy_reg_generic");
+
+       /* Pick the semaphore matching this port's LAN ID. */
+       if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
+               gssr = IXGBE_GSSR_PHY1_SM;
+       else
+               gssr = IXGBE_GSSR_PHY0_SM;
+
+       if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != IXGBE_SUCCESS)
+               status = IXGBE_ERR_SWFW_SYNC;
+
+       if (status == IXGBE_SUCCESS) {
+               /* Put the data in the MDI single read and write data register*/
+               IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data);
+
+               /* Setup and write the address cycle command */
+               command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT)  |
+                          (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+                          (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+                          (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
+
+               IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
+
+               /*
+                * Check every 10 usec to see if the address cycle completed.
+                * The MDI Command bit will clear when the operation is
+                * complete
+                */
+               for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+                       usec_delay(10);
+
+                       command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+
+                       if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
+                               break;
+               }
+
+               if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
+                       DEBUGOUT("PHY address cmd didn't complete\n");
+                       status = IXGBE_ERR_PHY;
+               }
+
+               if (status == IXGBE_SUCCESS) {
+                       /*
+                        * Address cycle complete, setup and write the write
+                        * command
+                        */
+                       command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT)  |
+                                  (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+                                  (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+                                  (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND));
+
+                       IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
+
+                       /*
+                        * Check every 10 usec to see if the address cycle
+                        * completed. The MDI Command bit will clear when the
+                        * operation is complete
+                        */
+                       for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+                               usec_delay(10);
+
+                               command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+
+                               if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
+                                       break;
+                       }
+
+                       /* NOTE(review): this failure is the *write* (data)
+                        * cycle, but the message below reuses the address-
+                        * cycle wording — misleading in logs. */
+                       if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
+                               DEBUGOUT("PHY address cmd didn't complete\n");
+                               status = IXGBE_ERR_PHY;
+                       }
+               }
+
+               hw->mac.ops.release_swfw_sync(hw, gssr);
+       }
+
+       return status;
+}
+
+/**
+ *  ixgbe_setup_phy_link_generic - Set and restart autoneg
+ *  @hw: pointer to hardware structure
+ *
+ *  Restart autonegotiation and PHY and waits for completion.
+ *  For each speed the PHY supports (per its copper link capabilities),
+ *  sets or clears the matching advertisement bit according to
+ *  hw->phy.autoneg_advertised, then restarts autoneg and polls the
+ *  autoneg-complete bit.
+ **/
+s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
+{
+       s32 status = IXGBE_SUCCESS;
+       u32 time_out;
+       u32 max_time_out = 10;
+       u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
+       bool autoneg = FALSE;
+       ixgbe_link_speed speed;
+
+       DEBUGFUNC("ixgbe_setup_phy_link_generic");
+
+       ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg);
+
+       if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
+               /* Set or unset auto-negotiation 10G advertisement */
+               hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
+                                    IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+                                    &autoneg_reg);
+
+               autoneg_reg &= ~IXGBE_MII_10GBASE_T_ADVERTISE;
+               if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
+                       autoneg_reg |= IXGBE_MII_10GBASE_T_ADVERTISE;
+
+               hw->phy.ops.write_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
+                                     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+                                     autoneg_reg);
+       }
+
+       if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
+               /* Set or unset auto-negotiation 1G advertisement */
+               hw->phy.ops.read_reg(hw,
+                                    IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
+                                    IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+                                    &autoneg_reg);
+
+               autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE;
+               if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
+                       autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE;
+
+               hw->phy.ops.write_reg(hw,
+                                     IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG,
+                                     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+                                     autoneg_reg);
+       }
+
+       if (speed & IXGBE_LINK_SPEED_100_FULL) {
+               /* Set or unset auto-negotiation 100M advertisement */
+               hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG,
+                                    IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+                                    &autoneg_reg);
+
+               /* Half-duplex 100M is never advertised; clear both bits. */
+               autoneg_reg &= ~(IXGBE_MII_100BASE_T_ADVERTISE |
+                                IXGBE_MII_100BASE_T_ADVERTISE_HALF);
+               if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
+                       autoneg_reg |= IXGBE_MII_100BASE_T_ADVERTISE;
+
+               hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG,
+                                     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+                                     autoneg_reg);
+       }
+
+       /* Restart PHY autonegotiation and wait for completion */
+       hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
+                            IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg);
+
+       autoneg_reg |= IXGBE_MII_RESTART;
+
+       hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
+                             IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg);
+
+       /* Wait for autonegotiation to finish */
+       /* NOTE(review): 10 iterations x 10 usec = ~100 us total budget,
+        * which looks very short for autoneg — confirm whether hitting the
+        * timeout path here is expected/benign. */
+       for (time_out = 0; time_out < max_time_out; time_out++) {
+               usec_delay(10);
+               /* Restart PHY autonegotiation and wait for completion */
+               status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
+                                             IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+                                             &autoneg_reg);
+
+               autoneg_reg &= IXGBE_MII_AUTONEG_COMPLETE;
+               if (autoneg_reg == IXGBE_MII_AUTONEG_COMPLETE) {
+                       break;
+               }
+       }
+
+       if (time_out == max_time_out) {
+               status = IXGBE_ERR_LINK_SETUP;
+               DEBUGOUT("ixgbe_setup_phy_link_generic: time out");
+       }
+
+       return status;
+}
+
+/**
+ *  ixgbe_setup_phy_link_speed_generic - Sets the auto advertised capabilities
+ *  @hw: pointer to hardware structure
+ *  @speed: new link speed
+ *  @autoneg: TRUE if autonegotiation enabled (unused here)
+ *  @autoneg_wait_to_complete: unused here
+ *
+ *  Rebuilds hw->phy.autoneg_advertised from @speed, then delegates the
+ *  actual register programming to phy.ops.setup_link.  Always returns
+ *  IXGBE_SUCCESS regardless of the setup_link outcome.
+ **/
+s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
+                                       ixgbe_link_speed speed,
+                                       bool autoneg,
+                                       bool autoneg_wait_to_complete)
+{
+       UNREFERENCED_2PARAMETER(autoneg, autoneg_wait_to_complete);
+
+       DEBUGFUNC("ixgbe_setup_phy_link_speed_generic");
+
+       /*
+        * Clear autoneg_advertised and set new values based on input link
+        * speed.
+        */
+       hw->phy.autoneg_advertised = 0;
+
+       if (speed & IXGBE_LINK_SPEED_10GB_FULL)
+               hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
+
+       if (speed & IXGBE_LINK_SPEED_1GB_FULL)
+               hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
+
+       if (speed & IXGBE_LINK_SPEED_100_FULL)
+               hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
+
+       /* Setup link based on the new speed settings */
+       hw->phy.ops.setup_link(hw);
+
+       return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_get_copper_link_capabilities_generic - Determines link capabilities
+ *  @hw: pointer to hardware structure
+ *  @speed: pointer to link speed (OR of IXGBE_LINK_SPEED_* bits on success)
+ *  @autoneg: boolean auto-negotiation value (always set TRUE here)
+ *
+ *  Determines the link capabilities by reading the PMA/PMD speed-ability
+ *  register over MDIO.  (The comment about AUTOC in the original shared
+ *  code does not apply to this copper path.)
+ **/
+s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
+                                             ixgbe_link_speed *speed,
+                                             bool *autoneg)
+{
+       s32 status = IXGBE_ERR_LINK_SETUP;
+       u16 speed_ability;
+
+       DEBUGFUNC("ixgbe_get_copper_link_capabilities_generic");
+
+       *speed = 0;
+       *autoneg = TRUE;
+
+       status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_SPEED_ABILITY,
+                                     IXGBE_MDIO_PMA_PMD_DEV_TYPE,
+                                     &speed_ability);
+
+       if (status == IXGBE_SUCCESS) {
+               if (speed_ability & IXGBE_MDIO_PHY_SPEED_10G)
+                       *speed |= IXGBE_LINK_SPEED_10GB_FULL;
+               if (speed_ability & IXGBE_MDIO_PHY_SPEED_1G)
+                       *speed |= IXGBE_LINK_SPEED_1GB_FULL;
+               if (speed_ability & IXGBE_MDIO_PHY_SPEED_100M)
+                       *speed |= IXGBE_LINK_SPEED_100_FULL;
+       }
+
+       return status;
+}
+
+/**
+ *  ixgbe_check_phy_link_tnx - Determine link and speed status
+ *  @hw: pointer to hardware structure
+ *  @speed: out — IXGBE_LINK_SPEED_10GB_FULL (default) or 1GB_FULL
+ *  @link_up: out — TRUE if the vendor status register reports link
+ *
+ *  Reads the VS1 register to determine if link is up and the current speed
+ *  for the PHY.  Polls up to 10 times with 10 usec spacing.
+ **/
+s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+                             bool *link_up)
+{
+       s32 status = IXGBE_SUCCESS;
+       u32 time_out;
+       u32 max_time_out = 10;
+       u16 phy_link = 0;
+       u16 phy_speed = 0;
+       u16 phy_data = 0;
+
+       DEBUGFUNC("ixgbe_check_phy_link_tnx");
+
+       /* Initialize speed and link to default case */
+       *link_up = FALSE;
+       *speed = IXGBE_LINK_SPEED_10GB_FULL;
+
+       /*
+        * Check current speed and link status of the PHY register.
+        * This is a vendor specific register and may have to
+        * be changed for other copper PHYs.
+        */
+       for (time_out = 0; time_out < max_time_out; time_out++) {
+               usec_delay(10);
+               status = hw->phy.ops.read_reg(hw,
+                                       IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS,
+                                       IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+                                       &phy_data);
+               phy_link = phy_data &
+                          IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS;
+               phy_speed = phy_data &
+                           IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS;
+               if (phy_link == IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS) {
+                       *link_up = TRUE;
+                       /* Speed bit set means 1G on this vendor register. */
+                       if (phy_speed ==
+                           IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS)
+                               *speed = IXGBE_LINK_SPEED_1GB_FULL;
+                       break;
+               }
+       }
+
+       return status;
+}
+
+/**
+ *     ixgbe_setup_phy_link_tnx - Set and restart autoneg
+ *     @hw: pointer to hardware structure
+ *
+ *     Restart autonegotiation and PHY and waits for completion.
+ *     TNX variant of ixgbe_setup_phy_link_generic; the 1G advertisement
+ *     lives in the XNP transmit register here instead of the vendor
+ *     provisioning register.
+ **/
+s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
+{
+       s32 status = IXGBE_SUCCESS;
+       u32 time_out;
+       u32 max_time_out = 10;
+       u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
+       bool autoneg = FALSE;
+       ixgbe_link_speed speed;
+
+       DEBUGFUNC("ixgbe_setup_phy_link_tnx");
+
+       ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg);
+
+       if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
+               /* Set or unset auto-negotiation 10G advertisement */
+               hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
+                                    IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+                                    &autoneg_reg);
+
+               autoneg_reg &= ~IXGBE_MII_10GBASE_T_ADVERTISE;
+               if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
+                       autoneg_reg |= IXGBE_MII_10GBASE_T_ADVERTISE;
+
+               hw->phy.ops.write_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG,
+                                     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+                                     autoneg_reg);
+       }
+
+       if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
+               /* Set or unset auto-negotiation 1G advertisement */
+               hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG,
+                                    IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+                                    &autoneg_reg);
+
+               autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX;
+               if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
+                       autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX;
+
+               hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG,
+                                     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+                                     autoneg_reg);
+       }
+
+       if (speed & IXGBE_LINK_SPEED_100_FULL) {
+               /* Set or unset auto-negotiation 100M advertisement */
+               hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG,
+                                    IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+                                    &autoneg_reg);
+
+               autoneg_reg &= ~IXGBE_MII_100BASE_T_ADVERTISE;
+               if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
+                       autoneg_reg |= IXGBE_MII_100BASE_T_ADVERTISE;
+
+               hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG,
+                                     IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+                                     autoneg_reg);
+       }
+
+       /* Restart PHY autonegotiation and wait for completion */
+       hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
+                            IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg);
+
+       autoneg_reg |= IXGBE_MII_RESTART;
+
+       hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL,
+                             IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg);
+
+       /* Wait for autonegotiation to finish */
+       /* NOTE(review): poll budget is 10 x 10 usec, same concern as the
+        * generic variant — confirm the timeout path is expected. */
+       for (time_out = 0; time_out < max_time_out; time_out++) {
+               usec_delay(10);
+               /* Restart PHY autonegotiation and wait for completion */
+               status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS,
+                                             IXGBE_MDIO_AUTO_NEG_DEV_TYPE,
+                                             &autoneg_reg);
+
+               autoneg_reg &= IXGBE_MII_AUTONEG_COMPLETE;
+               if (autoneg_reg == IXGBE_MII_AUTONEG_COMPLETE) {
+                       break;
+               }
+       }
+
+       if (time_out == max_time_out) {
+               status = IXGBE_ERR_LINK_SETUP;
+               DEBUGOUT("ixgbe_setup_phy_link_tnx: time out");
+       }
+
+       return status;
+}
+
+/**
+ *  ixgbe_get_phy_firmware_version_tnx - Gets the PHY Firmware Version
+ *  @hw: pointer to hardware structure
+ *  @firmware_version: pointer to the PHY Firmware Version
+ *
+ *  Reads the TNX firmware revision register (vendor-specific MMD) and
+ *  returns the read status.
+ **/
+s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
+                                       u16 *firmware_version)
+{
+       s32 status = IXGBE_SUCCESS;
+
+       DEBUGFUNC("ixgbe_get_phy_firmware_version_tnx");
+
+       status = hw->phy.ops.read_reg(hw, TNX_FW_REV,
+                                     IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+                                     firmware_version);
+
+       return status;
+}
+
+/**
+ *  ixgbe_get_phy_firmware_version_generic - Gets the PHY Firmware Version
+ *  @hw: pointer to hardware structure
+ *  @firmware_version: pointer to the PHY Firmware Version
+ *
+ *  Same as the TNX variant but reads the AQ firmware revision register;
+ *  returns the read status.
+ **/
+s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
+                                       u16 *firmware_version)
+{
+       s32 status = IXGBE_SUCCESS;
+
+       DEBUGFUNC("ixgbe_get_phy_firmware_version_generic");
+
+       status = hw->phy.ops.read_reg(hw, AQ_FW_REV,
+                                     IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE,
+                                     firmware_version);
+
+       return status;
+}
+
+/**
+ *  ixgbe_reset_phy_nl - Performs a PHY reset
+ *  @hw: pointer to hardware structure
+ *
+ *  Resets the PHY through the MDIO PHY XS control register, then replays
+ *  the SFP module's PHY init sequence stored in the EEPROM/NVM.
+ **/
+s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
+{
+       u16 phy_offset, control, eword, edata, block_crc;
+       bool end_data = FALSE;
+       u16 list_offset, data_offset;
+       u16 phy_data = 0;
+       s32 ret_val = IXGBE_SUCCESS;
+       u32 i;
+
+       DEBUGFUNC("ixgbe_reset_phy_nl");
+
+       hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
+                            IXGBE_MDIO_PHY_XS_DEV_TYPE, &phy_data);
+
+       /* reset the PHY and poll for completion */
+       hw->phy.ops.write_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
+                             IXGBE_MDIO_PHY_XS_DEV_TYPE,
+                             (phy_data | IXGBE_MDIO_PHY_XS_RESET));
+
+       /* The RESET bit self-clears when the reset finishes; poll it for
+        * up to 100 * 10 ms before giving up. */
+       for (i = 0; i < 100; i++) {
+               hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL,
+                                    IXGBE_MDIO_PHY_XS_DEV_TYPE, &phy_data);
+               if ((phy_data & IXGBE_MDIO_PHY_XS_RESET) == 0)
+                       break;
+               msec_delay(10);
+       }
+
+       if ((phy_data & IXGBE_MDIO_PHY_XS_RESET) != 0) {
+               DEBUGOUT("PHY reset did not complete.\n");
+               ret_val = IXGBE_ERR_PHY;
+               goto out;
+       }
+
+       /* Get init offsets */
+       ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
+                                                     &data_offset);
+       if (ret_val != IXGBE_SUCCESS)
+               goto out;
+
+       /* NOTE(review): block_crc is read but never validated or used here,
+        * and this read's status is overwritten below — confirm intentional. */
+       ret_val = hw->eeprom.ops.read(hw, data_offset, &block_crc);
+       data_offset++;
+       /* Walk the init block: each control word selects a DELAY, a DATA
+        * run of PHY register writes, or a CONTROL (SOL/EOL) marker. */
+       while (!end_data) {
+               /*
+                * Read control word from PHY init contents offset
+                */
+               ret_val = hw->eeprom.ops.read(hw, data_offset, &eword);
+               control = (eword & IXGBE_CONTROL_MASK_NL) >>
+                          IXGBE_CONTROL_SHIFT_NL;
+               edata = eword & IXGBE_DATA_MASK_NL;
+               switch (control) {
+               case IXGBE_DELAY_NL:
+                       data_offset++;
+                       DEBUGOUT1("DELAY: %d MS\n", edata);
+                       msec_delay(edata);
+                       break;
+               case IXGBE_DATA_NL:
+                       /* edata holds the number of data words; the next
+                        * EEPROM word gives the starting PHY register. */
+                       DEBUGOUT("DATA:  \n");
+                       data_offset++;
+                       hw->eeprom.ops.read(hw, data_offset++,
+                                           &phy_offset);
+                       for (i = 0; i < edata; i++) {
+                               hw->eeprom.ops.read(hw, data_offset, &eword);
+                               hw->phy.ops.write_reg(hw, phy_offset,
+                                                     IXGBE_TWINAX_DEV, eword);
+                               DEBUGOUT2("Wrote %4.4x to %4.4x\n", eword,
+                                         phy_offset);
+                               data_offset++;
+                               phy_offset++;
+                       }
+                       break;
+               case IXGBE_CONTROL_NL:
+                       data_offset++;
+                       DEBUGOUT("CONTROL: \n");
+                       if (edata == IXGBE_CONTROL_EOL_NL) {
+                               /* End-of-list terminates the walk. */
+                               DEBUGOUT("EOL\n");
+                               end_data = TRUE;
+                       } else if (edata == IXGBE_CONTROL_SOL_NL) {
+                               DEBUGOUT("SOL\n");
+                       } else {
+                               DEBUGOUT("Bad control value\n");
+                               ret_val = IXGBE_ERR_PHY;
+                               goto out;
+                       }
+                       break;
+               default:
+                       DEBUGOUT("Bad control type\n");
+                       ret_val = IXGBE_ERR_PHY;
+                       goto out;
+               }
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  ixgbe_identify_sfp_module_generic - Identifies SFP modules
+ *  @hw: pointer to hardware structure
+ *
+ *  Searches for and identifies the SFP module and assigns appropriate PHY type.
+ *
+ *  Reads the SFF identifier, 1G/10G compliance codes, cable technology and
+ *  vendor OUI bytes from the module EEPROM over I2C, then classifies the
+ *  module (hw->phy.sfp_type) and vendor (hw->phy.type) and decides whether
+ *  the module is supported.
+ **/
+s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
+{
+       s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
+       u32 vendor_oui = 0;
+       enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type;
+       u8 identifier = 0;
+       u8 comp_codes_1g = 0;
+       u8 comp_codes_10g = 0;
+       u8 oui_bytes[3] = {0, 0, 0};
+       u8 cable_tech = 0;
+       u8 cable_spec = 0;
+       u16 enforce_sfp = 0;
+
+       DEBUGFUNC("ixgbe_identify_sfp_module_generic");
+
+       /* Only fiber media can carry an SFP cage. */
+       if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber) {
+               hw->phy.sfp_type = ixgbe_sfp_type_not_present;
+               status = IXGBE_ERR_SFP_NOT_PRESENT;
+               goto out;
+       }
+
+       status = hw->phy.ops.read_i2c_eeprom(hw,
+                                            IXGBE_SFF_IDENTIFIER,
+                                            &identifier);
+
+       /* I2C/semaphore failures are treated as "no module present". */
+       if (status == IXGBE_ERR_SWFW_SYNC ||
+           status == IXGBE_ERR_I2C ||
+           status == IXGBE_ERR_SFP_NOT_PRESENT)
+               goto err_read_i2c_eeprom;
+
+       /* LAN ID is needed for sfp_type determination */
+       hw->mac.ops.set_lan_id(hw);
+
+       if (identifier != IXGBE_SFF_IDENTIFIER_SFP) {
+               hw->phy.type = ixgbe_phy_sfp_unsupported;
+               status = IXGBE_ERR_SFP_NOT_SUPPORTED;
+       } else {
+               status = hw->phy.ops.read_i2c_eeprom(hw,
+                                                    IXGBE_SFF_1GBE_COMP_CODES,
+                                                    &comp_codes_1g);
+
+               if (status == IXGBE_ERR_SWFW_SYNC ||
+                   status == IXGBE_ERR_I2C ||
+                   status == IXGBE_ERR_SFP_NOT_PRESENT)
+                       goto err_read_i2c_eeprom;
+
+               status = hw->phy.ops.read_i2c_eeprom(hw,
+                                                    IXGBE_SFF_10GBE_COMP_CODES,
+                                                    &comp_codes_10g);
+
+               if (status == IXGBE_ERR_SWFW_SYNC ||
+                   status == IXGBE_ERR_I2C ||
+                   status == IXGBE_ERR_SFP_NOT_PRESENT)
+                       goto err_read_i2c_eeprom;
+               status = hw->phy.ops.read_i2c_eeprom(hw,
+                                                    IXGBE_SFF_CABLE_TECHNOLOGY,
+                                                    &cable_tech);
+
+               if (status == IXGBE_ERR_SWFW_SYNC ||
+                   status == IXGBE_ERR_I2C ||
+                   status == IXGBE_ERR_SFP_NOT_PRESENT)
+                       goto err_read_i2c_eeprom;
+
+                /* ID Module
+                 * =========
+                 * 0   SFP_DA_CU
+                 * 1   SFP_SR
+                 * 2   SFP_LR
+                 * 3   SFP_DA_CORE0 - 82599-specific
+                 * 4   SFP_DA_CORE1 - 82599-specific
+                 * 5   SFP_SR/LR_CORE0 - 82599-specific
+                 * 6   SFP_SR/LR_CORE1 - 82599-specific
+                 * 7   SFP_act_lmt_DA_CORE0 - 82599-specific
+                 * 8   SFP_act_lmt_DA_CORE1 - 82599-specific
+                 * 9   SFP_1g_cu_CORE0 - 82599-specific
+                 * 10  SFP_1g_cu_CORE1 - 82599-specific
+                 */
+               if (hw->mac.type == ixgbe_mac_82598EB) {
+                       if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
+                               hw->phy.sfp_type = ixgbe_sfp_type_da_cu;
+                       else if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
+                               hw->phy.sfp_type = ixgbe_sfp_type_sr;
+                       else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
+                               hw->phy.sfp_type = ixgbe_sfp_type_lr;
+                       else
+                               hw->phy.sfp_type = ixgbe_sfp_type_unknown;
+               } else if (hw->mac.type == ixgbe_mac_82599EB) {
+                       /* 82599 sfp_type is per LAN port (core0/core1). */
+                       if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) {
+                               if (hw->bus.lan_id == 0)
+                                       hw->phy.sfp_type =
+                                                    ixgbe_sfp_type_da_cu_core0;
+                               else
+                                       hw->phy.sfp_type =
+                                                    ixgbe_sfp_type_da_cu_core1;
+                       } else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) {
+                               /* Active DA: check if the cable is limiting.
+                                * NOTE(review): this read's status is not
+                                * checked — cable_spec stays 0 on failure. */
+                               hw->phy.ops.read_i2c_eeprom(
+                                               hw, IXGBE_SFF_CABLE_SPEC_COMP,
+                                               &cable_spec);
+                               if (cable_spec &
+                                   IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING) {
+                                       if (hw->bus.lan_id == 0)
+                                               hw->phy.sfp_type =
+                                               ixgbe_sfp_type_da_act_lmt_core0;
+                                       else
+                                               hw->phy.sfp_type =
+                                               ixgbe_sfp_type_da_act_lmt_core1;
+                               } else {
+                                       hw->phy.sfp_type =
+                                                       ixgbe_sfp_type_unknown;
+                               }
+                       } else if (comp_codes_10g &
+                                  (IXGBE_SFF_10GBASESR_CAPABLE |
+                                   IXGBE_SFF_10GBASELR_CAPABLE)) {
+                               if (hw->bus.lan_id == 0)
+                                       hw->phy.sfp_type =
+                                                     ixgbe_sfp_type_srlr_core0;
+                               else
+                                       hw->phy.sfp_type =
+                                                     ixgbe_sfp_type_srlr_core1;
+                       } else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) {
+                               if (hw->bus.lan_id == 0)
+                                       hw->phy.sfp_type =
+                                               ixgbe_sfp_type_1g_cu_core0;
+                               else
+                                       hw->phy.sfp_type =
+                                               ixgbe_sfp_type_1g_cu_core1;
+                       } else {
+                               hw->phy.sfp_type = ixgbe_sfp_type_unknown;
+                       }
+               }
+
+               /* A changed sfp_type means the module was swapped and its
+                * init sequence must be re-run. */
+               if (hw->phy.sfp_type != stored_sfp_type)
+                       hw->phy.sfp_setup_needed = TRUE;
+
+               /* Determine if the SFP+ PHY is dual speed or not. */
+               hw->phy.multispeed_fiber = FALSE;
+               if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) &&
+                  (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) ||
+                  ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) &&
+                  (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)))
+                       hw->phy.multispeed_fiber = TRUE;
+
+               /* Determine PHY vendor */
+               if (hw->phy.type != ixgbe_phy_nl) {
+                       hw->phy.id = identifier;
+                       status = hw->phy.ops.read_i2c_eeprom(hw,
+                                                   IXGBE_SFF_VENDOR_OUI_BYTE0,
+                                                   &oui_bytes[0]);
+
+                       if (status == IXGBE_ERR_SWFW_SYNC ||
+                           status == IXGBE_ERR_I2C ||
+                           status == IXGBE_ERR_SFP_NOT_PRESENT)
+                               goto err_read_i2c_eeprom;
+
+                       status = hw->phy.ops.read_i2c_eeprom(hw,
+                                                   IXGBE_SFF_VENDOR_OUI_BYTE1,
+                                                   &oui_bytes[1]);
+
+                       if (status == IXGBE_ERR_SWFW_SYNC ||
+                           status == IXGBE_ERR_I2C ||
+                           status == IXGBE_ERR_SFP_NOT_PRESENT)
+                               goto err_read_i2c_eeprom;
+
+                       status = hw->phy.ops.read_i2c_eeprom(hw,
+                                                   IXGBE_SFF_VENDOR_OUI_BYTE2,
+                                                   &oui_bytes[2]);
+
+                       if (status == IXGBE_ERR_SWFW_SYNC ||
+                           status == IXGBE_ERR_I2C ||
+                           status == IXGBE_ERR_SFP_NOT_PRESENT)
+                               goto err_read_i2c_eeprom;
+
+                       /* Pack the three OUI bytes into one word for the
+                        * vendor comparison below. */
+                       vendor_oui =
+                         ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) |
+                          (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) |
+                          (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT));
+
+                       switch (vendor_oui) {
+                       case IXGBE_SFF_VENDOR_OUI_TYCO:
+                               if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
+                                       hw->phy.type =
+                                                   ixgbe_phy_sfp_passive_tyco;
+                               break;
+                       case IXGBE_SFF_VENDOR_OUI_FTL:
+                               if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
+                                       hw->phy.type = ixgbe_phy_sfp_ftl_active;
+                               else
+                                       hw->phy.type = ixgbe_phy_sfp_ftl;
+                               break;
+                       case IXGBE_SFF_VENDOR_OUI_AVAGO:
+                               hw->phy.type = ixgbe_phy_sfp_avago;
+                               break;
+                       case IXGBE_SFF_VENDOR_OUI_INTEL:
+                               hw->phy.type = ixgbe_phy_sfp_intel;
+                               break;
+                       default:
+                               if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
+                                       hw->phy.type =
+                                                ixgbe_phy_sfp_passive_unknown;
+                               else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
+                                       hw->phy.type =
+                                               ixgbe_phy_sfp_active_unknown;
+                               else
+                                       hw->phy.type = ixgbe_phy_sfp_unknown;
+                               break;
+                       }
+               }
+
+               /* Allow any DA cable vendor */
+               if (cable_tech & (IXGBE_SFF_DA_PASSIVE_CABLE |
+                   IXGBE_SFF_DA_ACTIVE_CABLE)) {
+                       status = IXGBE_SUCCESS;
+                       goto out;
+               }
+
+               /* Verify supported 1G SFP modules */
+               if (comp_codes_10g == 0 &&
+                   !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
+                     hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0)) {
+                       hw->phy.type = ixgbe_phy_sfp_unsupported;
+                       status = IXGBE_ERR_SFP_NOT_SUPPORTED;
+                       goto out;
+               }
+
+               /* Anything else 82598-based is supported */
+               if (hw->mac.type == ixgbe_mac_82598EB) {
+                       status = IXGBE_SUCCESS;
+                       goto out;
+               }
+
+               /* 82599: unless NVM caps allow any SFP, only Intel optical
+                * modules (and the 1G copper types above) are accepted. */
+               ixgbe_get_device_caps(hw, &enforce_sfp);
+               if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP) &&
+                   !((hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0) ||
+                     (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1))) {
+                       /* Make sure we're a supported PHY type */
+                       if (hw->phy.type == ixgbe_phy_sfp_intel) {
+                               status = IXGBE_SUCCESS;
+                       } else {
+                               DEBUGOUT("SFP+ module not supported\n");
+                               hw->phy.type = ixgbe_phy_sfp_unsupported;
+                               status = IXGBE_ERR_SFP_NOT_SUPPORTED;
+                       }
+               } else {
+                       status = IXGBE_SUCCESS;
+               }
+       }
+
+out:
+       return status;
+
+err_read_i2c_eeprom:
+       /* EEPROM unreadable: report the module as absent and, except for
+        * the NL PHY, forget any previously identified PHY. */
+       hw->phy.sfp_type = ixgbe_sfp_type_not_present;
+       if (hw->phy.type != ixgbe_phy_nl) {
+               hw->phy.id = 0;
+               hw->phy.type = ixgbe_phy_unknown;
+       }
+       return IXGBE_ERR_SFP_NOT_PRESENT;
+}
+
+/**
+ *  ixgbe_get_sfp_init_sequence_offsets - Provides offset of PHY init sequence
+ *  @hw: pointer to hardware structure
+ *  @list_offset: offset to the SFP ID list
+ *  @data_offset: offset to the SFP data block
+ *
+ *  Checks the MAC's EEPROM to see if it supports a given SFP+ module type, if
+ *  so it returns the offsets to the phy init sequence block.
+ *
+ *  The EEPROM list is laid out as (sfp_id, data_offset) word pairs and is
+ *  terminated by IXGBE_PHY_INIT_END_NL.
+ **/
+s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
+                                        u16 *list_offset,
+                                        u16 *data_offset)
+{
+       u16 sfp_id;
+       u16 sfp_type = hw->phy.sfp_type;
+
+       DEBUGFUNC("ixgbe_get_sfp_init_sequence_offsets");
+
+       if (hw->phy.sfp_type == ixgbe_sfp_type_unknown)
+               return IXGBE_ERR_SFP_NOT_SUPPORTED;
+
+       if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
+               return IXGBE_ERR_SFP_NOT_PRESENT;
+
+       if ((hw->device_id == IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM) &&
+           (hw->phy.sfp_type == ixgbe_sfp_type_da_cu))
+               return IXGBE_ERR_SFP_NOT_SUPPORTED;
+
+       /*
+        * Limiting active cables and 1G Phys must be initialized as
+        * SR modules
+        */
+       if (sfp_type == ixgbe_sfp_type_da_act_lmt_core0 ||
+           sfp_type == ixgbe_sfp_type_1g_cu_core0)
+               sfp_type = ixgbe_sfp_type_srlr_core0;
+       else if (sfp_type == ixgbe_sfp_type_da_act_lmt_core1 ||
+                sfp_type == ixgbe_sfp_type_1g_cu_core1)
+               sfp_type = ixgbe_sfp_type_srlr_core1;
+
+       /* Read offset to PHY init contents */
+       /* NOTE(review): this read's return status is not checked; a failed
+        * read leaves *list_offset unchanged — confirm intentional. */
+       hw->eeprom.ops.read(hw, IXGBE_PHY_INIT_OFFSET_NL, list_offset);
+
+       /* 0 and 0xFFFF both mean "no init sequence programmed". */
+       if ((!*list_offset) || (*list_offset == 0xFFFF))
+               return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT;
+
+       /* Shift offset to first ID word */
+       (*list_offset)++;
+
+       /*
+        * Find the matching SFP ID in the EEPROM
+        * and program the init sequence
+        */
+       hw->eeprom.ops.read(hw, *list_offset, &sfp_id);
+
+       while (sfp_id != IXGBE_PHY_INIT_END_NL) {
+               if (sfp_id == sfp_type) {
+                       /* Match: the following word is the data offset. */
+                       (*list_offset)++;
+                       hw->eeprom.ops.read(hw, *list_offset, data_offset);
+                       if ((!*data_offset) || (*data_offset == 0xFFFF)) {
+                               DEBUGOUT("SFP+ module not supported\n");
+                               return IXGBE_ERR_SFP_NOT_SUPPORTED;
+                       } else {
+                               break;
+                       }
+               } else {
+                       /* Skip this (id, offset) pair and read the next id. */
+                       (*list_offset) += 2;
+                       if (hw->eeprom.ops.read(hw, *list_offset, &sfp_id))
+                               return IXGBE_ERR_PHY;
+               }
+       }
+
+       if (sfp_id == IXGBE_PHY_INIT_END_NL) {
+               DEBUGOUT("No matching SFP+ module found\n");
+               return IXGBE_ERR_SFP_NOT_SUPPORTED;
+       }
+
+       return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_read_i2c_eeprom_generic - Reads 8 bit EEPROM word over I2C interface
+ *  @hw: pointer to hardware structure
+ *  @byte_offset: EEPROM byte offset to read
+ *  @eeprom_data: value read
+ *
+ *  Performs byte read operation to SFP module's EEPROM over I2C interface.
+ **/
+s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
+                                  u8 *eeprom_data)
+{
+       s32 status;
+
+       DEBUGFUNC("ixgbe_read_i2c_eeprom_generic");
+
+       /* The SFP module EEPROM lives at the fixed I2C EEPROM address. */
+       status = hw->phy.ops.read_i2c_byte(hw, byte_offset,
+                                          IXGBE_I2C_EEPROM_DEV_ADDR,
+                                          eeprom_data);
+
+       return status;
+}
+
+/**
+ *  ixgbe_write_i2c_eeprom_generic - Writes 8 bit EEPROM word over I2C interface
+ *  @hw: pointer to hardware structure
+ *  @byte_offset: EEPROM byte offset to write
+ *  @eeprom_data: value to write
+ *
+ *  Performs byte write operation to SFP module's EEPROM over I2C interface.
+ **/
+s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
+                                   u8 eeprom_data)
+{
+       s32 status;
+
+       DEBUGFUNC("ixgbe_write_i2c_eeprom_generic");
+
+       /* The SFP module EEPROM lives at the fixed I2C EEPROM address. */
+       status = hw->phy.ops.write_i2c_byte(hw, byte_offset,
+                                           IXGBE_I2C_EEPROM_DEV_ADDR,
+                                           eeprom_data);
+
+       return status;
+}
+
+/**
+ *  ixgbe_read_i2c_byte_generic - Reads 8 bit word over I2C
+ *  @hw: pointer to hardware structure
+ *  @byte_offset: byte offset to read
+ *  @data: value read
+ *
+ *  Performs byte read operation to SFP module's EEPROM over I2C interface at
+ *  a specified device address.
+ *
+ *  Bit-bangs a full I2C combined transaction (address+write, offset,
+ *  repeated start, address+read, data, NACK, stop), retrying up to
+ *  max_retry times after clearing the bus on any failure.
+ **/
+s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
+                                u8 dev_addr, u8 *data)
+{
+       s32 status = IXGBE_SUCCESS;
+       u32 max_retry = 10;
+       u32 retry = 0;
+       u16 swfw_mask = 0;
+       bool nack = 1;
+       /* Pre-zero: ixgbe_clock_in_i2c_byte ORs bits into *data. */
+       *data = 0;
+
+       DEBUGFUNC("ixgbe_read_i2c_byte_generic");
+
+       /* Pick the SW/FW semaphore for this LAN port's PHY. */
+       if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
+               swfw_mask = IXGBE_GSSR_PHY1_SM;
+       else
+               swfw_mask = IXGBE_GSSR_PHY0_SM;
+
+       do {
+               if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)
+                   != IXGBE_SUCCESS) {
+                       status = IXGBE_ERR_SWFW_SYNC;
+                       goto read_byte_out;
+               }
+
+               ixgbe_i2c_start(hw);
+
+               /* Device Address and write indication */
+               status = ixgbe_clock_out_i2c_byte(hw, dev_addr);
+               if (status != IXGBE_SUCCESS)
+                       goto fail;
+
+               status = ixgbe_get_i2c_ack(hw);
+               if (status != IXGBE_SUCCESS)
+                       goto fail;
+
+               status = ixgbe_clock_out_i2c_byte(hw, byte_offset);
+               if (status != IXGBE_SUCCESS)
+                       goto fail;
+
+               status = ixgbe_get_i2c_ack(hw);
+               if (status != IXGBE_SUCCESS)
+                       goto fail;
+
+               /* Repeated start switches the bus to read mode. */
+               ixgbe_i2c_start(hw);
+
+               /* Device Address and read indication */
+               status = ixgbe_clock_out_i2c_byte(hw, (dev_addr | 0x1));
+               if (status != IXGBE_SUCCESS)
+                       goto fail;
+
+               status = ixgbe_get_i2c_ack(hw);
+               if (status != IXGBE_SUCCESS)
+                       goto fail;
+
+               status = ixgbe_clock_in_i2c_byte(hw, data);
+               if (status != IXGBE_SUCCESS)
+                       goto fail;
+
+               /* NACK the byte: tells the slave this is the last read. */
+               status = ixgbe_clock_out_i2c_bit(hw, nack);
+               if (status != IXGBE_SUCCESS)
+                       goto fail;
+
+               ixgbe_i2c_stop(hw);
+               break;
+
+fail:
+               /* Drop the semaphore and clear the bus before retrying. */
+               hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+               msec_delay(100);
+               ixgbe_i2c_bus_clear(hw);
+               retry++;
+               if (retry < max_retry)
+                       DEBUGOUT("I2C byte read error - Retrying.\n");
+               else
+                       DEBUGOUT("I2C byte read error.\n");
+
+       } while (retry < max_retry);
+
+       hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+
+read_byte_out:
+       return status;
+}
+
+/**
+ *  ixgbe_write_i2c_byte_generic - Writes 8 bit word over I2C
+ *  @hw: pointer to hardware structure
+ *  @byte_offset: byte offset to write
+ *  @data: value to write
+ *
+ *  Performs byte write operation to SFP module's EEPROM over I2C interface at
+ *  a specified device address.
+ *
+ *  Unlike the read path, the SW/FW semaphore is taken once for the whole
+ *  attempt loop and max_retry is 1, so a failed write is not retried.
+ **/
+s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
+                                 u8 dev_addr, u8 data)
+{
+       s32 status = IXGBE_SUCCESS;
+       u32 max_retry = 1;
+       u32 retry = 0;
+       u16 swfw_mask = 0;
+
+       DEBUGFUNC("ixgbe_write_i2c_byte_generic");
+
+       /* Pick the SW/FW semaphore for this LAN port's PHY. */
+       if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
+               swfw_mask = IXGBE_GSSR_PHY1_SM;
+       else
+               swfw_mask = IXGBE_GSSR_PHY0_SM;
+
+       if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != IXGBE_SUCCESS) {
+               status = IXGBE_ERR_SWFW_SYNC;
+               goto write_byte_out;
+       }
+
+       do {
+               ixgbe_i2c_start(hw);
+
+               /* Device address with write indication (R/W bit clear). */
+               status = ixgbe_clock_out_i2c_byte(hw, dev_addr);
+               if (status != IXGBE_SUCCESS)
+                       goto fail;
+
+               status = ixgbe_get_i2c_ack(hw);
+               if (status != IXGBE_SUCCESS)
+                       goto fail;
+
+               status = ixgbe_clock_out_i2c_byte(hw, byte_offset);
+               if (status != IXGBE_SUCCESS)
+                       goto fail;
+
+               status = ixgbe_get_i2c_ack(hw);
+               if (status != IXGBE_SUCCESS)
+                       goto fail;
+
+               status = ixgbe_clock_out_i2c_byte(hw, data);
+               if (status != IXGBE_SUCCESS)
+                       goto fail;
+
+               status = ixgbe_get_i2c_ack(hw);
+               if (status != IXGBE_SUCCESS)
+                       goto fail;
+
+               ixgbe_i2c_stop(hw);
+               break;
+
+fail:
+               /* Clear the bus before a (possible) retry. */
+               ixgbe_i2c_bus_clear(hw);
+               retry++;
+               if (retry < max_retry)
+                       DEBUGOUT("I2C byte write error - Retrying.\n");
+               else
+                       DEBUGOUT("I2C byte write error.\n");
+       } while (retry < max_retry);
+
+       hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+
+write_byte_out:
+       return status;
+}
+
+/**
+ *  ixgbe_i2c_start - Sets I2C start condition
+ *  @hw: pointer to hardware structure
+ *
+ *  Sets I2C start condition (High -> Low on SDA while SCL is High)
+ *  and leaves SCL low, ready for the first data bit.
+ **/
+static void ixgbe_i2c_start(struct ixgbe_hw *hw)
+{
+       u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
+
+       DEBUGFUNC("ixgbe_i2c_start");
+
+       /* Start condition must begin with data and clock high */
+       ixgbe_set_i2c_data(hw, &i2cctl, 1);
+       ixgbe_raise_i2c_clk(hw, &i2cctl);
+
+       /* Setup time for start condition (4.7us) */
+       usec_delay(IXGBE_I2C_T_SU_STA);
+
+       /* SDA falling while SCL is high is the start condition itself. */
+       ixgbe_set_i2c_data(hw, &i2cctl, 0);
+
+       /* Hold time for start condition (4us) */
+       usec_delay(IXGBE_I2C_T_HD_STA);
+
+       ixgbe_lower_i2c_clk(hw, &i2cctl);
+
+       /* Minimum low period of clock is 4.7 us */
+       usec_delay(IXGBE_I2C_T_LOW);
+
+}
+
+/**
+ *  ixgbe_i2c_stop - Sets I2C stop condition
+ *  @hw: pointer to hardware structure
+ *
+ *  Sets I2C stop condition (Low -> High on SDA while SCL is High)
+ *  and observes the minimum bus-free time before the next start.
+ **/
+static void ixgbe_i2c_stop(struct ixgbe_hw *hw)
+{
+       u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
+
+       DEBUGFUNC("ixgbe_i2c_stop");
+
+       /* Stop condition must begin with data low and clock high */
+       ixgbe_set_i2c_data(hw, &i2cctl, 0);
+       ixgbe_raise_i2c_clk(hw, &i2cctl);
+
+       /* Setup time for stop condition (4us) */
+       usec_delay(IXGBE_I2C_T_SU_STO);
+
+       /* SDA rising while SCL is high is the stop condition itself. */
+       ixgbe_set_i2c_data(hw, &i2cctl, 1);
+
+       /* bus free time between stop and start (4.7us)*/
+       usec_delay(IXGBE_I2C_T_BUF);
+}
+
+/**
+ *  ixgbe_clock_in_i2c_byte - Clocks in one byte via I2C
+ *  @hw: pointer to hardware structure
+ *  @data: data byte to clock in
+ *
+ *  Clocks in one byte data via I2C data/clock, MSB first.
+ *  Bits are OR-ed into *data, so the caller must zero it beforehand
+ *  (see ixgbe_read_i2c_byte_generic).
+ **/
+static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data)
+{
+       s32 i;
+       bool bit = 0;
+
+       DEBUGFUNC("ixgbe_clock_in_i2c_byte");
+
+       /* MSB first: bit 7 down to bit 0. */
+       for (i = 7; i >= 0; i--) {
+               ixgbe_clock_in_i2c_bit(hw, &bit);
+               *data |= bit << i;
+       }
+
+       return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_clock_out_i2c_byte - Clocks out one byte via I2C
+ *  @hw: pointer to hardware structure
+ *  @data: data byte clocked out
+ *
+ *  Clocks out one byte data via I2C data/clock, MSB first, then releases
+ *  SDA so the slave can drive the ACK bit.
+ **/
+static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data)
+{
+       s32 status = IXGBE_SUCCESS;
+       s32 i;
+       u32 i2cctl;
+       bool bit = 0;
+
+       DEBUGFUNC("ixgbe_clock_out_i2c_byte");
+
+       /* MSB first; stop on the first bit that fails to clock out. */
+       for (i = 7; i >= 0; i--) {
+               bit = (data >> i) & 0x1;
+               status = ixgbe_clock_out_i2c_bit(hw, bit);
+
+               if (status != IXGBE_SUCCESS)
+                       break;
+       }
+
+       /* Release SDA line (set high) */
+       i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
+       i2cctl |= IXGBE_I2C_DATA_OUT;
+       IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, i2cctl);
+       IXGBE_WRITE_FLUSH(hw);
+
+       return status;
+}
+
+/**
+ *  ixgbe_get_i2c_ack - Polls for I2C ACK
+ *  @hw: pointer to hardware structure
+ *
+ *  Clocks in/out one bit via I2C data/clock.
+ *  Returns IXGBE_ERR_I2C if the slave never pulls SDA low (no ACK)
+ *  within the poll window.
+ **/
+static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
+{
+       s32 status = IXGBE_SUCCESS;
+       u32 i = 0;
+       u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
+       u32 timeout = 10;
+       bool ack = 1;
+
+       DEBUGFUNC("ixgbe_get_i2c_ack");
+
+       ixgbe_raise_i2c_clk(hw, &i2cctl);
+
+
+       /* Minimum high period of clock is 4us */
+       usec_delay(IXGBE_I2C_T_HIGH);
+
+       /* Poll for ACK.  Note that ACK in I2C spec is
+        * transition from 1 to 0 */
+       for (i = 0; i < timeout; i++) {
+               i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
+               ack = ixgbe_get_i2c_data(&i2cctl);
+
+               usec_delay(1);
+               if (ack == 0)
+                       break;
+       }
+
+       /* ack still high after the poll window means no slave responded. */
+       if (ack == 1) {
+               DEBUGOUT("I2C ack was not received.\n");
+               status = IXGBE_ERR_I2C;
+       }
+
+       ixgbe_lower_i2c_clk(hw, &i2cctl);
+
+       /* Minimum low period of clock is 4.7 us */
+       usec_delay(IXGBE_I2C_T_LOW);
+
+       return status;
+}
+
+/**
+ *  ixgbe_clock_in_i2c_bit - Clocks in one bit via I2C data/clock
+ *  @hw: pointer to hardware structure
+ *  @data: read data value
+ *
+ *  Clocks in one bit via I2C data/clock: raises SCL, samples SDA while
+ *  the clock is high, then lowers SCL again.
+ **/
+static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data)
+{
+       u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
+
+       DEBUGFUNC("ixgbe_clock_in_i2c_bit");
+
+       ixgbe_raise_i2c_clk(hw, &i2cctl);
+
+       /* Minimum high period of clock is 4us */
+       usec_delay(IXGBE_I2C_T_HIGH);
+
+       /* Re-read I2CCTL so the sampled SDA value is current. */
+       i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
+       *data = ixgbe_get_i2c_data(&i2cctl);
+
+       ixgbe_lower_i2c_clk(hw, &i2cctl);
+
+       /* Minimum low period of clock is 4.7 us */
+       usec_delay(IXGBE_I2C_T_LOW);
+
+       return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_clock_out_i2c_bit - Clocks in/out one bit via I2C data/clock
+ *  @hw: pointer to hardware structure
+ *  @data: data value to write
+ *
+ *  Clocks out one bit via I2C data/clock: drives SDA, then pulses SCL
+ *  high and low while SDA is stable.
+ **/
+static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data)
+{
+       s32 status;
+       u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
+
+       DEBUGFUNC("ixgbe_clock_out_i2c_bit");
+
+       /* SDA must be set (and verified) before the clock pulse. */
+       status = ixgbe_set_i2c_data(hw, &i2cctl, data);
+       if (status == IXGBE_SUCCESS) {
+               ixgbe_raise_i2c_clk(hw, &i2cctl);
+
+               /* Minimum high period of clock is 4us */
+               usec_delay(IXGBE_I2C_T_HIGH);
+
+               ixgbe_lower_i2c_clk(hw, &i2cctl);
+
+               /* Minimum low period of clock is 4.7 us.
+                * This also takes care of the data hold time.
+                */
+               usec_delay(IXGBE_I2C_T_LOW);
+       } else {
+               status = IXGBE_ERR_I2C;
+               DEBUGOUT1("I2C data was not set to %X\n", data);
+       }
+
+       return status;
+}
+/**
+ *  ixgbe_raise_i2c_clk - Raises the I2C SCL clock
+ *  @hw: pointer to hardware structure
+ *  @i2cctl: Current value of I2CCTL register
+ *
+ *  Raises the I2C clock line '0'->'1'; *i2cctl is updated in place so the
+ *  caller's cached register value stays consistent.
+ **/
+static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
+{
+       DEBUGFUNC("ixgbe_raise_i2c_clk");
+
+       *i2cctl |= IXGBE_I2C_CLK_OUT;
+
+       IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl);
+       IXGBE_WRITE_FLUSH(hw);
+
+       /* SCL rise time (1000ns) */
+       usec_delay(IXGBE_I2C_T_RISE);
+}
+
+/**
+ *  ixgbe_lower_i2c_clk - Lowers the I2C SCL clock
+ *  @hw: pointer to hardware structure
+ *  @i2cctl: Current value of I2CCTL register
+ *
+ *  Drives the I2C clock line '1'->'0' and waits out the SCL fall time
+ **/
+static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
+{
+
+       DEBUGFUNC("ixgbe_lower_i2c_clk");
+
+       *i2cctl &= ~IXGBE_I2C_CLK_OUT;
+
+       IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl);
+       IXGBE_WRITE_FLUSH(hw);
+
+       /* SCL fall time (300ns) */
+       usec_delay(IXGBE_I2C_T_FALL);
+}
+
+/**
+ *  ixgbe_set_i2c_data - Sets the I2C data bit
+ *  @hw: pointer to hardware structure
+ *  @i2cctl: Current value of I2CCTL register
+ *  @data: I2C data value (0 or 1) to set
+ *
+ *  Sets the I2C data bit, then reads it back; returns IXGBE_ERR_I2C on mismatch
+ **/
+static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data)
+{
+       s32 status = IXGBE_SUCCESS;
+
+       DEBUGFUNC("ixgbe_set_i2c_data");
+
+       if (data)
+               *i2cctl |= IXGBE_I2C_DATA_OUT;
+       else
+               *i2cctl &= ~IXGBE_I2C_DATA_OUT;
+
+       IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl);
+       IXGBE_WRITE_FLUSH(hw);
+
+       /* Data rise/fall (1000ns/300ns) and set-up time (250ns) */
+       usec_delay(IXGBE_I2C_T_RISE + IXGBE_I2C_T_FALL + IXGBE_I2C_T_SU_DATA);
+
+       /* Verify data was set correctly */
+       *i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
+       if (data != ixgbe_get_i2c_data(i2cctl)) {
+               status = IXGBE_ERR_I2C;
+               DEBUGOUT1("Error - I2C data was not set to %X.\n", data);
+       }
+
+       return status;
+}
+
+/**
+ *  ixgbe_get_i2c_data - Reads the I2C SDA data bit
+ *  (takes no hw pointer; operates only on the cached I2CCTL value)
+ *  @i2cctl: Current value of I2CCTL register
+ *
+ *  Returns the I2C data bit value
+ **/
+static bool ixgbe_get_i2c_data(u32 *i2cctl)
+{
+       bool data;
+
+       DEBUGFUNC("ixgbe_get_i2c_data");
+
+       if (*i2cctl & IXGBE_I2C_DATA_IN)
+               data = 1;
+       else
+               data = 0;
+
+       return data;
+}
+
+/**
+ *  ixgbe_i2c_bus_clear - Clears the I2C bus
+ *  @hw: pointer to hardware structure
+ *
+ *  Clears the I2C bus by sending nine clock pulses.
+ *  Used when data line is stuck low.
+ **/
+void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw)
+{
+       u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
+       u32 i;
+
+       DEBUGFUNC("ixgbe_i2c_bus_clear");
+
+       ixgbe_i2c_start(hw);
+
+       /* Release SDA so the slave can drive it during the clock pulses */
+       ixgbe_set_i2c_data(hw, &i2cctl, 1);
+
+       for (i = 0; i < 9; i++) {
+               ixgbe_raise_i2c_clk(hw, &i2cctl);
+
+               /* Min high period of clock is 4us */
+               usec_delay(IXGBE_I2C_T_HIGH);
+
+               ixgbe_lower_i2c_clk(hw, &i2cctl);
+
+               /* Min low period of clock is 4.7us */
+               usec_delay(IXGBE_I2C_T_LOW);
+       }
+
+       ixgbe_i2c_start(hw);
+
+       /* Put the i2c bus back to default state */
+       ixgbe_i2c_stop(hw);
+}
+
+/**
+ *  ixgbe_tn_check_overtemp - Checks if an overtemp occurred.
+ *  @hw: pointer to hardware structure
+ *
+ *  Checks the LASI temp alarm status (82599 T3 LOM devices only);
+ *  returns IXGBE_ERR_OVERTEMP if the alarm is set, else IXGBE_SUCCESS
+ **/
+s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw)
+{
+       s32 status = IXGBE_SUCCESS;
+       u16 phy_data = 0;
+
+       DEBUGFUNC("ixgbe_tn_check_overtemp");
+
+       if (hw->device_id != IXGBE_DEV_ID_82599_T3_LOM)
+               goto out;
+
+       /* Check that the LASI temp alarm status was triggered */
+       hw->phy.ops.read_reg(hw, IXGBE_TN_LASI_STATUS_REG,
+                            IXGBE_MDIO_PMA_PMD_DEV_TYPE, &phy_data);
+
+       if (!(phy_data & IXGBE_TN_LASI_STATUS_TEMP_ALARM))
+               goto out;
+
+       status = IXGBE_ERR_OVERTEMP;
+out:
+       return status;
+}
diff --git a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_phy.h b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_phy.h
new file mode 100644 (file)
index 0000000..5c5dfa6
--- /dev/null
@@ -0,0 +1,141 @@
+/******************************************************************************
+
+  Copyright (c) 2001-2010, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#ifndef _IXGBE_PHY_H_
+#define _IXGBE_PHY_H_
+
+#include "ixgbe_type.h"
+#define IXGBE_I2C_EEPROM_DEV_ADDR    0xA0
+
+/* EEPROM byte offsets */
+#define IXGBE_SFF_IDENTIFIER         0x0
+#define IXGBE_SFF_IDENTIFIER_SFP     0x3
+#define IXGBE_SFF_VENDOR_OUI_BYTE0   0x25
+#define IXGBE_SFF_VENDOR_OUI_BYTE1   0x26
+#define IXGBE_SFF_VENDOR_OUI_BYTE2   0x27
+#define IXGBE_SFF_1GBE_COMP_CODES    0x6
+#define IXGBE_SFF_10GBE_COMP_CODES   0x3
+#define IXGBE_SFF_CABLE_TECHNOLOGY   0x8
+#define IXGBE_SFF_CABLE_SPEC_COMP    0x3C
+
+/* Bitmasks */
+#define IXGBE_SFF_DA_PASSIVE_CABLE           0x4
+#define IXGBE_SFF_DA_ACTIVE_CABLE            0x8
+#define IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING    0x4
+#define IXGBE_SFF_1GBASESX_CAPABLE           0x1
+#define IXGBE_SFF_1GBASELX_CAPABLE           0x2
+#define IXGBE_SFF_1GBASET_CAPABLE            0x8
+#define IXGBE_SFF_10GBASESR_CAPABLE          0x10
+#define IXGBE_SFF_10GBASELR_CAPABLE          0x20
+#define IXGBE_I2C_EEPROM_READ_MASK           0x100
+#define IXGBE_I2C_EEPROM_STATUS_MASK         0x3
+#define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0
+#define IXGBE_I2C_EEPROM_STATUS_PASS         0x1
+#define IXGBE_I2C_EEPROM_STATUS_FAIL         0x2
+#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS  0x3
+
+/* Flow control defines */
+#define IXGBE_TAF_SYM_PAUSE                  0x400
+#define IXGBE_TAF_ASM_PAUSE                  0x800
+
+/* Bit-shift macros */
+#define IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT    24
+#define IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT    16
+#define IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT    8
+
+/* Vendor OUIs: format of OUI is 0x[byte0][byte1][byte2][00] */
+#define IXGBE_SFF_VENDOR_OUI_TYCO     0x00407600
+#define IXGBE_SFF_VENDOR_OUI_FTL      0x00906500
+#define IXGBE_SFF_VENDOR_OUI_AVAGO    0x00176A00
+#define IXGBE_SFF_VENDOR_OUI_INTEL    0x001B2100
+
+/* I2C SDA and SCL timing parameters for standard mode (microseconds) */
+#define IXGBE_I2C_T_HD_STA  4
+#define IXGBE_I2C_T_LOW     5
+#define IXGBE_I2C_T_HIGH    4
+#define IXGBE_I2C_T_SU_STA  5
+#define IXGBE_I2C_T_HD_DATA 5
+#define IXGBE_I2C_T_SU_DATA 1
+#define IXGBE_I2C_T_RISE    1
+#define IXGBE_I2C_T_FALL    1
+#define IXGBE_I2C_T_SU_STO  4
+#define IXGBE_I2C_T_BUF     5
+
+#define IXGBE_TN_LASI_STATUS_REG        0x9005
+#define IXGBE_TN_LASI_STATUS_TEMP_ALARM 0x0008
+
+s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw);
+bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr);
+enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id);
+s32 ixgbe_get_phy_id(struct ixgbe_hw *hw);
+s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw);
+s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw);
+s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
+                               u32 device_type, u16 *phy_data);
+s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
+                                u32 device_type, u16 phy_data);
+s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw);
+s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
+                                       ixgbe_link_speed speed,
+                                       bool autoneg,
+                                       bool autoneg_wait_to_complete);
+s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
+                                             ixgbe_link_speed *speed,
+                                             bool *autoneg);
+
+/* PHY specific */
+s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw,
+                             ixgbe_link_speed *speed,
+                             bool *link_up);
+s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw);
+s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
+                                       u16 *firmware_version);
+s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
+                                       u16 *firmware_version);
+
+s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw);
+s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
+s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
+                                        u16 *list_offset,
+                                        u16 *data_offset);
+s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw);
+s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
+                                u8 dev_addr, u8 *data);
+s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
+                                 u8 dev_addr, u8 data);
+s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
+                                  u8 *eeprom_data);
+s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
+                                   u8 eeprom_data);
+#endif /* _IXGBE_PHY_H_ */
diff --git a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_type.h b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_type.h
new file mode 100644 (file)
index 0000000..a3a3c5b
--- /dev/null
@@ -0,0 +1,3138 @@
+/******************************************************************************
+
+  Copyright (c) 2001-2010, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#ifndef _IXGBE_TYPE_H_
+#define _IXGBE_TYPE_H_
+
+#include "ixgbe_osdep.h"
+
+
+/* Vendor ID */
+#define IXGBE_INTEL_VENDOR_ID   0x8086
+
+/* Device IDs */
+#define IXGBE_DEV_ID_82598               0x10B6
+#define IXGBE_DEV_ID_82598_BX            0x1508
+#define IXGBE_DEV_ID_82598AF_DUAL_PORT   0x10C6
+#define IXGBE_DEV_ID_82598AF_SINGLE_PORT 0x10C7
+#define IXGBE_DEV_ID_82598AT             0x10C8
+#define IXGBE_DEV_ID_82598AT2            0x150B
+#define IXGBE_DEV_ID_82598EB_SFP_LOM     0x10DB
+#define IXGBE_DEV_ID_82598EB_CX4         0x10DD
+#define IXGBE_DEV_ID_82598_CX4_DUAL_PORT 0x10EC
+#define IXGBE_DEV_ID_82598_DA_DUAL_PORT  0x10F1
+#define IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM      0x10E1
+#define IXGBE_DEV_ID_82598EB_XF_LR       0x10F4
+#define IXGBE_DEV_ID_82599_KX4  0x10F7
+#define IXGBE_DEV_ID_82599_KX4_MEZZ      0x1514
+#define IXGBE_DEV_ID_82599_KR   0x1517
+#define IXGBE_DEV_ID_82599_COMBO_BACKPLANE      0x10F8
+#define IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ       0x000C
+#define IXGBE_DEV_ID_82599_CX4  0x10F9
+#define IXGBE_DEV_ID_82599_SFP  0x10FB
+#define IXGBE_SUBDEV_ID_82599_SFP        0x11A9
+#define IXGBE_DEV_ID_82599_BACKPLANE_FCOE       0x152A
+#define IXGBE_DEV_ID_82599_SFP_FCOE      0x1529
+#define IXGBE_DEV_ID_82599_SFP_EM        0x1507
+#define IXGBE_DEV_ID_82599EN_SFP         0x1557
+#define IXGBE_DEV_ID_82599_XAUI_LOM      0x10FC
+#define IXGBE_DEV_ID_82599_T3_LOM        0x151C
+#define IXGBE_DEV_ID_82599_VF   0x10ED
+#define IXGBE_DEV_ID_X540_VF    0x1515
+#define IXGBE_DEV_ID_X540T      0x1528
+
+/* General Registers */
+#define IXGBE_CTRL      0x00000
+#define IXGBE_STATUS    0x00008
+#define IXGBE_CTRL_EXT  0x00018
+#define IXGBE_ESDP      0x00020
+#define IXGBE_EODSDP    0x00028
+#define IXGBE_I2CCTL    0x00028
+#define IXGBE_PHY_GPIO          0x00028
+#define IXGBE_MAC_GPIO          0x00030
+#define IXGBE_PHYINT_STATUS0    0x00100
+#define IXGBE_PHYINT_STATUS1    0x00104
+#define IXGBE_PHYINT_STATUS2    0x00108
+#define IXGBE_LEDCTL    0x00200
+#define IXGBE_FRTIMER   0x00048
+#define IXGBE_TCPTIMER  0x0004C
+#define IXGBE_CORESPARE 0x00600
+#define IXGBE_EXVET     0x05078
+
+/* NVM Registers */
+#define IXGBE_EEC       0x10010
+#define IXGBE_EERD      0x10014
+#define IXGBE_EEWR      0x10018
+#define IXGBE_FLA       0x1001C
+#define IXGBE_EEMNGCTL  0x10110
+#define IXGBE_EEMNGDATA 0x10114
+#define IXGBE_FLMNGCTL  0x10118
+#define IXGBE_FLMNGDATA 0x1011C
+#define IXGBE_FLMNGCNT  0x10120
+#define IXGBE_FLOP      0x1013C
+#define IXGBE_GRC       0x10200
+#define IXGBE_SRAMREL   0x10210
+#define IXGBE_PHYDBG    0x10218
+
+/* General Receive Control */
+#define IXGBE_GRC_MNG   0x00000001 /* Manageability Enable */
+#define IXGBE_GRC_APME  0x00000002 /* APM enabled in EEPROM */
+
+#define IXGBE_VPDDIAG0  0x10204
+#define IXGBE_VPDDIAG1  0x10208
+
+/* I2CCTL Bit Masks */
+#define IXGBE_I2C_CLK_IN        0x00000001
+#define IXGBE_I2C_CLK_OUT       0x00000002
+#define IXGBE_I2C_DATA_IN       0x00000004
+#define IXGBE_I2C_DATA_OUT      0x00000008
+#define IXGBE_I2C_THERMAL_SENSOR_ADDR 0xF8
+
+/* Interrupt Registers */
+#define IXGBE_EICR      0x00800
+#define IXGBE_EICS      0x00808
+#define IXGBE_EIMS      0x00880
+#define IXGBE_EIMC      0x00888
+#define IXGBE_EIAC      0x00810
+#define IXGBE_EIAM      0x00890
+#define IXGBE_EICS_EX(_i)       (0x00A90 + (_i) * 4)
+#define IXGBE_EIMS_EX(_i)       (0x00AA0 + (_i) * 4)
+#define IXGBE_EIMC_EX(_i)       (0x00AB0 + (_i) * 4)
+#define IXGBE_EIAM_EX(_i)       (0x00AD0 + (_i) * 4)
+/* 82599 EITR is only 12 bits, with the lower 3 always zero */
+/*
+ * 82598 EITR is 16 bits but set the limits based on the max
+ * supported by all ixgbe hardware
+ */
+#define IXGBE_MAX_INT_RATE      488281
+#define IXGBE_MIN_INT_RATE      956
+#define IXGBE_MAX_EITR          0x00000FF8
+#define IXGBE_MIN_EITR          8
+#define IXGBE_EITR(_i)  (((_i) <= 23) ? (0x00820 + ((_i) * 4)) : \
+                         (0x012300 + (((_i) - 24) * 4)))
+#define IXGBE_EITR_ITR_INT_MASK 0x00000FF8
+#define IXGBE_EITR_LLI_MOD      0x00008000
+#define IXGBE_EITR_CNT_WDIS     0x80000000
+#define IXGBE_IVAR(_i)  (0x00900 + ((_i) * 4)) /* 24 at 0x900-0x960 */
+#define IXGBE_IVAR_MISC 0x00A00 /* misc MSI-X interrupt causes */
+#define IXGBE_EITRSEL   0x00894
+#define IXGBE_MSIXT     0x00000 /* MSI-X Table. 0x0000 - 0x01C */
+#define IXGBE_MSIXPBA   0x02000 /* MSI-X Pending bit array */
+#define IXGBE_PBACL(_i) (((_i) == 0) ? (0x11068) : (0x110C0 + ((_i) * 4)))
+#define IXGBE_GPIE      0x00898
+
+/* Flow Control Registers */
+#define IXGBE_FCADBUL   0x03210
+#define IXGBE_FCADBUH   0x03214
+#define IXGBE_FCAMACL   0x04328
+#define IXGBE_FCAMACH   0x0432C
+#define IXGBE_FCRTH_82599(_i) (0x03260 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_FCRTL_82599(_i) (0x03220 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_PFCTOP    0x03008
+#define IXGBE_FCTTV(_i) (0x03200 + ((_i) * 4)) /* 4 of these (0-3) */
+#define IXGBE_FCRTL(_i) (0x03220 + ((_i) * 8)) /* 8 of these (0-7) */
+#define IXGBE_FCRTH(_i) (0x03260 + ((_i) * 8)) /* 8 of these (0-7) */
+#define IXGBE_FCRTV     0x032A0
+#define IXGBE_FCCFG     0x03D00
+#define IXGBE_TFCS      0x0CE00
+
+/* Receive DMA Registers */
+#define IXGBE_RDBAL(_i) (((_i) < 64) ? (0x01000 + ((_i) * 0x40)) : \
+                         (0x0D000 + ((_i - 64) * 0x40)))
+#define IXGBE_RDBAH(_i) (((_i) < 64) ? (0x01004 + ((_i) * 0x40)) : \
+                         (0x0D004 + ((_i - 64) * 0x40)))
+#define IXGBE_RDLEN(_i) (((_i) < 64) ? (0x01008 + ((_i) * 0x40)) : \
+                         (0x0D008 + ((_i - 64) * 0x40)))
+#define IXGBE_RDH(_i)   (((_i) < 64) ? (0x01010 + ((_i) * 0x40)) : \
+                         (0x0D010 + ((_i - 64) * 0x40)))
+#define IXGBE_RDT(_i)   (((_i) < 64) ? (0x01018 + ((_i) * 0x40)) : \
+                         (0x0D018 + ((_i - 64) * 0x40)))
+#define IXGBE_RXDCTL(_i) (((_i) < 64) ? (0x01028 + ((_i) * 0x40)) : \
+                          (0x0D028 + ((_i - 64) * 0x40)))
+#define IXGBE_RSCCTL(_i) (((_i) < 64) ? (0x0102C + ((_i) * 0x40)) : \
+                          (0x0D02C + ((_i - 64) * 0x40)))
+#define IXGBE_RSCDBU     0x03028
+#define IXGBE_RDDCC      0x02F20
+#define IXGBE_RXMEMWRAP  0x03190
+#define IXGBE_STARCTRL   0x03024
+/*
+ * Split and Replication Receive Control Registers
+ * 00-15 : 0x02100 + n*4
+ * 16-63 : 0x01014 + n*0x40
+ * 64-127: 0x0D014 + (n-64)*0x40
+ */
+#define IXGBE_SRRCTL(_i) (((_i) <= 15) ? (0x02100 + ((_i) * 4)) : \
+                          (((_i) < 64) ? (0x01014 + ((_i) * 0x40)) : \
+                          (0x0D014 + ((_i - 64) * 0x40))))
+/*
+ * Rx DCA Control Register:
+ * 00-15 : 0x02200 + n*4
+ * 16-63 : 0x0100C + n*0x40
+ * 64-127: 0x0D00C + (n-64)*0x40
+ */
+#define IXGBE_DCA_RXCTRL(_i)    (((_i) <= 15) ? (0x02200 + ((_i) * 4)) : \
+                                 (((_i) < 64) ? (0x0100C + ((_i) * 0x40)) : \
+                                 (0x0D00C + ((_i - 64) * 0x40))))
+#define IXGBE_RDRXCTL           0x02F00
+#define IXGBE_RDRXCTL_RSC_PUSH  0x80
+#define IXGBE_RXPBSIZE(_i)      (0x03C00 + ((_i) * 4))
+                                             /* 8 of these 0x03C00 - 0x03C1C */
+#define IXGBE_RXCTRL    0x03000
+#define IXGBE_DROPEN    0x03D04
+#define IXGBE_RXPBSIZE_SHIFT 10
+
+/* Receive Registers */
+#define IXGBE_RXCSUM    0x05000
+#define IXGBE_RFCTL     0x05008
+#define IXGBE_DRECCCTL  0x02F08
+#define IXGBE_DRECCCTL_DISABLE 0
+#define IXGBE_DRECCCTL2 0x02F8C
+
+/* Multicast Table Array - 128 entries */
+#define IXGBE_MTA(_i)   (0x05200 + ((_i) * 4))
+#define IXGBE_RAL(_i)   (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
+                         (0x0A200 + ((_i) * 8)))
+#define IXGBE_RAH(_i)   (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
+                         (0x0A204 + ((_i) * 8)))
+#define IXGBE_MPSAR_LO(_i) (0x0A600 + ((_i) * 8))
+#define IXGBE_MPSAR_HI(_i) (0x0A604 + ((_i) * 8))
+/* Packet split receive type */
+#define IXGBE_PSRTYPE(_i)    (((_i) <= 15) ? (0x05480 + ((_i) * 4)) : \
+                              (0x0EA00 + ((_i) * 4)))
+/* array of 4096 1-bit vlan filters */
+#define IXGBE_VFTA(_i)  (0x0A000 + ((_i) * 4))
+/* array of 4096 4-bit vlan vmdq indices */
+#define IXGBE_VFTAVIND(_j, _i)  (0x0A200 + ((_j) * 0x200) + ((_i) * 4))
+#define IXGBE_FCTRL     0x05080
+#define IXGBE_VLNCTRL   0x05088
+#define IXGBE_MCSTCTRL  0x05090
+#define IXGBE_MRQC      0x05818
+#define IXGBE_SAQF(_i)  (0x0E000 + ((_i) * 4)) /* Source Address Queue Filter */
+#define IXGBE_DAQF(_i)  (0x0E200 + ((_i) * 4)) /* Dest. Address Queue Filter */
+#define IXGBE_SDPQF(_i) (0x0E400 + ((_i) * 4)) /* Src Dest. Addr Queue Filter */
+#define IXGBE_FTQF(_i)  (0x0E600 + ((_i) * 4)) /* Five Tuple Queue Filter */
+#define IXGBE_ETQF(_i)  (0x05128 + ((_i) * 4)) /* EType Queue Filter */
+#define IXGBE_ETQS(_i)  (0x0EC00 + ((_i) * 4)) /* EType Queue Select */
+#define IXGBE_SYNQF     0x0EC30 /* SYN Packet Queue Filter */
+#define IXGBE_RQTC      0x0EC70
+#define IXGBE_MTQC      0x08120
+#define IXGBE_VLVF(_i)  (0x0F100 + ((_i) * 4))  /* 64 of these (0-63) */
+#define IXGBE_VLVFB(_i) (0x0F200 + ((_i) * 4))  /* 128 of these (0-127) */
+#define IXGBE_VMVIR(_i) (0x08000 + ((_i) * 4))  /* 64 of these (0-63) */
+#define IXGBE_VT_CTL         0x051B0
+#define IXGBE_PFMAILBOX(_i)  (0x04B00 + (4 * (_i))) /* 64 total */
+#define IXGBE_PFMBMEM(_i)    (0x13000 + (64 * (_i))) /* 64 Mailboxes, 16 DW each */
+#define IXGBE_PFMBICR(_i)    (0x00710 + (4 * (_i))) /* 4 total */
+#define IXGBE_PFMBIMR(_i)    (0x00720 + (4 * (_i))) /* 4 total */
+#define IXGBE_VFRE(_i)       (0x051E0 + ((_i) * 4))
+#define IXGBE_VFTE(_i)       (0x08110 + ((_i) * 4))
+#define IXGBE_VMECM(_i)      (0x08790 + ((_i) * 4))
+#define IXGBE_QDE            0x2F04
+#define IXGBE_VMTXSW(_i)     (0x05180 + ((_i) * 4)) /* 2 total */
+#define IXGBE_VMOLR(_i)      (0x0F000 + ((_i) * 4)) /* 64 total */
+#define IXGBE_UTA(_i)        (0x0F400 + ((_i) * 4))
+#define IXGBE_MRCTL(_i)      (0x0F600 + ((_i) * 4))
+#define IXGBE_VMRVLAN(_i)    (0x0F610 + ((_i) * 4))
+#define IXGBE_VMRVM(_i)      (0x0F630 + ((_i) * 4))
+#define IXGBE_L34T_IMIR(_i)  (0x0E800 + ((_i) * 4)) /*128 of these (0-127)*/
+#define IXGBE_RXFECCERR0         0x051B8
+#define IXGBE_LLITHRESH 0x0EC90
+#define IXGBE_IMIR(_i)  (0x05A80 + ((_i) * 4))  /* 8 of these (0-7) */
+#define IXGBE_IMIREXT(_i)       (0x05AA0 + ((_i) * 4))  /* 8 of these (0-7) */
+#define IXGBE_IMIRVP    0x05AC0
+#define IXGBE_VMD_CTL   0x0581C
+#define IXGBE_RETA(_i)  (0x05C00 + ((_i) * 4))  /* 32 of these (0-31) */
+#define IXGBE_RSSRK(_i) (0x05C80 + ((_i) * 4))  /* 10 of these (0-9) */
+
+/* Flow Director registers */
+#define IXGBE_FDIRCTRL  0x0EE00
+#define IXGBE_FDIRHKEY  0x0EE68
+#define IXGBE_FDIRSKEY  0x0EE6C
+#define IXGBE_FDIRDIP4M 0x0EE3C
+#define IXGBE_FDIRSIP4M 0x0EE40
+#define IXGBE_FDIRTCPM  0x0EE44
+#define IXGBE_FDIRUDPM  0x0EE48
+#define IXGBE_FDIRIP6M  0x0EE74
+#define IXGBE_FDIRM     0x0EE70
+
+/* Flow Director Stats registers */
+#define IXGBE_FDIRFREE  0x0EE38
+#define IXGBE_FDIRLEN   0x0EE4C
+#define IXGBE_FDIRUSTAT 0x0EE50
+#define IXGBE_FDIRFSTAT 0x0EE54
+#define IXGBE_FDIRMATCH 0x0EE58
+#define IXGBE_FDIRMISS  0x0EE5C
+
+/* Flow Director Programming registers */
+#define IXGBE_FDIRSIPv6(_i) (0x0EE0C + ((_i) * 4)) /* 3 of these (0-2) */
+#define IXGBE_FDIRIPSA      0x0EE18
+#define IXGBE_FDIRIPDA      0x0EE1C
+#define IXGBE_FDIRPORT      0x0EE20
+#define IXGBE_FDIRVLAN      0x0EE24
+#define IXGBE_FDIRHASH      0x0EE28
+#define IXGBE_FDIRCMD       0x0EE2C
+
+/* Transmit DMA registers */
+#define IXGBE_TDBAL(_i) (0x06000 + ((_i) * 0x40)) /* 32 of these (0-31)*/
+#define IXGBE_TDBAH(_i) (0x06004 + ((_i) * 0x40))
+#define IXGBE_TDLEN(_i) (0x06008 + ((_i) * 0x40))
+#define IXGBE_TDH(_i)   (0x06010 + ((_i) * 0x40))
+#define IXGBE_TDT(_i)   (0x06018 + ((_i) * 0x40))
+#define IXGBE_TXDCTL(_i) (0x06028 + ((_i) * 0x40))
+#define IXGBE_TDWBAL(_i) (0x06038 + ((_i) * 0x40))
+#define IXGBE_TDWBAH(_i) (0x0603C + ((_i) * 0x40))
+#define IXGBE_DTXCTL    0x07E00
+
+#define IXGBE_DMATXCTL          0x04A80
+#define IXGBE_PFVFSPOOF(_i)     (0x08200 + ((_i) * 4)) /* 8 of these 0 - 7 */
+#define IXGBE_PFDTXGSWC         0x08220
+#define IXGBE_DTXMXSZRQ         0x08100
+#define IXGBE_DTXTCPFLGL        0x04A88
+#define IXGBE_DTXTCPFLGH        0x04A8C
+#define IXGBE_LBDRPEN           0x0CA00
+#define IXGBE_TXPBTHRESH(_i)    (0x04950 + ((_i) * 4)) /* 8 of these 0 - 7 */
+
+#define IXGBE_DMATXCTL_TE       0x1 /* Transmit Enable */
+#define IXGBE_DMATXCTL_NS       0x2 /* No Snoop LSO hdr buffer */
+#define IXGBE_DMATXCTL_GDV      0x8 /* Global Double VLAN */
+#define IXGBE_DMATXCTL_VT_SHIFT 16  /* VLAN EtherType */
+
+#define IXGBE_PFDTXGSWC_VT_LBEN 0x1 /* Local L2 VT switch enable */
+
+/* Anti-spoofing defines */
+#define IXGBE_SPOOF_MACAS_MASK          0xFF
+#define IXGBE_SPOOF_VLANAS_MASK         0xFF00
+#define IXGBE_SPOOF_VLANAS_SHIFT        8
+#define IXGBE_PFVFSPOOF_REG_COUNT       8
+#define IXGBE_DCA_TXCTRL(_i)    (0x07200 + ((_i) * 4)) /* 16 of these (0-15) */
+/* Tx DCA Control register : 128 of these (0-127) */
+#define IXGBE_DCA_TXCTRL_82599(_i)  (0x0600C + ((_i) * 0x40))
+#define IXGBE_TIPG      0x0CB00
+#define IXGBE_TXPBSIZE(_i)      (0x0CC00 + ((_i) * 4)) /* 8 of these */
+#define IXGBE_MNGTXMAP  0x0CD10
+#define IXGBE_TIPG_FIBER_DEFAULT 3
+#define IXGBE_TXPBSIZE_SHIFT    10
+
+/* Wake up registers */
+#define IXGBE_WUC       0x05800
+#define IXGBE_WUFC      0x05808
+#define IXGBE_WUS       0x05810
+#define IXGBE_IPAV      0x05838
+#define IXGBE_IP4AT     0x05840 /* IPv4 table 0x5840-0x5858 */
+#define IXGBE_IP6AT     0x05880 /* IPv6 table 0x5880-0x588F */
+
+#define IXGBE_WUPL      0x05900
+#define IXGBE_WUPM      0x05A00 /* wake up pkt memory 0x5A00-0x5A7C */
+#define IXGBE_FHFT(_n)     (0x09000 + (_n * 0x100)) /* Flex host filter table */
+#define IXGBE_FHFT_EXT(_n) (0x09800 + (_n * 0x100)) /* Ext Flexible Host
+                                                     * Filter Table */
+
+#define IXGBE_FLEXIBLE_FILTER_COUNT_MAX         4
+#define IXGBE_EXT_FLEXIBLE_FILTER_COUNT_MAX     2
+
+/* Each Flexible Filter is at most 128 (0x80) bytes in length */
+#define IXGBE_FLEXIBLE_FILTER_SIZE_MAX  128
+#define IXGBE_FHFT_LENGTH_OFFSET        0xFC  /* Length byte in FHFT */
+#define IXGBE_FHFT_LENGTH_MASK          0x0FF /* Length in lower byte */
+
+/* Definitions for power management and wakeup registers */
+/* Wake Up Control */
+#define IXGBE_WUC_PME_EN     0x00000002 /* PME Enable */
+#define IXGBE_WUC_PME_STATUS 0x00000004 /* PME Status */
+#define IXGBE_WUC_WKEN       0x00000010 /* Enable PE_WAKE_N pin assertion  */
+
+/* Wake Up Filter Control */
+#define IXGBE_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
+#define IXGBE_WUFC_MAG  0x00000002 /* Magic Packet Wakeup Enable */
+#define IXGBE_WUFC_EX   0x00000004 /* Directed Exact Wakeup Enable */
+#define IXGBE_WUFC_MC   0x00000008 /* Directed Multicast Wakeup Enable */
+#define IXGBE_WUFC_BC   0x00000010 /* Broadcast Wakeup Enable */
+#define IXGBE_WUFC_ARP  0x00000020 /* ARP Request Packet Wakeup Enable */
+#define IXGBE_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */
+#define IXGBE_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */
+#define IXGBE_WUFC_MNG  0x00000100 /* Directed Mgmt Packet Wakeup Enable */
+
+#define IXGBE_WUFC_IGNORE_TCO   0x00008000 /* Ignore WakeOn TCO packets */
+#define IXGBE_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */
+#define IXGBE_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */
+#define IXGBE_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */
+#define IXGBE_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */
+#define IXGBE_WUFC_FLX4 0x00100000 /* Flexible Filter 4 Enable */
+#define IXGBE_WUFC_FLX5 0x00200000 /* Flexible Filter 5 Enable */
+#define IXGBE_WUFC_FLX_FILTERS     0x000F0000 /* Mask for 4 flex filters */
+#define IXGBE_WUFC_EXT_FLX_FILTERS 0x00300000 /* Mask for Ext. flex filters */
+#define IXGBE_WUFC_ALL_FILTERS     0x003F00FF /* Mask for all wakeup filters */
+#define IXGBE_WUFC_FLX_OFFSET      16 /* Offset to the Flexible Filters bits */
+
+/* Wake Up Status */
+#define IXGBE_WUS_LNKC  IXGBE_WUFC_LNKC
+#define IXGBE_WUS_MAG   IXGBE_WUFC_MAG
+#define IXGBE_WUS_EX    IXGBE_WUFC_EX
+#define IXGBE_WUS_MC    IXGBE_WUFC_MC
+#define IXGBE_WUS_BC    IXGBE_WUFC_BC
+#define IXGBE_WUS_ARP   IXGBE_WUFC_ARP
+#define IXGBE_WUS_IPV4  IXGBE_WUFC_IPV4
+#define IXGBE_WUS_IPV6  IXGBE_WUFC_IPV6
+#define IXGBE_WUS_MNG   IXGBE_WUFC_MNG
+#define IXGBE_WUS_FLX0  IXGBE_WUFC_FLX0
+#define IXGBE_WUS_FLX1  IXGBE_WUFC_FLX1
+#define IXGBE_WUS_FLX2  IXGBE_WUFC_FLX2
+#define IXGBE_WUS_FLX3  IXGBE_WUFC_FLX3
+#define IXGBE_WUS_FLX4  IXGBE_WUFC_FLX4
+#define IXGBE_WUS_FLX5  IXGBE_WUFC_FLX5
+#define IXGBE_WUS_FLX_FILTERS  IXGBE_WUFC_FLX_FILTERS
+
+/* Wake Up Packet Length */
+#define IXGBE_WUPL_LENGTH_MASK 0xFFFF
+
+/* DCB registers */
+#define MAX_TRAFFIC_CLASS 8
+#define IXGBE_RMCS      0x03D00
+#define IXGBE_DPMCS     0x07F40
+#define IXGBE_PDPMCS    0x0CD00
+#define IXGBE_RUPPBMR   0x050A0
+#define IXGBE_RT2CR(_i) (0x03C20 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_RT2SR(_i) (0x03C40 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_TDTQ2TCCR(_i)     (0x0602C + ((_i) * 0x40)) /* 8 of these (0-7) */
+#define IXGBE_TDTQ2TCSR(_i)     (0x0622C + ((_i) * 0x40)) /* 8 of these (0-7) */
+#define IXGBE_TDPT2TCCR(_i)     (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_TDPT2TCSR(_i)     (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */
+
+
+/* Security Control Registers */
+#define IXGBE_SECTXCTRL         0x08800
+#define IXGBE_SECTXSTAT         0x08804
+#define IXGBE_SECTXBUFFAF       0x08808
+#define IXGBE_SECTXMINIFG       0x08810
+#define IXGBE_SECRXCTRL         0x08D00
+#define IXGBE_SECRXSTAT         0x08D04
+
+/* Security Bit Fields and Masks */
+#define IXGBE_SECTXCTRL_SECTX_DIS       0x00000001
+#define IXGBE_SECTXCTRL_TX_DIS          0x00000002
+#define IXGBE_SECTXCTRL_STORE_FORWARD   0x00000004
+
+#define IXGBE_SECTXSTAT_SECTX_RDY       0x00000001
+#define IXGBE_SECTXSTAT_ECC_TXERR       0x00000002
+
+#define IXGBE_SECRXCTRL_SECRX_DIS       0x00000001
+#define IXGBE_SECRXCTRL_RX_DIS          0x00000002
+
+#define IXGBE_SECRXSTAT_SECRX_RDY       0x00000001
+#define IXGBE_SECRXSTAT_ECC_RXERR       0x00000002
+
+/* LinkSec (MacSec) Registers */
+#define IXGBE_LSECTXCAP         0x08A00
+#define IXGBE_LSECRXCAP         0x08F00
+#define IXGBE_LSECTXCTRL        0x08A04
+#define IXGBE_LSECTXSCL         0x08A08 /* SCI Low */
+#define IXGBE_LSECTXSCH         0x08A0C /* SCI High */
+#define IXGBE_LSECTXSA          0x08A10
+#define IXGBE_LSECTXPN0         0x08A14
+#define IXGBE_LSECTXPN1         0x08A18
+#define IXGBE_LSECTXKEY0(_n)    (0x08A1C + (4 * (_n))) /* 4 of these (0-3) */
+#define IXGBE_LSECTXKEY1(_n)    (0x08A2C + (4 * (_n))) /* 4 of these (0-3) */
+#define IXGBE_LSECRXCTRL        0x08F04
+#define IXGBE_LSECRXSCL         0x08F08
+#define IXGBE_LSECRXSCH         0x08F0C
+#define IXGBE_LSECRXSA(_i)      (0x08F10 + (4 * (_i))) /* 2 of these (0-1) */
+#define IXGBE_LSECRXPN(_i)      (0x08F18 + (4 * (_i))) /* 2 of these (0-1) */
+#define IXGBE_LSECRXKEY(_n, _m) (0x08F20 + ((0x10 * (_n)) + (4 * (_m))))
+#define IXGBE_LSECTXUT          0x08A3C /* OutPktsUntagged */
+#define IXGBE_LSECTXPKTE        0x08A40 /* OutPktsEncrypted */
+#define IXGBE_LSECTXPKTP        0x08A44 /* OutPktsProtected */
+#define IXGBE_LSECTXOCTE        0x08A48 /* OutOctetsEncrypted */
+#define IXGBE_LSECTXOCTP        0x08A4C /* OutOctetsProtected */
+#define IXGBE_LSECRXUT          0x08F40 /* InPktsUntagged/InPktsNoTag */
+#define IXGBE_LSECRXOCTD        0x08F44 /* InOctetsDecrypted */
+#define IXGBE_LSECRXOCTV        0x08F48 /* InOctetsValidated */
+#define IXGBE_LSECRXBAD         0x08F4C /* InPktsBadTag */
+#define IXGBE_LSECRXNOSCI       0x08F50 /* InPktsNoSci */
+#define IXGBE_LSECRXUNSCI       0x08F54 /* InPktsUnknownSci */
+#define IXGBE_LSECRXUNCH        0x08F58 /* InPktsUnchecked */
+#define IXGBE_LSECRXDELAY       0x08F5C /* InPktsDelayed */
+#define IXGBE_LSECRXLATE        0x08F60 /* InPktsLate */
+#define IXGBE_LSECRXOK(_n)      (0x08F64 + (0x04 * (_n))) /* InPktsOk */
+#define IXGBE_LSECRXINV(_n)     (0x08F6C + (0x04 * (_n))) /* InPktsInvalid */
+#define IXGBE_LSECRXNV(_n)      (0x08F74 + (0x04 * (_n))) /* InPktsNotValid */
+#define IXGBE_LSECRXUNSA        0x08F7C /* InPktsUnusedSa */
+#define IXGBE_LSECRXNUSA        0x08F80 /* InPktsNotUsingSa */
+
+/* LinkSec (MacSec) Bit Fields and Masks */
+#define IXGBE_LSECTXCAP_SUM_MASK        0x00FF0000
+#define IXGBE_LSECTXCAP_SUM_SHIFT       16
+#define IXGBE_LSECRXCAP_SUM_MASK        0x00FF0000
+#define IXGBE_LSECRXCAP_SUM_SHIFT       16
+
+#define IXGBE_LSECTXCTRL_EN_MASK        0x00000003
+#define IXGBE_LSECTXCTRL_DISABLE        0x0
+#define IXGBE_LSECTXCTRL_AUTH           0x1
+#define IXGBE_LSECTXCTRL_AUTH_ENCRYPT   0x2
+#define IXGBE_LSECTXCTRL_AISCI          0x00000020
+#define IXGBE_LSECTXCTRL_PNTHRSH_MASK   0xFFFFFF00
+#define IXGBE_LSECTXCTRL_RSV_MASK       0x000000D8
+
+#define IXGBE_LSECRXCTRL_EN_MASK        0x0000000C
+#define IXGBE_LSECRXCTRL_EN_SHIFT       2
+#define IXGBE_LSECRXCTRL_DISABLE        0x0
+#define IXGBE_LSECRXCTRL_CHECK          0x1
+#define IXGBE_LSECRXCTRL_STRICT         0x2
+#define IXGBE_LSECRXCTRL_DROP           0x3
+#define IXGBE_LSECRXCTRL_PLSH           0x00000040
+#define IXGBE_LSECRXCTRL_RP             0x00000080
+#define IXGBE_LSECRXCTRL_RSV_MASK       0xFFFFFF33
+
+/* IpSec Registers */
+#define IXGBE_IPSTXIDX          0x08900
+#define IXGBE_IPSTXSALT         0x08904
+#define IXGBE_IPSTXKEY(_i)      (0x08908 + (4 * (_i))) /* 4 of these (0-3) */
+#define IXGBE_IPSRXIDX          0x08E00
+#define IXGBE_IPSRXIPADDR(_i)   (0x08E04 + (4 * (_i))) /* 4 of these (0-3) */
+#define IXGBE_IPSRXSPI          0x08E14
+#define IXGBE_IPSRXIPIDX        0x08E18
+#define IXGBE_IPSRXKEY(_i)      (0x08E1C + (4 * (_i))) /* 4 of these (0-3) */
+#define IXGBE_IPSRXSALT         0x08E2C
+#define IXGBE_IPSRXMOD          0x08E30
+
+#define IXGBE_SECTXCTRL_STORE_FORWARD_ENABLE    0x4
+
+/* DCB registers */
+#define IXGBE_RTRPCS      0x02430
+#define IXGBE_RTTDCS      0x04900
+#define IXGBE_RTTDCS_ARBDIS     0x00000040 /* DCB arbiter disable */
+#define IXGBE_RTTPCS      0x0CD00
+#define IXGBE_RTRUP2TC    0x03020
+#define IXGBE_RTTUP2TC    0x0C800
+#define IXGBE_RTRPT4C(_i) (0x02140 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_TXLLQ(_i)   (0x082E0 + ((_i) * 4)) /* 4 of these (0-3) */
+#define IXGBE_RTRPT4S(_i) (0x02160 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_RTTDT2C(_i) (0x04910 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_RTTDT2S(_i) (0x04930 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_RTTPT2C(_i) (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_RTTPT2S(_i) (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_RTTDQSEL    0x04904
+#define IXGBE_RTTDT1C     0x04908
+#define IXGBE_RTTDT1S     0x0490C
+#define IXGBE_RTTDTECC    0x04990
+#define IXGBE_RTTDTECC_NO_BCN   0x00000100
+
+#define IXGBE_RTTBCNRC    0x04984
+#define IXGBE_RTTBCNRC_RS_ENA           0x80000000
+#define IXGBE_RTTBCNRC_RF_DEC_MASK      0x00003FFF
+#define IXGBE_RTTBCNRC_RF_INT_SHIFT     14
+#define IXGBE_RTTBCNRC_RF_INT_MASK \
+       (IXGBE_RTTBCNRC_RF_DEC_MASK << IXGBE_RTTBCNRC_RF_INT_SHIFT)
+#define IXGBE_RTTBCNRM    0x04980
+
+/* BCN (for DCB) Registers */
+#define IXGBE_RTTBCNRS    0x04988
+#define IXGBE_RTTBCNCR    0x08B00
+#define IXGBE_RTTBCNACH   0x08B04
+#define IXGBE_RTTBCNACL   0x08B08
+#define IXGBE_RTTBCNTG    0x04A90
+#define IXGBE_RTTBCNIDX   0x08B0C
+#define IXGBE_RTTBCNCP    0x08B10
+#define IXGBE_RTFRTIMER   0x08B14
+#define IXGBE_RTTBCNRTT   0x05150
+#define IXGBE_RTTBCNRD    0x0498C
+
+/* FCoE DMA Context Registers */
+#define IXGBE_FCPTRL    0x02410 /* FC User Desc. PTR Low */
+#define IXGBE_FCPTRH    0x02414 /* FC User Desc. PTR High */
+#define IXGBE_FCBUFF    0x02418 /* FC Buffer Control */
+#define IXGBE_FCDMARW   0x02420 /* FC Receive DMA RW */
+#define IXGBE_FCINVST0  0x03FC0 /* FC Invalid DMA Context Status Reg 0 */
+#define IXGBE_FCINVST(_i)       (IXGBE_FCINVST0 + ((_i) * 4))
+#define IXGBE_FCBUFF_VALID      (1 << 0)   /* DMA Context Valid */
+#define IXGBE_FCBUFF_BUFFSIZE   (3 << 3)   /* User Buffer Size */
+#define IXGBE_FCBUFF_WRCONTX    (1 << 7)   /* 0: Initiator, 1: Target */
+#define IXGBE_FCBUFF_BUFFCNT    0x0000ff00 /* Number of User Buffers */
+#define IXGBE_FCBUFF_OFFSET     0xffff0000 /* User Buffer Offset */
+#define IXGBE_FCBUFF_BUFFSIZE_SHIFT  3
+#define IXGBE_FCBUFF_BUFFCNT_SHIFT   8
+#define IXGBE_FCBUFF_OFFSET_SHIFT    16
+#define IXGBE_FCDMARW_WE        (1 << 14)   /* Write enable */
+#define IXGBE_FCDMARW_RE        (1 << 15)   /* Read enable */
+#define IXGBE_FCDMARW_FCOESEL   0x000001ff  /* FC X_ID: 11 bits */
+#define IXGBE_FCDMARW_LASTSIZE  0xffff0000  /* Last User Buffer Size */
+#define IXGBE_FCDMARW_LASTSIZE_SHIFT 16
+/* FCoE SOF/EOF */
+#define IXGBE_TEOFF     0x04A94 /* Tx FC EOF */
+#define IXGBE_TSOFF     0x04A98 /* Tx FC SOF */
+#define IXGBE_REOFF     0x05158 /* Rx FC EOF */
+#define IXGBE_RSOFF     0x051F8 /* Rx FC SOF */
+/* FCoE Filter Context Registers */
+#define IXGBE_FCFLT     0x05108 /* FC FLT Context */
+#define IXGBE_FCFLTRW   0x05110 /* FC Filter RW Control */
+#define IXGBE_FCPARAM   0x051d8 /* FC Offset Parameter */
+#define IXGBE_FCFLT_VALID       (1 << 0)   /* Filter Context Valid */
+#define IXGBE_FCFLT_FIRST       (1 << 1)   /* Filter First */
+#define IXGBE_FCFLT_SEQID       0x00ff0000 /* Sequence ID */
+#define IXGBE_FCFLT_SEQCNT      0xff000000 /* Sequence Count */
+#define IXGBE_FCFLTRW_RVALDT    (1 << 13)  /* Fast Re-Validation */
+#define IXGBE_FCFLTRW_WE        (1 << 14)  /* Write Enable */
+#define IXGBE_FCFLTRW_RE        (1 << 15)  /* Read Enable */
+/* FCoE Receive Control */
+#define IXGBE_FCRXCTRL  0x05100 /* FC Receive Control */
+#define IXGBE_FCRXCTRL_FCOELLI  (1 << 0)   /* Low latency interrupt */
+#define IXGBE_FCRXCTRL_SAVBAD   (1 << 1)   /* Save Bad Frames */
+#define IXGBE_FCRXCTRL_FRSTRDH  (1 << 2)   /* EN 1st Read Header */
+#define IXGBE_FCRXCTRL_LASTSEQH (1 << 3)   /* EN Last Header in Seq */
+#define IXGBE_FCRXCTRL_ALLH     (1 << 4)   /* EN All Headers */
+#define IXGBE_FCRXCTRL_FRSTSEQH (1 << 5)   /* EN 1st Seq. Header */
+#define IXGBE_FCRXCTRL_ICRC     (1 << 6)   /* Ignore Bad FC CRC */
+#define IXGBE_FCRXCTRL_FCCRCBO  (1 << 7)   /* FC CRC Byte Ordering */
+#define IXGBE_FCRXCTRL_FCOEVER  0x00000f00 /* FCoE Version: 4 bits */
+#define IXGBE_FCRXCTRL_FCOEVER_SHIFT 8
+/* FCoE Redirection */
+#define IXGBE_FCRECTL   0x0ED00 /* FC Redirection Control */
+#define IXGBE_FCRETA0   0x0ED10 /* FC Redirection Table 0 */
+#define IXGBE_FCRETA(_i)        (IXGBE_FCRETA0 + ((_i) * 4)) /* FCoE Redir */
+#define IXGBE_FCRECTL_ENA       0x1        /* FCoE Redir Table Enable */
+#define IXGBE_FCRETASEL_ENA     0x2        /* FCoE FCRETASEL bit */
+#define IXGBE_FCRETA_SIZE       8          /* Max entries in FCRETA */
+#define IXGBE_FCRETA_ENTRY_MASK 0x0000007f /* 7 bits for the queue index */
+
+/* Stats registers */
+#define IXGBE_CRCERRS   0x04000
+#define IXGBE_ILLERRC   0x04004
+#define IXGBE_ERRBC     0x04008
+#define IXGBE_MSPDC     0x04010
+#define IXGBE_MPC(_i)   (0x03FA0 + ((_i) * 4)) /* 8 of these 3FA0-3FBC*/
+#define IXGBE_MLFC      0x04034
+#define IXGBE_MRFC      0x04038
+#define IXGBE_RLEC      0x04040
+#define IXGBE_LXONTXC   0x03F60
+#define IXGBE_LXONRXC   0x0CF60
+#define IXGBE_LXOFFTXC  0x03F68
+#define IXGBE_LXOFFRXC  0x0CF68
+#define IXGBE_LXONRXCNT 0x041A4
+#define IXGBE_LXOFFRXCNT 0x041A8
+#define IXGBE_PXONRXCNT(_i)     (0x04140 + ((_i) * 4)) /* 8 of these */
+#define IXGBE_PXOFFRXCNT(_i)    (0x04160 + ((_i) * 4)) /* 8 of these */
+#define IXGBE_PXON2OFFCNT(_i)   (0x03240 + ((_i) * 4)) /* 8 of these */
+#define IXGBE_PXONTXC(_i)       (0x03F00 + ((_i) * 4)) /* 8 of these 3F00-3F1C*/
+#define IXGBE_PXONRXC(_i)       (0x0CF00 + ((_i) * 4)) /* 8 of these CF00-CF1C*/
+#define IXGBE_PXOFFTXC(_i)      (0x03F20 + ((_i) * 4)) /* 8 of these 3F20-3F3C*/
+#define IXGBE_PXOFFRXC(_i)      (0x0CF20 + ((_i) * 4)) /* 8 of these CF20-CF3C*/
+#define IXGBE_PRC64     0x0405C
+#define IXGBE_PRC127    0x04060
+#define IXGBE_PRC255    0x04064
+#define IXGBE_PRC511    0x04068
+#define IXGBE_PRC1023   0x0406C
+#define IXGBE_PRC1522   0x04070
+#define IXGBE_GPRC      0x04074
+#define IXGBE_BPRC      0x04078
+#define IXGBE_MPRC      0x0407C
+#define IXGBE_GPTC      0x04080
+#define IXGBE_GORCL     0x04088
+#define IXGBE_GORCH     0x0408C
+#define IXGBE_GOTCL     0x04090
+#define IXGBE_GOTCH     0x04094
+#define IXGBE_RNBC(_i)  (0x03FC0 + ((_i) * 4)) /* 8 of these 3FC0-3FDC*/
+#define IXGBE_RUC       0x040A4
+#define IXGBE_RFC       0x040A8
+#define IXGBE_ROC       0x040AC
+#define IXGBE_RJC       0x040B0
+#define IXGBE_MNGPRC    0x040B4
+#define IXGBE_MNGPDC    0x040B8
+#define IXGBE_MNGPTC    0x0CF90
+#define IXGBE_TORL      0x040C0
+#define IXGBE_TORH      0x040C4
+#define IXGBE_TPR       0x040D0
+#define IXGBE_TPT       0x040D4
+#define IXGBE_PTC64     0x040D8
+#define IXGBE_PTC127    0x040DC
+#define IXGBE_PTC255    0x040E0
+#define IXGBE_PTC511    0x040E4
+#define IXGBE_PTC1023   0x040E8
+#define IXGBE_PTC1522   0x040EC
+#define IXGBE_MPTC      0x040F0
+#define IXGBE_BPTC      0x040F4
+#define IXGBE_XEC       0x04120
+#define IXGBE_SSVPC     0x08780
+
+#define IXGBE_RQSMR(_i) (0x02300 + ((_i) * 4))
+#define IXGBE_TQSMR(_i) (((_i) <= 7) ? (0x07300 + ((_i) * 4)) : \
+                         (0x08600 + ((_i) * 4)))
+#define IXGBE_TQSM(_i)  (0x08600 + ((_i) * 4))
+
+#define IXGBE_QPRC(_i) (0x01030 + ((_i) * 0x40)) /* 16 of these */
+#define IXGBE_QPTC(_i) (0x06030 + ((_i) * 0x40)) /* 16 of these */
+#define IXGBE_QBRC(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */
+#define IXGBE_QBTC(_i) (0x06034 + ((_i) * 0x40)) /* 16 of these */
+#define IXGBE_QBRC_L(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */
+#define IXGBE_QBRC_H(_i) (0x01038 + ((_i) * 0x40)) /* 16 of these */
+#define IXGBE_QPRDC(_i) (0x01430 + ((_i) * 0x40)) /* 16 of these */
+#define IXGBE_QBTC_L(_i) (0x08700 + ((_i) * 0x8)) /* 16 of these */
+#define IXGBE_QBTC_H(_i) (0x08704 + ((_i) * 0x8)) /* 16 of these */
+#define IXGBE_FCCRC     0x05118 /* Count of Good Eth CRC w/ Bad FC CRC */
+#define IXGBE_FCOERPDC  0x0241C /* FCoE Rx Packets Dropped Count */
+#define IXGBE_FCLAST    0x02424 /* FCoE Last Error Count */
+#define IXGBE_FCOEPRC   0x02428 /* Number of FCoE Packets Received */
+#define IXGBE_FCOEDWRC  0x0242C /* Number of FCoE DWords Received */
+#define IXGBE_FCOEPTC   0x08784 /* Number of FCoE Packets Transmitted */
+#define IXGBE_FCOEDWTC  0x08788 /* Number of FCoE DWords Transmitted */
+#define IXGBE_FCCRC_CNT_MASK    0x0000FFFF /* CRC_CNT: bit 0 - 15 */
+#define IXGBE_FCLAST_CNT_MASK   0x0000FFFF /* Last_CNT: bit 0 - 15 */
+#define IXGBE_O2BGPTC   0x041C4
+#define IXGBE_O2BSPC    0x087B0
+#define IXGBE_B2OSPC    0x041C0
+#define IXGBE_B2OGPRC   0x02F90
+#define IXGBE_BUPRC     0x04180
+#define IXGBE_BMPRC     0x04184
+#define IXGBE_BBPRC     0x04188
+#define IXGBE_BUPTC     0x0418C
+#define IXGBE_BMPTC     0x04190
+#define IXGBE_BBPTC     0x04194
+#define IXGBE_BCRCERRS  0x04198
+#define IXGBE_BXONRXC   0x0419C
+#define IXGBE_BXOFFRXC  0x041E0
+#define IXGBE_BXONTXC   0x041E4
+#define IXGBE_BXOFFTXC  0x041E8
+#define IXGBE_PCRC8ECL  0x0E810
+#define IXGBE_PCRC8ECH  0x0E811
+#define IXGBE_PCRC8ECH_MASK     0x1F
+#define IXGBE_LDPCECL   0x0E820
+#define IXGBE_LDPCECH   0x0E821
+
+/* Management */
+#define IXGBE_MAVTV(_i) (0x05010 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_MFUTP(_i) (0x05030 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_MANC      0x05820
+#define IXGBE_MFVAL     0x05824
+#define IXGBE_MANC2H    0x05860
+#define IXGBE_MDEF(_i)  (0x05890 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_MIPAF     0x058B0
+#define IXGBE_MMAL(_i)  (0x05910 + ((_i) * 8)) /* 4 of these (0-3) */
+#define IXGBE_MMAH(_i)  (0x05914 + ((_i) * 8)) /* 4 of these (0-3) */
+#define IXGBE_FTFT      0x09400 /* 0x9400-0x97FC */
+#define IXGBE_METF(_i)  (0x05190 + ((_i) * 4)) /* 4 of these (0-3) */
+#define IXGBE_MDEF_EXT(_i) (0x05160 + ((_i) * 4)) /* 8 of these (0-7) */
+#define IXGBE_LSWFW     0x15014
+#define IXGBE_BMCIP(_i) (0x05050 + ((_i) * 4)) /* 0x5050-0x505C */
+#define IXGBE_BMCIPVAL  0x05060
+#define IXGBE_BMCIP_IPADDR_TYPE         0x00000001
+#define IXGBE_BMCIP_IPADDR_VALID        0x00000002
+
+/* Management Bit Fields and Masks */
+#define IXGBE_MANC_EN_BMC2OS            0x10000000 /* Enable BMC2OS and OS2BMC
+                                                    * traffic */
+#define IXGBE_MANC_EN_BMC2OS_SHIFT      28
+
+/* Firmware Semaphore Register */
+#define IXGBE_FWSM_MODE_MASK  0xE
+
+/* ARC Subsystem registers */
+#define IXGBE_HICR      0x15F00
+#define IXGBE_FWSTS     0x15F0C
+#define IXGBE_HSMC0R    0x15F04
+#define IXGBE_HSMC1R    0x15F08
+#define IXGBE_SWSR      0x15F10
+#define IXGBE_HFDR      0x15FE8
+#define IXGBE_FLEX_MNG  0x15800 /* 0x15800 - 0x15EFC */
+
+#define IXGBE_HICR_EN              0x01  /* Enable bit - RO */
+/* Driver sets this bit when done to put command in RAM */
+#define IXGBE_HICR_C               0x02
+#define IXGBE_HICR_SV              0x04  /* Status Validity */
+#define IXGBE_HICR_FW_RESET_ENABLE 0x40
+#define IXGBE_HICR_FW_RESET        0x80
+
+/* PCI-E registers */
+#define IXGBE_GCR       0x11000
+#define IXGBE_GTV       0x11004
+#define IXGBE_FUNCTAG   0x11008
+#define IXGBE_GLT       0x1100C
+#define IXGBE_PCIEPIPEADR 0x11004
+#define IXGBE_PCIEPIPEDAT 0x11008
+#define IXGBE_GSCL_1    0x11010
+#define IXGBE_GSCL_2    0x11014
+#define IXGBE_GSCL_3    0x11018
+#define IXGBE_GSCL_4    0x1101C
+#define IXGBE_GSCN_0    0x11020
+#define IXGBE_GSCN_1    0x11024
+#define IXGBE_GSCN_2    0x11028
+#define IXGBE_GSCN_3    0x1102C
+#define IXGBE_FACTPS    0x10150
+#define IXGBE_PCIEANACTL  0x11040
+#define IXGBE_SWSM      0x10140
+#define IXGBE_FWSM      0x10148
+#define IXGBE_GSSR      0x10160
+#define IXGBE_MREVID    0x11064
+#define IXGBE_DCA_ID    0x11070
+#define IXGBE_DCA_CTRL  0x11074
+#define IXGBE_SWFW_SYNC IXGBE_GSSR
+
+/* PCI-E registers 82599-Specific */
+#define IXGBE_GCR_EXT           0x11050
+#define IXGBE_GSCL_5_82599      0x11030
+#define IXGBE_GSCL_6_82599      0x11034
+#define IXGBE_GSCL_7_82599      0x11038
+#define IXGBE_GSCL_8_82599      0x1103C
+#define IXGBE_PHYADR_82599      0x11040
+#define IXGBE_PHYDAT_82599      0x11044
+#define IXGBE_PHYCTL_82599      0x11048
+#define IXGBE_PBACLR_82599      0x11068
+#define IXGBE_CIAA_82599        0x11088
+#define IXGBE_CIAD_82599        0x1108C
+#define IXGBE_PICAUSE           0x110B0
+#define IXGBE_PIENA             0x110B8
+#define IXGBE_CDQ_MBR_82599     0x110B4
+#define IXGBE_PCIESPARE         0x110BC
+#define IXGBE_MISC_REG_82599    0x110F0
+#define IXGBE_ECC_CTRL_0_82599  0x11100
+#define IXGBE_ECC_CTRL_1_82599  0x11104
+#define IXGBE_ECC_STATUS_82599  0x110E0
+#define IXGBE_BAR_CTRL_82599    0x110F4
+
+/* PCI Express Control */
+#define IXGBE_GCR_CMPL_TMOUT_MASK       0x0000F000
+#define IXGBE_GCR_CMPL_TMOUT_10ms       0x00001000
+#define IXGBE_GCR_CMPL_TMOUT_RESEND     0x00010000
+#define IXGBE_GCR_CAP_VER2              0x00040000
+
+#define IXGBE_GCR_EXT_MSIX_EN           0x80000000
+#define IXGBE_GCR_EXT_BUFFERS_CLEAR     0x40000000
+#define IXGBE_GCR_EXT_VT_MODE_16        0x00000001
+#define IXGBE_GCR_EXT_VT_MODE_32        0x00000002
+#define IXGBE_GCR_EXT_VT_MODE_64        0x00000003
+#define IXGBE_GCR_EXT_SRIOV             (IXGBE_GCR_EXT_MSIX_EN | \
+                                         IXGBE_GCR_EXT_VT_MODE_64)
+/* Time Sync Registers */
+#define IXGBE_TSYNCRXCTL 0x05188 /* Rx Time Sync Control register - RW */
+#define IXGBE_TSYNCTXCTL 0x08C00 /* Tx Time Sync Control register - RW */
+#define IXGBE_RXSTMPL    0x051E8 /* Rx timestamp Low - RO */
+#define IXGBE_RXSTMPH    0x051A4 /* Rx timestamp High - RO */
+#define IXGBE_RXSATRL    0x051A0 /* Rx timestamp attribute low - RO */
+#define IXGBE_RXSATRH    0x051A8 /* Rx timestamp attribute high - RO */
+#define IXGBE_RXMTRL     0x05120 /* RX message type register low - RW */
+#define IXGBE_TXSTMPL    0x08C04 /* Tx timestamp value Low - RO */
+#define IXGBE_TXSTMPH    0x08C08 /* Tx timestamp value High - RO */
+#define IXGBE_SYSTIML    0x08C0C /* System time register Low - RO */
+#define IXGBE_SYSTIMH    0x08C10 /* System time register High - RO */
+#define IXGBE_TIMINCA    0x08C14 /* Increment attributes register - RW */
+#define IXGBE_TIMADJL    0x08C18 /* Time Adjustment Offset register Low - RW */
+#define IXGBE_TIMADJH    0x08C1C /* Time Adjustment Offset register High - RW */
+#define IXGBE_TSAUXC     0x08C20 /* TimeSync Auxiliary Control register - RW */
+#define IXGBE_TRGTTIML0  0x08C24 /* Target Time Register 0 Low - RW */
+#define IXGBE_TRGTTIMH0  0x08C28 /* Target Time Register 0 High - RW */
+#define IXGBE_TRGTTIML1  0x08C2C /* Target Time Register 1 Low - RW */
+#define IXGBE_TRGTTIMH1  0x08C30 /* Target Time Register 1 High - RW */
+#define IXGBE_FREQOUT0   0x08C34 /* Frequency Out 0 Control register - RW */
+#define IXGBE_FREQOUT1   0x08C38 /* Frequency Out 1 Control register - RW */
+#define IXGBE_AUXSTMPL0  0x08C3C /* Auxiliary Time Stamp 0 register Low - RO */
+#define IXGBE_AUXSTMPH0  0x08C40 /* Auxiliary Time Stamp 0 register High - RO */
+#define IXGBE_AUXSTMPL1  0x08C44 /* Auxiliary Time Stamp 1 register Low - RO */
+#define IXGBE_AUXSTMPH1  0x08C48 /* Auxiliary Time Stamp 1 register High - RO */
+
+/* Diagnostic Registers */
+#define IXGBE_RDSTATCTL   0x02C20
+#define IXGBE_RDSTAT(_i)  (0x02C00 + ((_i) * 4)) /* 0x02C00-0x02C1C */
+#define IXGBE_RDHMPN      0x02F08
+#define IXGBE_RIC_DW(_i)  (0x02F10 + ((_i) * 4))
+#define IXGBE_RDPROBE     0x02F20
+#define IXGBE_RDMAM       0x02F30
+#define IXGBE_RDMAD       0x02F34
+#define IXGBE_TDSTATCTL   0x07C20
+#define IXGBE_TDSTAT(_i)  (0x07C00 + ((_i) * 4)) /* 0x07C00 - 0x07C1C */
+#define IXGBE_TDHMPN      0x07F08
+#define IXGBE_TDHMPN2     0x082FC
+#define IXGBE_TXDESCIC    0x082CC
+#define IXGBE_TIC_DW(_i)  (0x07F10 + ((_i) * 4))
+#define IXGBE_TIC_DW2(_i) (0x082B0 + ((_i) * 4))
+#define IXGBE_TDPROBE     0x07F20
+#define IXGBE_TXBUFCTRL   0x0C600
+#define IXGBE_TXBUFDATA0  0x0C610
+#define IXGBE_TXBUFDATA1  0x0C614
+#define IXGBE_TXBUFDATA2  0x0C618
+#define IXGBE_TXBUFDATA3  0x0C61C
+#define IXGBE_RXBUFCTRL   0x03600
+#define IXGBE_RXBUFDATA0  0x03610
+#define IXGBE_RXBUFDATA1  0x03614
+#define IXGBE_RXBUFDATA2  0x03618
+#define IXGBE_RXBUFDATA3  0x0361C
+#define IXGBE_PCIE_DIAG(_i)     (0x11090 + ((_i) * 4)) /* 8 of these */
+#define IXGBE_RFVAL     0x050A4
+#define IXGBE_MDFTC1    0x042B8
+#define IXGBE_MDFTC2    0x042C0
+#define IXGBE_MDFTFIFO1 0x042C4
+#define IXGBE_MDFTFIFO2 0x042C8
+#define IXGBE_MDFTS     0x042CC
+#define IXGBE_RXDATAWRPTR(_i)   (0x03700 + ((_i) * 4)) /* 8 of these 3700-370C*/
+#define IXGBE_RXDESCWRPTR(_i)   (0x03710 + ((_i) * 4)) /* 8 of these 3710-371C*/
+#define IXGBE_RXDATARDPTR(_i)   (0x03720 + ((_i) * 4)) /* 8 of these 3720-372C*/
+#define IXGBE_RXDESCRDPTR(_i)   (0x03730 + ((_i) * 4)) /* 8 of these 3730-373C*/
+#define IXGBE_TXDATAWRPTR(_i)   (0x0C700 + ((_i) * 4)) /* 8 of these C700-C70C*/
+#define IXGBE_TXDESCWRPTR(_i)   (0x0C710 + ((_i) * 4)) /* 8 of these C710-C71C*/
+#define IXGBE_TXDATARDPTR(_i)   (0x0C720 + ((_i) * 4)) /* 8 of these C720-C72C*/
+#define IXGBE_TXDESCRDPTR(_i)   (0x0C730 + ((_i) * 4)) /* 8 of these C730-C73C*/
+#define IXGBE_PCIEECCCTL 0x1106C
+#define IXGBE_RXWRPTR(_i)       (0x03100 + ((_i) * 4)) /* 8 of these 3100-310C*/
+#define IXGBE_RXUSED(_i)        (0x03120 + ((_i) * 4)) /* 8 of these 3120-312C*/
+#define IXGBE_RXRDPTR(_i)       (0x03140 + ((_i) * 4)) /* 8 of these 3140-314C*/
+#define IXGBE_RXRDWRPTR(_i)     (0x03160 + ((_i) * 4)) /* 8 of these 3160-317C*/
+#define IXGBE_TXWRPTR(_i)       (0x0C100 + ((_i) * 4)) /* 8 of these C100-C10C*/
+#define IXGBE_TXUSED(_i)        (0x0C120 + ((_i) * 4)) /* 8 of these C120-C12C*/
+#define IXGBE_TXRDPTR(_i)       (0x0C140 + ((_i) * 4)) /* 8 of these C140-C14C*/
+#define IXGBE_TXRDWRPTR(_i)     (0x0C160 + ((_i) * 4)) /* 8 of these C160-C17C*/
+#define IXGBE_PCIEECCCTL0 0x11100
+#define IXGBE_PCIEECCCTL1 0x11104
+#define IXGBE_RXDBUECC  0x03F70
+#define IXGBE_TXDBUECC  0x0CF70
+#define IXGBE_RXDBUEST 0x03F74
+#define IXGBE_TXDBUEST 0x0CF74
+#define IXGBE_PBTXECC   0x0C300
+#define IXGBE_PBRXECC   0x03300
+#define IXGBE_GHECCR    0x110B0
+
+/* MAC Registers */
+#define IXGBE_PCS1GCFIG 0x04200
+#define IXGBE_PCS1GLCTL 0x04208
+#define IXGBE_PCS1GLSTA 0x0420C
+#define IXGBE_PCS1GDBG0 0x04210
+#define IXGBE_PCS1GDBG1 0x04214
+#define IXGBE_PCS1GANA  0x04218
+#define IXGBE_PCS1GANLP 0x0421C
+#define IXGBE_PCS1GANNP 0x04220
+#define IXGBE_PCS1GANLPNP 0x04224
+#define IXGBE_HLREG0    0x04240
+#define IXGBE_HLREG1    0x04244
+#define IXGBE_PAP       0x04248
+#define IXGBE_MACA      0x0424C
+#define IXGBE_APAE      0x04250
+#define IXGBE_ARD       0x04254
+#define IXGBE_AIS       0x04258
+#define IXGBE_MSCA      0x0425C
+#define IXGBE_MSRWD     0x04260
+#define IXGBE_MLADD     0x04264
+#define IXGBE_MHADD     0x04268
+#define IXGBE_MAXFRS    0x04268
+#define IXGBE_TREG      0x0426C
+#define IXGBE_PCSS1     0x04288
+#define IXGBE_PCSS2     0x0428C
+#define IXGBE_XPCSS     0x04290
+#define IXGBE_MFLCN     0x04294
+#define IXGBE_SERDESC   0x04298
+#define IXGBE_MACS      0x0429C
+#define IXGBE_AUTOC     0x042A0
+#define IXGBE_LINKS     0x042A4
+#define IXGBE_LINKS2    0x04324
+#define IXGBE_AUTOC2    0x042A8
+#define IXGBE_AUTOC3    0x042AC
+#define IXGBE_ANLP1     0x042B0
+#define IXGBE_ANLP2     0x042B4
+#define IXGBE_MACC      0x04330
+#define IXGBE_ATLASCTL  0x04800
+#define IXGBE_MMNGC     0x042D0
+#define IXGBE_ANLPNP1   0x042D4
+#define IXGBE_ANLPNP2   0x042D8
+#define IXGBE_KRPCSFC   0x042E0
+#define IXGBE_KRPCSS    0x042E4
+#define IXGBE_FECS1     0x042E8
+#define IXGBE_FECS2     0x042EC
+#define IXGBE_SMADARCTL 0x14F10
+#define IXGBE_MPVC      0x04318
+#define IXGBE_SGMIIC    0x04314
+
+/* Statistics Registers */
+#define IXGBE_RXNFGPC      0x041B0
+#define IXGBE_RXNFGBCL     0x041B4
+#define IXGBE_RXNFGBCH     0x041B8
+#define IXGBE_RXDGPC       0x02F50
+#define IXGBE_RXDGBCL      0x02F54
+#define IXGBE_RXDGBCH      0x02F58
+#define IXGBE_RXDDGPC      0x02F5C
+#define IXGBE_RXDDGBCL     0x02F60
+#define IXGBE_RXDDGBCH     0x02F64
+#define IXGBE_RXLPBKGPC    0x02F68
+#define IXGBE_RXLPBKGBCL   0x02F6C
+#define IXGBE_RXLPBKGBCH   0x02F70
+#define IXGBE_RXDLPBKGPC   0x02F74
+#define IXGBE_RXDLPBKGBCL  0x02F78
+#define IXGBE_RXDLPBKGBCH  0x02F7C
+#define IXGBE_TXDGPC       0x087A0
+#define IXGBE_TXDGBCL      0x087A4
+#define IXGBE_TXDGBCH      0x087A8
+
+#define IXGBE_RXDSTATCTRL 0x02F40
+
+/* Copper Pond 2 link timeout */
+#define IXGBE_VALIDATE_LINK_READY_TIMEOUT 50
+
+/* Omer CORECTL */
+#define IXGBE_CORECTL           0x014F00
+/* BARCTRL */
+#define IXGBE_BARCTRL               0x110F4
+#define IXGBE_BARCTRL_FLSIZE        0x0700
+#define IXGBE_BARCTRL_FLSIZE_SHIFT  8
+#define IXGBE_BARCTRL_CSRSIZE       0x2000
+
+/* RSCCTL Bit Masks */
+#define IXGBE_RSCCTL_RSCEN          0x01
+#define IXGBE_RSCCTL_MAXDESC_1      0x00
+#define IXGBE_RSCCTL_MAXDESC_4      0x04
+#define IXGBE_RSCCTL_MAXDESC_8      0x08
+#define IXGBE_RSCCTL_MAXDESC_16     0x0C
+
+/* RSCDBU Bit Masks */
+#define IXGBE_RSCDBU_RSCSMALDIS_MASK    0x0000007F
+#define IXGBE_RSCDBU_RSCACKDIS          0x00000080
+
+/* RDRXCTL Bit Masks */
+#define IXGBE_RDRXCTL_RDMTS_1_2     0x00000000 /* Rx Desc Min Threshold Size */
+#define IXGBE_RDRXCTL_CRCSTRIP      0x00000002 /* CRC Strip */
+#define IXGBE_RDRXCTL_MVMEN         0x00000020
+#define IXGBE_RDRXCTL_DMAIDONE      0x00000008 /* DMA init cycle done */
+#define IXGBE_RDRXCTL_AGGDIS        0x00010000 /* Aggregation disable */
+#define IXGBE_RDRXCTL_RSCFRSTSIZE   0x003E0000 /* RSC First packet size */
+#define IXGBE_RDRXCTL_RSCLLIDIS     0x00800000 /* Disable RSC compl on LLI */
+#define IXGBE_RDRXCTL_RSCACKC       0x02000000 /* must set 1 when RSC enabled */
+#define IXGBE_RDRXCTL_FCOE_WRFIX    0x04000000 /* must set 1 when RSC enabled */
+
+/* RQTC Bit Masks and Shifts */
+#define IXGBE_RQTC_SHIFT_TC(_i)     ((_i) * 4)
+#define IXGBE_RQTC_TC0_MASK         (0x7 << 0)
+#define IXGBE_RQTC_TC1_MASK         (0x7 << 4)
+#define IXGBE_RQTC_TC2_MASK         (0x7 << 8)
+#define IXGBE_RQTC_TC3_MASK         (0x7 << 12)
+#define IXGBE_RQTC_TC4_MASK         (0x7 << 16)
+#define IXGBE_RQTC_TC5_MASK         (0x7 << 20)
+#define IXGBE_RQTC_TC6_MASK         (0x7 << 24)
+#define IXGBE_RQTC_TC7_MASK         (0x7 << 28)
+
+/* PSRTYPE.RQPL Bit masks and shift */
+#define IXGBE_PSRTYPE_RQPL_MASK     0x7
+#define IXGBE_PSRTYPE_RQPL_SHIFT    29
+
+/* CTRL Bit Masks */
+#define IXGBE_CTRL_GIO_DIS      0x00000004 /* Global IO Master Disable bit */
+#define IXGBE_CTRL_LNK_RST      0x00000008 /* Link Reset. Resets everything. */
+#define IXGBE_CTRL_RST          0x04000000 /* Reset (SW) */
+#define IXGBE_CTRL_RST_MASK     (IXGBE_CTRL_LNK_RST | IXGBE_CTRL_RST)
+
+/* FACTPS */
+#define IXGBE_FACTPS_LFS        0x40000000 /* LAN Function Select */
+
+/* MHADD Bit Masks */
+#define IXGBE_MHADD_MFS_MASK    0xFFFF0000
+#define IXGBE_MHADD_MFS_SHIFT   16
+
+/* Extended Device Control */
+#define IXGBE_CTRL_EXT_PFRSTD   0x00004000 /* Physical Function Reset Done */
+#define IXGBE_CTRL_EXT_NS_DIS   0x00010000 /* No Snoop disable */
+#define IXGBE_CTRL_EXT_RO_DIS   0x00020000 /* Relaxed Ordering disable */
+#define IXGBE_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */
+
+/* Direct Cache Access (DCA) definitions */
+#define IXGBE_DCA_CTRL_DCA_ENABLE  0x00000000 /* DCA Enable */
+#define IXGBE_DCA_CTRL_DCA_DISABLE 0x00000001 /* DCA Disable */
+
+#define IXGBE_DCA_CTRL_DCA_MODE_CB1 0x00 /* DCA Mode CB1 */
+#define IXGBE_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */
+
+#define IXGBE_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */
+#define IXGBE_DCA_RXCTRL_CPUID_MASK_82599  0xFF000000 /* Rx CPUID Mask */
+#define IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599 24 /* Rx CPUID Shift */
+#define IXGBE_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */
+#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */
+#define IXGBE_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */
+#define IXGBE_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* DCA Rx rd Desc Relax Order */
+#define IXGBE_DCA_RXCTRL_DESC_WRO_EN (1 << 13) /* DCA Rx wr Desc Relax Order */
+#define IXGBE_DCA_RXCTRL_DESC_HSRO_EN (1 << 15) /* DCA Rx Split Header RO */
+
+#define IXGBE_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */
+#define IXGBE_DCA_TXCTRL_CPUID_MASK_82599  0xFF000000 /* Tx CPUID Mask */
+#define IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599 24 /* Tx CPUID Shift */
+#define IXGBE_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
+#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
+#define IXGBE_DCA_MAX_QUEUES_82598   16 /* DCA regs only on 16 queues */
+
+/* MSCA Bit Masks */
+#define IXGBE_MSCA_NP_ADDR_MASK      0x0000FFFF /* MDI Address (new protocol) */
+#define IXGBE_MSCA_NP_ADDR_SHIFT     0
+#define IXGBE_MSCA_DEV_TYPE_MASK     0x001F0000 /* Device Type (new protocol) */
+#define IXGBE_MSCA_DEV_TYPE_SHIFT    16 /* Register Address (old protocol) */
+#define IXGBE_MSCA_PHY_ADDR_MASK     0x03E00000 /* PHY Address mask */
+#define IXGBE_MSCA_PHY_ADDR_SHIFT    21 /* PHY Address shift*/
+#define IXGBE_MSCA_OP_CODE_MASK      0x0C000000 /* OP CODE mask */
+#define IXGBE_MSCA_OP_CODE_SHIFT     26 /* OP CODE shift */
+#define IXGBE_MSCA_ADDR_CYCLE        0x00000000 /* OP CODE 00 (addr cycle) */
+#define IXGBE_MSCA_WRITE             0x04000000 /* OP CODE 01 (write) */
+#define IXGBE_MSCA_READ              0x0C000000 /* OP CODE 11 (read) */
+#define IXGBE_MSCA_READ_AUTOINC      0x08000000 /* OP CODE 10 (read, auto inc)*/
+#define IXGBE_MSCA_ST_CODE_MASK      0x30000000 /* ST Code mask */
+#define IXGBE_MSCA_ST_CODE_SHIFT     28 /* ST Code shift */
+#define IXGBE_MSCA_NEW_PROTOCOL      0x00000000 /* ST CODE 00 (new protocol) */
+#define IXGBE_MSCA_OLD_PROTOCOL      0x10000000 /* ST CODE 01 (old protocol) */
+#define IXGBE_MSCA_MDI_COMMAND       0x40000000 /* Initiate MDI command */
+#define IXGBE_MSCA_MDI_IN_PROG_EN    0x80000000 /* MDI in progress enable */
+
+/* MSRWD bit masks */
+#define IXGBE_MSRWD_WRITE_DATA_MASK     0x0000FFFF
+#define IXGBE_MSRWD_WRITE_DATA_SHIFT    0
+#define IXGBE_MSRWD_READ_DATA_MASK      0xFFFF0000
+#define IXGBE_MSRWD_READ_DATA_SHIFT     16
+
+/* Atlas registers */
+#define IXGBE_ATLAS_PDN_LPBK    0x24
+#define IXGBE_ATLAS_PDN_10G     0xB
+#define IXGBE_ATLAS_PDN_1G      0xC
+#define IXGBE_ATLAS_PDN_AN      0xD
+
+/* Atlas bit masks */
+#define IXGBE_ATLASCTL_WRITE_CMD        0x00010000
+#define IXGBE_ATLAS_PDN_TX_REG_EN       0x10
+#define IXGBE_ATLAS_PDN_TX_10G_QL_ALL   0xF0
+#define IXGBE_ATLAS_PDN_TX_1G_QL_ALL    0xF0
+#define IXGBE_ATLAS_PDN_TX_AN_QL_ALL    0xF0
+
+/* Omer bit masks */
+#define IXGBE_CORECTL_WRITE_CMD         0x00010000
+
+/* Device Type definitions for new protocol MDIO commands */
+#define IXGBE_MDIO_PMA_PMD_DEV_TYPE               0x1
+#define IXGBE_MDIO_PCS_DEV_TYPE                   0x3
+#define IXGBE_MDIO_PHY_XS_DEV_TYPE                0x4
+#define IXGBE_MDIO_AUTO_NEG_DEV_TYPE              0x7
+#define IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE     0x1E   /* Device 30 */
+#define IXGBE_TWINAX_DEV                          1
+
+#define IXGBE_MDIO_COMMAND_TIMEOUT     100 /* PHY Timeout for 1 GB mode */
+
+#define IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL      0x0    /* VS1 Control Reg */
+#define IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS       0x1    /* VS1 Status Reg */
+#define IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS  0x0008 /* 1 = Link Up */
+#define IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS 0x0010 /* 0 - 10G, 1 - 1G */
+#define IXGBE_MDIO_VENDOR_SPECIFIC_1_10G_SPEED    0x0018
+#define IXGBE_MDIO_VENDOR_SPECIFIC_1_1G_SPEED     0x0010
+
+#define IXGBE_MDIO_AUTO_NEG_CONTROL    0x0 /* AUTO_NEG Control Reg */
+#define IXGBE_MDIO_AUTO_NEG_STATUS     0x1 /* AUTO_NEG Status Reg */
+#define IXGBE_MDIO_AUTO_NEG_ADVT       0x10 /* AUTO_NEG Advt Reg */
+#define IXGBE_MDIO_AUTO_NEG_LP         0x13 /* AUTO_NEG LP Status Reg */
+#define IXGBE_MDIO_PHY_XS_CONTROL      0x0 /* PHY_XS Control Reg */
+#define IXGBE_MDIO_PHY_XS_RESET        0x8000 /* PHY_XS Reset */
+#define IXGBE_MDIO_PHY_ID_HIGH         0x2 /* PHY ID High Reg*/
+#define IXGBE_MDIO_PHY_ID_LOW          0x3 /* PHY ID Low Reg*/
+#define IXGBE_MDIO_PHY_SPEED_ABILITY   0x4 /* Speed Ability Reg */
+#define IXGBE_MDIO_PHY_SPEED_10G       0x0001 /* 10G capable */
+#define IXGBE_MDIO_PHY_SPEED_1G        0x0010 /* 1G capable */
+#define IXGBE_MDIO_PHY_SPEED_100M      0x0020 /* 100M capable */
+#define IXGBE_MDIO_PHY_EXT_ABILITY        0xB /* Ext Ability Reg */
+#define IXGBE_MDIO_PHY_10GBASET_ABILITY   0x0004 /* 10GBaseT capable */
+#define IXGBE_MDIO_PHY_1000BASET_ABILITY  0x0020 /* 1000BaseT capable */
+#define IXGBE_MDIO_PHY_100BASETX_ABILITY  0x0080 /* 100BaseTX capable */
+#define IXGBE_MDIO_PHY_SET_LOW_POWER_MODE 0x0800 /* Set low power mode */
+
+#define IXGBE_MDIO_PMA_PMD_CONTROL_ADDR     0x0000 /* PMA/PMD Control Reg */
+#define IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR     0xC30A /* PHY_XS SDA/SCL Addr Reg */
+#define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA     0xC30B /* PHY_XS SDA/SCL Data Reg */
+#define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT     0xC30C /* PHY_XS SDA/SCL Status Reg */
+
+/* MII clause 22/28 definitions */
+#define IXGBE_MDIO_PHY_LOW_POWER_MODE  0x0800
+
+#define IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG     0x20   /* 10G Control Reg */
+#define IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG 0xC400 /* 1G Provisioning 1 */
+#define IXGBE_MII_AUTONEG_XNP_TX_REG             0x17   /* 1G XNP Transmit */
+#define IXGBE_MII_AUTONEG_ADVERTISE_REG          0x10   /* 100M Advertisement */
+#define IXGBE_MII_10GBASE_T_ADVERTISE            0x1000 /* full duplex, bit:12*/
+#define IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX      0x4000 /* full duplex, bit:14*/
+#define IXGBE_MII_1GBASE_T_ADVERTISE             0x8000 /* full duplex, bit:15*/
+#define IXGBE_MII_100BASE_T_ADVERTISE            0x0100 /* full duplex, bit:8 */
+#define IXGBE_MII_100BASE_T_ADVERTISE_HALF       0x0080 /* half duplex, bit:7 */
+#define IXGBE_MII_RESTART                        0x200
+#define IXGBE_MII_AUTONEG_COMPLETE               0x20
+#define IXGBE_MII_AUTONEG_LINK_UP                0x04
+#define IXGBE_MII_AUTONEG_REG                    0x0
+
+#define IXGBE_PHY_REVISION_MASK        0xFFFFFFF0
+#define IXGBE_MAX_PHY_ADDR             32
+
+/* PHY IDs*/
+#define TN1010_PHY_ID    0x00A19410
+#define TNX_FW_REV       0xB
+#define X540_PHY_ID      0x01540200
+#define AQ_FW_REV        0x20
+#define QT2022_PHY_ID    0x0043A400
+#define ATH_PHY_ID       0x03429050
+
+/* PHY Types */
+#define IXGBE_M88E1145_E_PHY_ID  0x01410CD0
+
+/* Special PHY Init Routine */
+#define IXGBE_PHY_INIT_OFFSET_NL 0x002B
+#define IXGBE_PHY_INIT_END_NL    0xFFFF
+#define IXGBE_CONTROL_MASK_NL    0xF000
+#define IXGBE_DATA_MASK_NL       0x0FFF
+#define IXGBE_CONTROL_SHIFT_NL   12
+#define IXGBE_DELAY_NL           0
+#define IXGBE_DATA_NL            1
+#define IXGBE_CONTROL_NL         0x000F
+#define IXGBE_CONTROL_EOL_NL     0x0FFF
+#define IXGBE_CONTROL_SOL_NL     0x0000
+
+/* General purpose Interrupt Enable */
+#define IXGBE_SDP0_GPIEN         0x00000001 /* SDP0 */
+#define IXGBE_SDP1_GPIEN         0x00000002 /* SDP1 */
+#define IXGBE_SDP2_GPIEN         0x00000004 /* SDP2 */
+#define IXGBE_GPIE_MSIX_MODE     0x00000010 /* MSI-X mode */
+#define IXGBE_GPIE_OCD           0x00000020 /* Other Clear Disable */
+#define IXGBE_GPIE_EIMEN         0x00000040 /* Immediate Interrupt Enable */
+#define IXGBE_GPIE_EIAME         0x40000000
+#define IXGBE_GPIE_PBA_SUPPORT   0x80000000
+#define IXGBE_GPIE_RSC_DELAY_SHIFT 11
+#define IXGBE_GPIE_VTMODE_MASK   0x0000C000 /* VT Mode Mask */
+#define IXGBE_GPIE_VTMODE_16     0x00004000 /* 16 VFs 8 queues per VF */
+#define IXGBE_GPIE_VTMODE_32     0x00008000 /* 32 VFs 4 queues per VF */
+#define IXGBE_GPIE_VTMODE_64     0x0000C000 /* 64 VFs 2 queues per VF */
+
+/* Packet Buffer Initialization */
+#define IXGBE_MAX_PACKET_BUFFERS 8
+
+#define IXGBE_TXPBSIZE_20KB     0x00005000 /* 20KB Packet Buffer */
+#define IXGBE_TXPBSIZE_40KB     0x0000A000 /* 40KB Packet Buffer */
+#define IXGBE_RXPBSIZE_48KB     0x0000C000 /* 48KB Packet Buffer */
+#define IXGBE_RXPBSIZE_64KB     0x00010000 /* 64KB Packet Buffer */
+#define IXGBE_RXPBSIZE_80KB     0x00014000 /* 80KB Packet Buffer */
+#define IXGBE_RXPBSIZE_128KB    0x00020000 /* 128KB Packet Buffer */
+#define IXGBE_RXPBSIZE_MAX      0x00080000 /* 512KB Packet Buffer */
+#define IXGBE_TXPBSIZE_MAX      0x00028000 /* 160KB Packet Buffer */
+
+#define IXGBE_TXPKT_SIZE_MAX    0xA        /* Max Tx Packet size */
+#define IXGBE_MAX_PB            8
+
+/* Packet buffer allocation strategies.
+ * Each enumerator is also #define'd to its own name so that dependent code
+ * can detect its availability with #ifdef (a common kernel-header idiom). */
+enum {
+       PBA_STRATEGY_EQUAL      = 0, /* Distribute PB space equally */
+#define PBA_STRATEGY_EQUAL      PBA_STRATEGY_EQUAL
+       PBA_STRATEGY_WEIGHTED   = 1, /* Weight front half of TCs */
+#define PBA_STRATEGY_WEIGHTED   PBA_STRATEGY_WEIGHTED
+};
+
+/* Transmit Flow Control status */
+#define IXGBE_TFCS_TXOFF         0x00000001
+#define IXGBE_TFCS_TXOFF0        0x00000100
+#define IXGBE_TFCS_TXOFF1        0x00000200
+#define IXGBE_TFCS_TXOFF2        0x00000400
+#define IXGBE_TFCS_TXOFF3        0x00000800
+#define IXGBE_TFCS_TXOFF4        0x00001000
+#define IXGBE_TFCS_TXOFF5        0x00002000
+#define IXGBE_TFCS_TXOFF6        0x00004000
+#define IXGBE_TFCS_TXOFF7        0x00008000
+
+/* TCP Timer */
+#define IXGBE_TCPTIMER_KS            0x00000100
+#define IXGBE_TCPTIMER_COUNT_ENABLE  0x00000200
+#define IXGBE_TCPTIMER_COUNT_FINISH  0x00000400
+#define IXGBE_TCPTIMER_LOOP          0x00000800
+#define IXGBE_TCPTIMER_DURATION_MASK 0x000000FF
+
+/* HLREG0 Bit Masks */
+#define IXGBE_HLREG0_TXCRCEN      0x00000001   /* bit  0 */
+#define IXGBE_HLREG0_RXCRCSTRP    0x00000002   /* bit  1 */
+#define IXGBE_HLREG0_JUMBOEN      0x00000004   /* bit  2 */
+#define IXGBE_HLREG0_TXPADEN      0x00000400   /* bit 10 */
+#define IXGBE_HLREG0_TXPAUSEEN    0x00001000   /* bit 12 */
+#define IXGBE_HLREG0_RXPAUSEEN    0x00004000   /* bit 14 */
+#define IXGBE_HLREG0_LPBK         0x00008000   /* bit 15 */
+#define IXGBE_HLREG0_MDCSPD       0x00010000   /* bit 16 */
+#define IXGBE_HLREG0_CONTMDC      0x00020000   /* bit 17 */
+#define IXGBE_HLREG0_CTRLFLTR     0x00040000   /* bit 18 */
+#define IXGBE_HLREG0_PREPEND      0x00F00000   /* bits 20-23 */
+#define IXGBE_HLREG0_PRIPAUSEEN   0x01000000   /* bit 24 */
+#define IXGBE_HLREG0_RXPAUSERECDA 0x06000000   /* bits 25-26 */
+#define IXGBE_HLREG0_RXLNGTHERREN 0x08000000   /* bit 27 */
+#define IXGBE_HLREG0_RXPADSTRIPEN 0x10000000   /* bit 28 */
+
+/* VMD_CTL bitmasks */
+#define IXGBE_VMD_CTL_VMDQ_EN     0x00000001
+#define IXGBE_VMD_CTL_VMDQ_FILTER 0x00000002
+
+/* VT_CTL bitmasks */
+#define IXGBE_VT_CTL_DIS_DEFPL  0x20000000 /* disable default pool */
+#define IXGBE_VT_CTL_REPLEN     0x40000000 /* replication enabled */
+#define IXGBE_VT_CTL_VT_ENABLE  0x00000001  /* Enable VT Mode */
+#define IXGBE_VT_CTL_POOL_SHIFT 7
+#define IXGBE_VT_CTL_POOL_MASK  (0x3F << IXGBE_VT_CTL_POOL_SHIFT)
+
+/* VMOLR bitmasks */
+#define IXGBE_VMOLR_AUPE        0x01000000 /* accept untagged packets */
+#define IXGBE_VMOLR_ROMPE       0x02000000 /* accept packets in MTA tbl */
+#define IXGBE_VMOLR_ROPE        0x04000000 /* accept packets in UC tbl */
+#define IXGBE_VMOLR_BAM         0x08000000 /* accept broadcast packets */
+#define IXGBE_VMOLR_MPE         0x10000000 /* multicast promiscuous */
+
+/* VFRE bitmask */
+#define IXGBE_VFRE_ENABLE_ALL   0xFFFFFFFF
+
+#define IXGBE_VF_INIT_TIMEOUT   200 /* Number of retries to clear RSTI */
+
+/* RDHMPN and TDHMPN bitmasks */
+#define IXGBE_RDHMPN_RDICADDR       0x007FF800
+#define IXGBE_RDHMPN_RDICRDREQ      0x00800000
+#define IXGBE_RDHMPN_RDICADDR_SHIFT 11
+#define IXGBE_TDHMPN_TDICADDR       0x003FF800
+#define IXGBE_TDHMPN_TDICRDREQ      0x00800000
+#define IXGBE_TDHMPN_TDICADDR_SHIFT 11
+
+#define IXGBE_RDMAM_MEM_SEL_SHIFT   13
+#define IXGBE_RDMAM_DWORD_SHIFT     9
+#define IXGBE_RDMAM_DESC_COMP_FIFO  1
+#define IXGBE_RDMAM_DFC_CMD_FIFO    2
+#define IXGBE_RDMAM_RSC_HEADER_ADDR 3
+#define IXGBE_RDMAM_TCN_STATUS_RAM  4
+#define IXGBE_RDMAM_WB_COLL_FIFO    5
+#define IXGBE_RDMAM_QSC_CNT_RAM     6
+#define IXGBE_RDMAM_QSC_FCOE_RAM    7
+#define IXGBE_RDMAM_QSC_QUEUE_CNT   8
+#define IXGBE_RDMAM_QSC_QUEUE_RAM   0xA
+#define IXGBE_RDMAM_QSC_RSC_RAM     0xB
+#define IXGBE_RDMAM_DESC_COM_FIFO_RANGE     135
+#define IXGBE_RDMAM_DESC_COM_FIFO_COUNT     4
+#define IXGBE_RDMAM_DFC_CMD_FIFO_RANGE      48
+#define IXGBE_RDMAM_DFC_CMD_FIFO_COUNT      7
+#define IXGBE_RDMAM_RSC_HEADER_ADDR_RANGE   32
+#define IXGBE_RDMAM_RSC_HEADER_ADDR_COUNT   4
+#define IXGBE_RDMAM_TCN_STATUS_RAM_RANGE    256
+#define IXGBE_RDMAM_TCN_STATUS_RAM_COUNT    9
+#define IXGBE_RDMAM_WB_COLL_FIFO_RANGE      8
+#define IXGBE_RDMAM_WB_COLL_FIFO_COUNT      4
+#define IXGBE_RDMAM_QSC_CNT_RAM_RANGE       64
+#define IXGBE_RDMAM_QSC_CNT_RAM_COUNT       4
+#define IXGBE_RDMAM_QSC_FCOE_RAM_RANGE      512
+#define IXGBE_RDMAM_QSC_FCOE_RAM_COUNT      5
+#define IXGBE_RDMAM_QSC_QUEUE_CNT_RANGE     32
+#define IXGBE_RDMAM_QSC_QUEUE_CNT_COUNT     4
+#define IXGBE_RDMAM_QSC_QUEUE_RAM_RANGE     128
+#define IXGBE_RDMAM_QSC_QUEUE_RAM_COUNT     8
+#define IXGBE_RDMAM_QSC_RSC_RAM_RANGE       32
+#define IXGBE_RDMAM_QSC_RSC_RAM_COUNT       8
+
+#define IXGBE_TXDESCIC_READY        0x80000000
+
+/* Receive Checksum Control */
+#define IXGBE_RXCSUM_IPPCSE     0x00001000   /* IP payload checksum enable */
+#define IXGBE_RXCSUM_PCSD       0x00002000   /* packet checksum disabled */
+
+/* FCRTL Bit Masks */
+#define IXGBE_FCRTL_XONE        0x80000000  /* XON enable */
+#define IXGBE_FCRTH_FCEN        0x80000000  /* Packet buffer fc enable */
+
+/* PAP bit masks*/
+#define IXGBE_PAP_TXPAUSECNT_MASK   0x0000FFFF /* Pause counter mask */
+
+/* RMCS Bit Masks */
+#define IXGBE_RMCS_RRM          0x00000002 /* Receive Recycle Mode enable */
+/* Receive Arbitration Control: 0 Round Robin, 1 DFP */
+#define IXGBE_RMCS_RAC          0x00000004
+#define IXGBE_RMCS_DFP          IXGBE_RMCS_RAC /* Deficit Fixed Priority ena */
+#define IXGBE_RMCS_TFCE_802_3X         0x00000008 /* Tx Priority FC ena */
+#define IXGBE_RMCS_TFCE_PRIORITY       0x00000010 /* Tx Priority FC ena */
+#define IXGBE_RMCS_ARBDIS       0x00000040 /* Arbitration disable bit */
+
+/* FCCFG Bit Masks */
+#define IXGBE_FCCFG_TFCE_802_3X         0x00000008 /* Tx link FC enable */
+#define IXGBE_FCCFG_TFCE_PRIORITY       0x00000010 /* Tx priority FC enable */
+
+/* Interrupt register bitmasks */
+
+/* Extended Interrupt Cause Read */
+#define IXGBE_EICR_RTX_QUEUE    0x0000FFFF /* RTx Queue Interrupt */
+#define IXGBE_EICR_FLOW_DIR     0x00010000 /* FDir Exception */
+#define IXGBE_EICR_RX_MISS      0x00020000 /* Packet Buffer Overrun */
+#define IXGBE_EICR_PCI          0x00040000 /* PCI Exception */
+#define IXGBE_EICR_MAILBOX      0x00080000 /* VF to PF Mailbox Interrupt */
+#define IXGBE_EICR_LSC          0x00100000 /* Link Status Change */
+#define IXGBE_EICR_LINKSEC      0x00200000 /* PN Threshold */
+#define IXGBE_EICR_MNG          0x00400000 /* Manageability Event Interrupt */
+#define IXGBE_EICR_TS           0x00800000 /* Thermal Sensor Event */
+#define IXGBE_EICR_GPI_SDP0     0x01000000 /* Gen Purpose Interrupt on SDP0 */
+#define IXGBE_EICR_GPI_SDP1     0x02000000 /* Gen Purpose Interrupt on SDP1 */
+#define IXGBE_EICR_GPI_SDP2     0x04000000 /* Gen Purpose Interrupt on SDP2 */
+/* NOTE(review): the next two masks are both 0x10000000 (bit 28); presumably
+ * which meaning applies depends on the MAC generation -- confirm vs datasheet. */
+#define IXGBE_EICR_ECC          0x10000000 /* ECC Error */
+#define IXGBE_EICR_PBUR         0x10000000 /* Packet Buffer Handler Error */
+#define IXGBE_EICR_DHER         0x20000000 /* Descriptor Handler Error */
+#define IXGBE_EICR_TCP_TIMER    0x40000000 /* TCP Timer */
+#define IXGBE_EICR_OTHER        0x80000000 /* Interrupt Cause Active */
+
+/* Extended Interrupt Cause Set */
+#define IXGBE_EICS_RTX_QUEUE    IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
+#define IXGBE_EICS_FLOW_DIR     IXGBE_EICR_FLOW_DIR  /* FDir Exception */
+#define IXGBE_EICS_RX_MISS      IXGBE_EICR_RX_MISS   /* Pkt Buffer Overrun */
+#define IXGBE_EICS_PCI          IXGBE_EICR_PCI       /* PCI Exception */
+#define IXGBE_EICS_MAILBOX      IXGBE_EICR_MAILBOX   /* VF to PF Mailbox Int */
+#define IXGBE_EICS_LSC          IXGBE_EICR_LSC       /* Link Status Change */
+#define IXGBE_EICS_MNG          IXGBE_EICR_MNG       /* MNG Event Interrupt */
+#define IXGBE_EICS_GPI_SDP0     IXGBE_EICR_GPI_SDP0  /* SDP0 Gen Purpose Int */
+#define IXGBE_EICS_GPI_SDP1     IXGBE_EICR_GPI_SDP1  /* SDP1 Gen Purpose Int */
+#define IXGBE_EICS_GPI_SDP2     IXGBE_EICR_GPI_SDP2  /* SDP2 Gen Purpose Int */
+#define IXGBE_EICS_ECC          IXGBE_EICR_ECC       /* ECC Error */
+#define IXGBE_EICS_PBUR         IXGBE_EICR_PBUR      /* Pkt Buf Handler Err */
+#define IXGBE_EICS_DHER         IXGBE_EICR_DHER      /* Desc Handler Error */
+#define IXGBE_EICS_TCP_TIMER    IXGBE_EICR_TCP_TIMER /* TCP Timer */
+#define IXGBE_EICS_OTHER        IXGBE_EICR_OTHER     /* INT Cause Active */
+
+/* Extended Interrupt Mask Set */
+#define IXGBE_EIMS_RTX_QUEUE    IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
+#define IXGBE_EIMS_FLOW_DIR     IXGBE_EICR_FLOW_DIR  /* FDir Exception */
+#define IXGBE_EIMS_RX_MISS      IXGBE_EICR_RX_MISS   /* Packet Buffer Overrun */
+#define IXGBE_EIMS_PCI          IXGBE_EICR_PCI       /* PCI Exception */
+#define IXGBE_EIMS_MAILBOX      IXGBE_EICR_MAILBOX   /* VF to PF Mailbox Int */
+#define IXGBE_EIMS_LSC          IXGBE_EICR_LSC       /* Link Status Change */
+#define IXGBE_EIMS_MNG          IXGBE_EICR_MNG       /* MNG Event Interrupt */
+#define IXGBE_EIMS_TS           IXGBE_EICR_TS        /* Thermal Sensor Event */
+#define IXGBE_EIMS_GPI_SDP0     IXGBE_EICR_GPI_SDP0  /* SDP0 Gen Purpose Int */
+#define IXGBE_EIMS_GPI_SDP1     IXGBE_EICR_GPI_SDP1  /* SDP1 Gen Purpose Int */
+#define IXGBE_EIMS_GPI_SDP2     IXGBE_EICR_GPI_SDP2  /* SDP2 Gen Purpose Int */
+#define IXGBE_EIMS_ECC          IXGBE_EICR_ECC       /* ECC Error */
+#define IXGBE_EIMS_PBUR         IXGBE_EICR_PBUR      /* Pkt Buf Handler Err */
+#define IXGBE_EIMS_DHER         IXGBE_EICR_DHER      /* Descr Handler Error */
+#define IXGBE_EIMS_TCP_TIMER    IXGBE_EICR_TCP_TIMER /* TCP Timer */
+#define IXGBE_EIMS_OTHER        IXGBE_EICR_OTHER     /* INT Cause Active */
+
+/* Extended Interrupt Mask Clear */
+#define IXGBE_EIMC_RTX_QUEUE    IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
+#define IXGBE_EIMC_FLOW_DIR     IXGBE_EICR_FLOW_DIR  /* FDir Exception */
+#define IXGBE_EIMC_RX_MISS      IXGBE_EICR_RX_MISS   /* Packet Buffer Overrun */
+#define IXGBE_EIMC_PCI          IXGBE_EICR_PCI       /* PCI Exception */
+#define IXGBE_EIMC_MAILBOX      IXGBE_EICR_MAILBOX   /* VF to PF Mailbox Int */
+#define IXGBE_EIMC_LSC          IXGBE_EICR_LSC       /* Link Status Change */
+#define IXGBE_EIMC_MNG          IXGBE_EICR_MNG       /* MNG Event Interrupt */
+#define IXGBE_EIMC_GPI_SDP0     IXGBE_EICR_GPI_SDP0  /* SDP0 Gen Purpose Int */
+#define IXGBE_EIMC_GPI_SDP1     IXGBE_EICR_GPI_SDP1  /* SDP1 Gen Purpose Int */
+#define IXGBE_EIMC_GPI_SDP2     IXGBE_EICR_GPI_SDP2  /* SDP2 Gen Purpose Int */
+#define IXGBE_EIMC_ECC          IXGBE_EICR_ECC       /* ECC Error */
+#define IXGBE_EIMC_PBUR         IXGBE_EICR_PBUR      /* Pkt Buf Handler Err */
+#define IXGBE_EIMC_DHER         IXGBE_EICR_DHER      /* Desc Handler Err */
+#define IXGBE_EIMC_TCP_TIMER    IXGBE_EICR_TCP_TIMER /* TCP Timer */
+#define IXGBE_EIMC_OTHER        IXGBE_EICR_OTHER     /* INT Cause Active */
+
+#define IXGBE_EIMS_ENABLE_MASK ( \
+                                IXGBE_EIMS_RTX_QUEUE       | \
+                                IXGBE_EIMS_LSC             | \
+                                IXGBE_EIMS_TCP_TIMER       | \
+                                IXGBE_EIMS_OTHER)
+
+/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */
+#define IXGBE_IMIR_PORT_IM_EN     0x00010000  /* TCP port enable */
+#define IXGBE_IMIR_PORT_BP        0x00020000  /* TCP port check bypass */
+#define IXGBE_IMIREXT_SIZE_BP     0x00001000  /* Packet size bypass */
+#define IXGBE_IMIREXT_CTRL_URG    0x00002000  /* Check URG bit in header */
+#define IXGBE_IMIREXT_CTRL_ACK    0x00004000  /* Check ACK bit in header */
+#define IXGBE_IMIREXT_CTRL_PSH    0x00008000  /* Check PSH bit in header */
+#define IXGBE_IMIREXT_CTRL_RST    0x00010000  /* Check RST bit in header */
+#define IXGBE_IMIREXT_CTRL_SYN    0x00020000  /* Check SYN bit in header */
+#define IXGBE_IMIREXT_CTRL_FIN    0x00040000  /* Check FIN bit in header */
+#define IXGBE_IMIREXT_CTRL_BP     0x00080000  /* Bypass check of control bits */
+#define IXGBE_IMIR_SIZE_BP_82599  0x00001000 /* Packet size bypass */
+#define IXGBE_IMIR_CTRL_URG_82599 0x00002000 /* Check URG bit in header */
+#define IXGBE_IMIR_CTRL_ACK_82599 0x00004000 /* Check ACK bit in header */
+#define IXGBE_IMIR_CTRL_PSH_82599 0x00008000 /* Check PSH bit in header */
+#define IXGBE_IMIR_CTRL_RST_82599 0x00010000 /* Check RST bit in header */
+#define IXGBE_IMIR_CTRL_SYN_82599 0x00020000 /* Check SYN bit in header */
+#define IXGBE_IMIR_CTRL_FIN_82599 0x00040000 /* Check FIN bit in header */
+#define IXGBE_IMIR_CTRL_BP_82599  0x00080000 /* Bypass check of control bits */
+#define IXGBE_IMIR_LLI_EN_82599   0x00100000 /* Enables low latency Int */
+#define IXGBE_IMIR_RX_QUEUE_MASK_82599  0x0000007F /* Rx Queue Mask */
+#define IXGBE_IMIR_RX_QUEUE_SHIFT_82599 21 /* Rx Queue Shift */
+#define IXGBE_IMIRVP_PRIORITY_MASK      0x00000007 /* VLAN priority mask */
+#define IXGBE_IMIRVP_PRIORITY_EN        0x00000008 /* VLAN priority enable */
+
+#define IXGBE_MAX_FTQF_FILTERS          128
+#define IXGBE_FTQF_PROTOCOL_MASK        0x00000003
+#define IXGBE_FTQF_PROTOCOL_TCP         0x00000000
+#define IXGBE_FTQF_PROTOCOL_UDP         0x00000001
+#define IXGBE_FTQF_PROTOCOL_SCTP        2
+#define IXGBE_FTQF_PRIORITY_MASK        0x00000007
+#define IXGBE_FTQF_PRIORITY_SHIFT       2
+#define IXGBE_FTQF_POOL_MASK            0x0000003F
+#define IXGBE_FTQF_POOL_SHIFT           8
+#define IXGBE_FTQF_5TUPLE_MASK_MASK     0x0000001F
+#define IXGBE_FTQF_5TUPLE_MASK_SHIFT    25
+#define IXGBE_FTQF_SOURCE_ADDR_MASK     0x1E
+#define IXGBE_FTQF_DEST_ADDR_MASK       0x1D
+#define IXGBE_FTQF_SOURCE_PORT_MASK     0x1B
+#define IXGBE_FTQF_DEST_PORT_MASK       0x17
+#define IXGBE_FTQF_PROTOCOL_COMP_MASK   0x0F
+#define IXGBE_FTQF_POOL_MASK_EN         0x40000000
+#define IXGBE_FTQF_QUEUE_ENABLE         0x80000000
+
+/* Interrupt clear mask */
+#define IXGBE_IRQ_CLEAR_MASK    0xFFFFFFFF
+
+/* Interrupt Vector Allocation Registers */
+#define IXGBE_IVAR_REG_NUM      25
+#define IXGBE_IVAR_REG_NUM_82599           64
+#define IXGBE_IVAR_TXRX_ENTRY   96
+#define IXGBE_IVAR_RX_ENTRY     64
+#define IXGBE_IVAR_RX_QUEUE(_i)    (0 + (_i))
+#define IXGBE_IVAR_TX_QUEUE(_i)    (64 + (_i))
+#define IXGBE_IVAR_TX_ENTRY     32
+
+#define IXGBE_IVAR_TCP_TIMER_INDEX       96 /* 0 based index */
+#define IXGBE_IVAR_OTHER_CAUSES_INDEX    97 /* 0 based index */
+
+#define IXGBE_MSIX_VECTOR(_i)   (0 + (_i))
+
+#define IXGBE_IVAR_ALLOC_VAL    0x80 /* Interrupt Allocation valid */
+
+/* ETYPE Queue Filter/Select Bit Masks */
+#define IXGBE_MAX_ETQF_FILTERS  8
+#define IXGBE_ETQF_FCOE         0x08000000 /* bit 27 */
+#define IXGBE_ETQF_BCN          0x10000000 /* bit 28 */
+#define IXGBE_ETQF_1588         0x40000000 /* bit 30 */
+#define IXGBE_ETQF_FILTER_EN    0x80000000 /* bit 31 */
+#define IXGBE_ETQF_POOL_ENABLE   (1 << 26) /* bit 26 */
+
+#define IXGBE_ETQS_RX_QUEUE     0x007F0000 /* bits 22:16 */
+#define IXGBE_ETQS_RX_QUEUE_SHIFT       16
+#define IXGBE_ETQS_LLI          0x20000000 /* bit 29 */
+#define IXGBE_ETQS_QUEUE_EN     0x80000000 /* bit 31 */
+
+/*
+ * ETQF filter list: one static filter per filter consumer. This is
+ *                   to avoid filter collisions later. Add new filters
+ *                   here!!
+ *
+ * Current filters:
+ *    EAPOL 802.1x (0x888e): Filter 0
+ *    FCoE (0x8906):         Filter 2
+ *    1588 (0x88f7):         Filter 3
+ *    FIP  (0x8914):         Filter 4
+ */
+#define IXGBE_ETQF_FILTER_EAPOL          0
+#define IXGBE_ETQF_FILTER_FCOE           2
+#define IXGBE_ETQF_FILTER_1588           3
+#define IXGBE_ETQF_FILTER_FIP            4
+/* VLAN Control Bit Masks */
+#define IXGBE_VLNCTRL_VET       0x0000FFFF  /* bits 0-15 */
+#define IXGBE_VLNCTRL_CFI       0x10000000  /* bit 28 */
+#define IXGBE_VLNCTRL_CFIEN     0x20000000  /* bit 29 */
+#define IXGBE_VLNCTRL_VFE       0x40000000  /* bit 30 */
+#define IXGBE_VLNCTRL_VME       0x80000000  /* bit 31 */
+
+/* VLAN pool filtering masks */
+#define IXGBE_VLVF_VIEN         0x80000000  /* filter is valid */
+#define IXGBE_VLVF_ENTRIES      64
+#define IXGBE_VLVF_VLANID_MASK  0x00000FFF
+/* Per VF Port VLAN insertion rules */
+#define IXGBE_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */
+#define IXGBE_VMVIR_VLANA_NEVER   0x80000000 /* Never insert VLAN tag */
+
+#define IXGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100  /* 802.1q protocol */
+
+/* STATUS Bit Masks */
+#define IXGBE_STATUS_LAN_ID         0x0000000C /* LAN ID */
+#define IXGBE_STATUS_LAN_ID_SHIFT   2          /* LAN ID Shift*/
+#define IXGBE_STATUS_GIO            0x00080000 /* GIO Master Enable Status */
+
+#define IXGBE_STATUS_LAN_ID_0   0x00000000 /* LAN ID 0 */
+#define IXGBE_STATUS_LAN_ID_1   0x00000004 /* LAN ID 1 */
+
+/* ESDP Bit Masks */
+#define IXGBE_ESDP_SDP0 0x00000001 /* SDP0 Data Value */
+#define IXGBE_ESDP_SDP1 0x00000002 /* SDP1 Data Value */
+#define IXGBE_ESDP_SDP2 0x00000004 /* SDP2 Data Value */
+#define IXGBE_ESDP_SDP3 0x00000008 /* SDP3 Data Value */
+#define IXGBE_ESDP_SDP4 0x00000010 /* SDP4 Data Value */
+#define IXGBE_ESDP_SDP5 0x00000020 /* SDP5 Data Value */
+#define IXGBE_ESDP_SDP6 0x00000040 /* SDP6 Data Value */
+/* NOTE(review): 0x00000004 below overlaps IXGBE_ESDP_SDP2 defined above;
+ * the direction bits appear to live in a different field layout -- verify
+ * against the controller datasheet before relying on these. */
+#define IXGBE_ESDP_SDP4_DIR     0x00000004 /* SDP4 IO direction */
+#define IXGBE_ESDP_SDP5_DIR     0x00002000 /* SDP5 IO direction */
+
+/* LEDCTL Bit Masks */
+#define IXGBE_LED_IVRT_BASE      0x00000040
+#define IXGBE_LED_BLINK_BASE     0x00000080
+#define IXGBE_LED_MODE_MASK_BASE 0x0000000F
+#define IXGBE_LED_OFFSET(_base, _i) (_base << (8 * (_i)))
+#define IXGBE_LED_MODE_SHIFT(_i) (8*(_i))
+#define IXGBE_LED_IVRT(_i)       IXGBE_LED_OFFSET(IXGBE_LED_IVRT_BASE, _i)
+#define IXGBE_LED_BLINK(_i)      IXGBE_LED_OFFSET(IXGBE_LED_BLINK_BASE, _i)
+#define IXGBE_LED_MODE_MASK(_i)  IXGBE_LED_OFFSET(IXGBE_LED_MODE_MASK_BASE, _i)
+
+/* LED modes */
+#define IXGBE_LED_LINK_UP       0x0
+#define IXGBE_LED_LINK_10G      0x1
+#define IXGBE_LED_MAC           0x2
+#define IXGBE_LED_FILTER        0x3
+#define IXGBE_LED_LINK_ACTIVE   0x4
+#define IXGBE_LED_LINK_1G       0x5
+#define IXGBE_LED_ON            0xE
+#define IXGBE_LED_OFF           0xF
+
+/* AUTOC Bit Masks */
+#define IXGBE_AUTOC_KX4_KX_SUPP_MASK 0xC0000000
+#define IXGBE_AUTOC_KX4_SUPP    0x80000000
+#define IXGBE_AUTOC_KX_SUPP     0x40000000
+#define IXGBE_AUTOC_PAUSE       0x30000000
+#define IXGBE_AUTOC_ASM_PAUSE   0x20000000
+#define IXGBE_AUTOC_SYM_PAUSE   0x10000000
+#define IXGBE_AUTOC_RF          0x08000000
+#define IXGBE_AUTOC_PD_TMR      0x06000000
+#define IXGBE_AUTOC_AN_RX_LOOSE 0x01000000
+#define IXGBE_AUTOC_AN_RX_DRIFT 0x00800000
+#define IXGBE_AUTOC_AN_RX_ALIGN 0x007C0000
+#define IXGBE_AUTOC_FECA        0x00040000
+#define IXGBE_AUTOC_FECR        0x00020000
+#define IXGBE_AUTOC_KR_SUPP     0x00010000
+#define IXGBE_AUTOC_AN_RESTART  0x00001000
+#define IXGBE_AUTOC_FLU         0x00000001
+#define IXGBE_AUTOC_LMS_SHIFT   13
+#define IXGBE_AUTOC_LMS_10G_SERIAL      (0x3 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_KX4_KX_KR       (0x4 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_SGMII_1G_100M   (0x5 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN (0x6 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII (0x7 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_MASK            (0x7 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_1G_LINK_NO_AN   (0x0 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_10G_LINK_NO_AN  (0x1 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_1G_AN           (0x2 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_KX4_AN          (0x4 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_KX4_AN_1G_AN    (0x6 << IXGBE_AUTOC_LMS_SHIFT)
+#define IXGBE_AUTOC_LMS_ATTACH_TYPE     (0x7 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
+
+#define IXGBE_AUTOC_1G_PMA_PMD_MASK    0x00000200
+#define IXGBE_AUTOC_1G_PMA_PMD_SHIFT   9
+#define IXGBE_AUTOC_10G_PMA_PMD_MASK   0x00000180
+#define IXGBE_AUTOC_10G_PMA_PMD_SHIFT  7
+#define IXGBE_AUTOC_10G_XAUI   (0x0 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_10G_KX4    (0x1 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_10G_CX4    (0x2 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_1G_BX      (0x0 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_1G_KX      (0x1 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_1G_SFI     (0x0 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_1G_KX_BX   (0x1 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
+
+#define IXGBE_AUTOC2_UPPER_MASK  0xFFFF0000
+#define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK  0x00030000
+#define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT 16
+#define IXGBE_AUTOC2_10G_KR  (0x0 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC2_10G_XFI (0x1 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC2_10G_SFI (0x2 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
+
+#define IXGBE_MACC_FLU       0x00000001
+#define IXGBE_MACC_FSV_10G   0x00030000
+#define IXGBE_MACC_FS        0x00040000
+#define IXGBE_MAC_RX2TX_LPBK 0x00000002
+
+/* LINKS Bit Masks */
+#define IXGBE_LINKS_KX_AN_COMP  0x80000000
+#define IXGBE_LINKS_UP          0x40000000
+#define IXGBE_LINKS_SPEED       0x20000000
+#define IXGBE_LINKS_MODE        0x18000000
+#define IXGBE_LINKS_RX_MODE     0x06000000
+#define IXGBE_LINKS_TX_MODE     0x01800000
+#define IXGBE_LINKS_XGXS_EN     0x00400000
+#define IXGBE_LINKS_SGMII_EN    0x02000000
+#define IXGBE_LINKS_PCS_1G_EN   0x00200000
+#define IXGBE_LINKS_1G_AN_EN    0x00100000
+#define IXGBE_LINKS_KX_AN_IDLE  0x00080000
+#define IXGBE_LINKS_1G_SYNC     0x00040000
+#define IXGBE_LINKS_10G_ALIGN   0x00020000
+#define IXGBE_LINKS_10G_LANE_SYNC 0x00017000
+#define IXGBE_LINKS_TL_FAULT    0x00001000
+#define IXGBE_LINKS_SIGNAL      0x00000F00
+
+#define IXGBE_LINKS_SPEED_82599     0x30000000
+#define IXGBE_LINKS_SPEED_10G_82599 0x30000000
+#define IXGBE_LINKS_SPEED_1G_82599  0x20000000
+#define IXGBE_LINKS_SPEED_100_82599 0x10000000
+#define IXGBE_LINK_UP_TIME      90 /* 9.0 Seconds (value is in 100 ms units) */
+#define IXGBE_AUTO_NEG_TIME     45 /* 4.5 Seconds (value is in 100 ms units) */
+
+#define IXGBE_LINKS2_AN_SUPPORTED   0x00000040
+
+/* PCS1GLSTA Bit Masks */
+#define IXGBE_PCS1GLSTA_LINK_OK         1
+#define IXGBE_PCS1GLSTA_SYNK_OK         0x10
+#define IXGBE_PCS1GLSTA_AN_COMPLETE     0x10000
+#define IXGBE_PCS1GLSTA_AN_PAGE_RX      0x20000
+#define IXGBE_PCS1GLSTA_AN_TIMED_OUT    0x40000
+#define IXGBE_PCS1GLSTA_AN_REMOTE_FAULT 0x80000
+#define IXGBE_PCS1GLSTA_AN_ERROR_RWS    0x100000
+
+#define IXGBE_PCS1GANA_SYM_PAUSE        0x80
+#define IXGBE_PCS1GANA_ASM_PAUSE        0x100
+
+/* PCS1GLCTL Bit Masks */
+#define IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN  0x00040000 /* PCS 1G autoneg to en */
+#define IXGBE_PCS1GLCTL_FLV_LINK_UP     1
+#define IXGBE_PCS1GLCTL_FORCE_LINK      0x20
+#define IXGBE_PCS1GLCTL_LOW_LINK_LATCH  0x40
+#define IXGBE_PCS1GLCTL_AN_ENABLE       0x10000
+#define IXGBE_PCS1GLCTL_AN_RESTART      0x20000
+
+/* ANLP1 Bit Masks */
+#define IXGBE_ANLP1_PAUSE               0x0C00
+#define IXGBE_ANLP1_SYM_PAUSE           0x0400
+#define IXGBE_ANLP1_ASM_PAUSE           0x0800
+#define IXGBE_ANLP1_AN_STATE_MASK       0x000f0000
+
+/* SW Semaphore Register bitmasks */
+#define IXGBE_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
+#define IXGBE_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */
+#define IXGBE_SWSM_WMNG 0x00000004 /* Wake MNG Clock */
+#define IXGBE_SWFW_REGSMP 0x80000000 /* Register Semaphore bit 31 */
+
+/* SW_FW_SYNC/GSSR definitions */
+#define IXGBE_GSSR_EEP_SM     0x0001
+#define IXGBE_GSSR_PHY0_SM    0x0002
+#define IXGBE_GSSR_PHY1_SM    0x0004
+#define IXGBE_GSSR_MAC_CSR_SM 0x0008
+#define IXGBE_GSSR_FLASH_SM   0x0010
+#define IXGBE_GSSR_SW_MNG_SM  0x0400
+
+/* FW Status register bitmask */
+#define IXGBE_FWSTS_FWRI    0x00000200 /* Firmware Reset Indication */
+
+/* EEC Register */
+#define IXGBE_EEC_SK        0x00000001 /* EEPROM Clock */
+#define IXGBE_EEC_CS        0x00000002 /* EEPROM Chip Select */
+#define IXGBE_EEC_DI        0x00000004 /* EEPROM Data In */
+#define IXGBE_EEC_DO        0x00000008 /* EEPROM Data Out */
+#define IXGBE_EEC_FWE_MASK  0x00000030 /* FLASH Write Enable */
+#define IXGBE_EEC_FWE_DIS   0x00000010 /* Disable FLASH writes */
+#define IXGBE_EEC_FWE_EN    0x00000020 /* Enable FLASH writes */
+#define IXGBE_EEC_FWE_SHIFT 4
+#define IXGBE_EEC_REQ       0x00000040 /* EEPROM Access Request */
+#define IXGBE_EEC_GNT       0x00000080 /* EEPROM Access Grant */
+#define IXGBE_EEC_PRES      0x00000100 /* EEPROM Present */
+#define IXGBE_EEC_ARD       0x00000200 /* EEPROM Auto Read Done */
+#define IXGBE_EEC_FLUP      0x00800000 /* Flash update command */
+#define IXGBE_EEC_SEC1VAL   0x02000000 /* Sector 1 Valid */
+#define IXGBE_EEC_FLUDONE   0x04000000 /* Flash update done */
+/* EEPROM Addressing bits based on type (0-small, 1-large) */
+#define IXGBE_EEC_ADDR_SIZE 0x00000400
+#define IXGBE_EEC_SIZE      0x00007800 /* EEPROM Size */
+#define IXGBE_EERD_MAX_ADDR 0x00003FFF /* EERD allows 14 bits for addr. */
+
+#define IXGBE_EEC_SIZE_SHIFT            11
+#define IXGBE_EEPROM_WORD_SIZE_SHIFT    6
+#define IXGBE_EEPROM_OPCODE_BITS        8
+
+/* Part Number String Length */
+#define IXGBE_PBANUM_LENGTH 11
+
+/* Checksum and EEPROM pointers */
+#define IXGBE_PBANUM_PTR_GUARD  0xFAFA
+#define IXGBE_EEPROM_CHECKSUM   0x3F
+#define IXGBE_EEPROM_SUM        0xBABA
+#define IXGBE_PCIE_ANALOG_PTR   0x03
+#define IXGBE_ATLAS0_CONFIG_PTR 0x04
+#define IXGBE_PHY_PTR           0x04
+#define IXGBE_ATLAS1_CONFIG_PTR 0x05
+#define IXGBE_OPTION_ROM_PTR    0x05
+#define IXGBE_PCIE_GENERAL_PTR  0x06
+#define IXGBE_PCIE_CONFIG0_PTR  0x07
+#define IXGBE_PCIE_CONFIG1_PTR  0x08
+#define IXGBE_CORE0_PTR         0x09
+#define IXGBE_CORE1_PTR         0x0A
+#define IXGBE_MAC0_PTR          0x0B
+#define IXGBE_MAC1_PTR          0x0C
+#define IXGBE_CSR0_CONFIG_PTR   0x0D
+#define IXGBE_CSR1_CONFIG_PTR   0x0E
+#define IXGBE_FW_PTR            0x0F
+#define IXGBE_PBANUM0_PTR       0x15
+#define IXGBE_PBANUM1_PTR       0x16
+/* lowercase hex prefix (was "0X3E") for consistency with the rest of file */
+#define IXGBE_FREE_SPACE_PTR    0x3E
+#define IXGBE_SAN_MAC_ADDR_PTR  0x28
+#define IXGBE_DEVICE_CAPS       0x2C
+#define IXGBE_DEVICE_CAPS_EXT_THERMAL_SENSOR 0x10
+#define IXGBE_SERIAL_NUMBER_MAC_ADDR 0x11
+#define IXGBE_PCIE_MSIX_82599_CAPS  0x72
+#define IXGBE_PCIE_MSIX_82598_CAPS  0x62
+
+/* MSI-X capability fields masks */
+#define IXGBE_PCIE_MSIX_TBL_SZ_MASK     0x7FF
+
+/* Legacy EEPROM word offsets */
+#define IXGBE_ISCSI_BOOT_CAPS           0x0033
+#define IXGBE_ISCSI_SETUP_PORT_0        0x0030
+#define IXGBE_ISCSI_SETUP_PORT_1        0x0034
+
+/* EEPROM Commands - SPI */
+#define IXGBE_EEPROM_MAX_RETRY_SPI      5000 /* Max wait 5ms for RDY signal */
+#define IXGBE_EEPROM_STATUS_RDY_SPI     0x01
+#define IXGBE_EEPROM_READ_OPCODE_SPI    0x03  /* EEPROM read opcode */
+#define IXGBE_EEPROM_WRITE_OPCODE_SPI   0x02  /* EEPROM write opcode */
+#define IXGBE_EEPROM_A8_OPCODE_SPI      0x08  /* opcode bit-3 = addr bit-8 */
+#define IXGBE_EEPROM_WREN_OPCODE_SPI    0x06  /* EEPROM set Write Ena latch */
+/* EEPROM reset Write Enable latch */
+#define IXGBE_EEPROM_WRDI_OPCODE_SPI    0x04
+#define IXGBE_EEPROM_RDSR_OPCODE_SPI    0x05  /* EEPROM read Status reg */
+#define IXGBE_EEPROM_WRSR_OPCODE_SPI    0x01  /* EEPROM write Status reg */
+#define IXGBE_EEPROM_ERASE4K_OPCODE_SPI 0x20  /* EEPROM ERASE 4KB */
+#define IXGBE_EEPROM_ERASE64K_OPCODE_SPI  0xD8  /* EEPROM ERASE 64KB */
+#define IXGBE_EEPROM_ERASE256_OPCODE_SPI  0xDB  /* EEPROM ERASE 256B */
+
+/* EEPROM Read Register */
+#define IXGBE_EEPROM_RW_REG_DATA   16 /* data offset in EEPROM read reg */
+#define IXGBE_EEPROM_RW_REG_DONE   2  /* Offset to READ done bit */
+#define IXGBE_EEPROM_RW_REG_START  1  /* First bit to start operation */
+#define IXGBE_EEPROM_RW_ADDR_SHIFT 2  /* Shift to the address bits */
+#define IXGBE_NVM_POLL_WRITE       1  /* Flag for polling for write complete */
+#define IXGBE_NVM_POLL_READ        0  /* Flag for polling for read complete */
+
+#define IXGBE_ETH_LENGTH_OF_ADDRESS   6
+
+#define IXGBE_EEPROM_PAGE_SIZE_MAX       128
+#define IXGBE_EEPROM_RD_BUFFER_MAX_COUNT 512 /* EEPROM words # read in burst */
+#define IXGBE_EEPROM_WR_BUFFER_MAX_COUNT 256 /* EEPROM words # wr in burst */
+
+#ifndef IXGBE_EEPROM_GRANT_ATTEMPTS
+#define IXGBE_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */
+#endif
+
+/* Number of 5-microsecond intervals we wait for an EERD read or
+ * an EEWR write to complete */
+#define IXGBE_EERD_EEWR_ATTEMPTS 100000
+
+/* # attempts we wait for flush update to complete */
+#define IXGBE_FLUDONE_ATTEMPTS 20000
+
+#define IXGBE_PCIE_CTRL2                 0x5   /* PCIe Control 2 Offset */
+#define IXGBE_PCIE_CTRL2_DUMMY_ENABLE    0x8   /* Dummy Function Enable */
+#define IXGBE_PCIE_CTRL2_LAN_DISABLE     0x2   /* LAN PCI Disable */
+#define IXGBE_PCIE_CTRL2_DISABLE_SELECT  0x1   /* LAN Disable Select */
+
+#define IXGBE_SAN_MAC_ADDR_PORT0_OFFSET  0x0
+#define IXGBE_SAN_MAC_ADDR_PORT1_OFFSET  0x3
+#define IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP  0x1
+#define IXGBE_DEVICE_CAPS_FCOE_OFFLOADS  0x2
+#define IXGBE_FW_LESM_PARAMETERS_PTR     0x2
+#define IXGBE_FW_LESM_STATE_1            0x1
+#define IXGBE_FW_LESM_STATE_ENABLED      0x8000 /* LESM Enable bit */
+#define IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR   0x4
+#define IXGBE_FW_PATCH_VERSION_4         0x7
+#define IXGBE_FCOE_IBA_CAPS_BLK_PTR         0x33 /* iSCSI/FCOE block */
+#define IXGBE_FCOE_IBA_CAPS_FCOE            0x20 /* FCOE flags */
+#define IXGBE_ISCSI_FCOE_BLK_PTR            0x17 /* iSCSI/FCOE block */
+#define IXGBE_ISCSI_FCOE_FLAGS_OFFSET       0x0  /* FCOE flags */
+#define IXGBE_ISCSI_FCOE_FLAGS_ENABLE       0x1  /* FCOE flags enable bit */
+#define IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR      0x27 /* Alt. SAN MAC block */
+#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET  0x0  /* Alt. SAN MAC capability */
+#define IXGBE_ALT_SAN_MAC_ADDR_PORT0_OFFSET 0x1  /* Alt. SAN MAC 0 offset */
+#define IXGBE_ALT_SAN_MAC_ADDR_PORT1_OFFSET 0x4  /* Alt. SAN MAC 1 offset */
+#define IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET  0x7  /* Alt. WWNN prefix offset */
+#define IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET  0x8  /* Alt. WWPN prefix offset */
+#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_SANMAC  0x0  /* Alt. SAN MAC exists */
+#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN  0x1  /* Alt. WWN base exists */
+
+#define IXGBE_DEVICE_CAPS_WOL_PORT0_1  0x4 /* WoL supported on ports 0 & 1 */
+#define IXGBE_DEVICE_CAPS_WOL_PORT0    0x8 /* WoL supported on port 0 */
+#define IXGBE_DEVICE_CAPS_WOL_MASK     0xC /* Mask for WoL capabilities */
+
+/* PCI Bus Info */
+#define IXGBE_PCI_DEVICE_STATUS   0xAA
+#define IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING   0x0020
+#define IXGBE_PCI_LINK_STATUS     0xB2
+#define IXGBE_PCI_DEVICE_CONTROL2 0xC8
+#define IXGBE_PCI_LINK_WIDTH      0x3F0
+#define IXGBE_PCI_LINK_WIDTH_1    0x10
+#define IXGBE_PCI_LINK_WIDTH_2    0x20
+#define IXGBE_PCI_LINK_WIDTH_4    0x40
+#define IXGBE_PCI_LINK_WIDTH_8    0x80
+#define IXGBE_PCI_LINK_SPEED      0xF
+#define IXGBE_PCI_LINK_SPEED_2500 0x1
+#define IXGBE_PCI_LINK_SPEED_5000 0x2
+#define IXGBE_PCI_HEADER_TYPE_REGISTER  0x0E
+#define IXGBE_PCI_HEADER_TYPE_MULTIFUNC 0x80
+#define IXGBE_PCI_DEVICE_CONTROL2_16ms  0x0005
+
+/* Number of 100 microseconds we wait for PCI Express master disable */
+#define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800
+
+/* Check whether an address is multicast. This is a little-endian-specific check. */
+#define IXGBE_IS_MULTICAST(Address) \
+                (bool)(((u8 *)(Address))[0] & ((u8)0x01))
+
+/* Check whether an address is broadcast. */
+#define IXGBE_IS_BROADCAST(Address)                      \
+                ((((u8 *)(Address))[0] == ((u8)0xff)) && \
+                (((u8 *)(Address))[1] == ((u8)0xff)))
+
+/* RAH */
+#define IXGBE_RAH_VIND_MASK     0x003C0000
+#define IXGBE_RAH_VIND_SHIFT    18
+#define IXGBE_RAH_AV            0x80000000
+#define IXGBE_CLEAR_VMDQ_ALL    0xFFFFFFFF
+
+/* Header split receive */
+#define IXGBE_RFCTL_ISCSI_DIS       0x00000001
+#define IXGBE_RFCTL_ISCSI_DWC_MASK  0x0000003E
+#define IXGBE_RFCTL_ISCSI_DWC_SHIFT 1
+#define IXGBE_RFCTL_RSC_DIS         0x00000010
+#define IXGBE_RFCTL_NFSW_DIS        0x00000040
+#define IXGBE_RFCTL_NFSR_DIS        0x00000080
+#define IXGBE_RFCTL_NFS_VER_MASK    0x00000300
+#define IXGBE_RFCTL_NFS_VER_SHIFT   8
+#define IXGBE_RFCTL_NFS_VER_2       0
+#define IXGBE_RFCTL_NFS_VER_3       1
+#define IXGBE_RFCTL_NFS_VER_4       2
+#define IXGBE_RFCTL_IPV6_DIS        0x00000400
+#define IXGBE_RFCTL_IPV6_XSUM_DIS   0x00000800
+#define IXGBE_RFCTL_IPFRSP_DIS      0x00004000
+#define IXGBE_RFCTL_IPV6_EX_DIS     0x00010000
+#define IXGBE_RFCTL_NEW_IPV6_EXT_DIS 0x00020000
+
+/* Transmit Config masks */
+#define IXGBE_TXDCTL_ENABLE     0x02000000 /* Enable specific Tx Queue */
+#define IXGBE_TXDCTL_SWFLSH     0x04000000 /* Tx Desc. write-back flushing */
+#define IXGBE_TXDCTL_WTHRESH_SHIFT      16 /* shift to WTHRESH bits */
+/* Enable short packet padding to 64 bytes */
+#define IXGBE_TX_PAD_ENABLE     0x00000400
+#define IXGBE_JUMBO_FRAME_ENABLE 0x00000004  /* Allow jumbo frames */
+/* This allows for 16K packets + 4k for vlan */
+#define IXGBE_MAX_FRAME_SZ      0x40040000
+
+#define IXGBE_TDWBAL_HEAD_WB_ENABLE   0x1      /* Tx head write-back enable */
+#define IXGBE_TDWBAL_SEQNUM_WB_ENABLE 0x2      /* Tx seq# write-back enable */
+
+/* Receive Config masks */
+#define IXGBE_RXCTRL_RXEN       0x00000001  /* Enable Receiver */
+#define IXGBE_RXCTRL_DMBYPS     0x00000002  /* Descriptor Monitor Bypass */
+#define IXGBE_RXDCTL_ENABLE     0x02000000  /* Enable specific Rx Queue */
+#define IXGBE_RXDCTL_SWFLSH     0x04000000  /* Rx Desc. write-back flushing */
+#define IXGBE_RXDCTL_RLPMLMASK  0x00003FFF  /* Only supported on the X540 */
+#define IXGBE_RXDCTL_RLPML_EN   0x00008000
+#define IXGBE_RXDCTL_VME        0x40000000  /* VLAN mode enable */
+
+#define IXGBE_TSYNCTXCTL_VALID     0x00000001 /* Tx timestamp valid */
+#define IXGBE_TSYNCTXCTL_ENABLED   0x00000010 /* Tx timestamping enabled */
+
+#define IXGBE_TSYNCRXCTL_VALID     0x00000001 /* Rx timestamp valid */
+#define IXGBE_TSYNCRXCTL_TYPE_MASK 0x0000000E /* Rx type mask */
+#define IXGBE_TSYNCRXCTL_TYPE_L2_V2      0x00
+#define IXGBE_TSYNCRXCTL_TYPE_L4_V1      0x02
+#define IXGBE_TSYNCRXCTL_TYPE_L2_L4_V2   0x04
+#define IXGBE_TSYNCRXCTL_TYPE_EVENT_V2   0x0A
+#define IXGBE_TSYNCRXCTL_ENABLED   0x00000010 /* Rx Timestamping enabled */
+
+#define IXGBE_RXMTRL_V1_CTRLT_MASK 0x000000FF
+#define IXGBE_RXMTRL_V1_SYNC_MSG         0x00
+#define IXGBE_RXMTRL_V1_DELAY_REQ_MSG    0x01
+#define IXGBE_RXMTRL_V1_FOLLOWUP_MSG     0x02
+#define IXGBE_RXMTRL_V1_DELAY_RESP_MSG   0x03
+#define IXGBE_RXMTRL_V1_MGMT_MSG         0x04
+
+#define IXGBE_RXMTRL_V2_MSGID_MASK      0x0000FF00
+#define IXGBE_RXMTRL_V2_SYNC_MSG            0x0000
+#define IXGBE_RXMTRL_V2_DELAY_REQ_MSG       0x0100
+#define IXGBE_RXMTRL_V2_PDELAY_REQ_MSG      0x0200
+#define IXGBE_RXMTRL_V2_PDELAY_RESP_MSG     0x0300
+#define IXGBE_RXMTRL_V2_FOLLOWUP_MSG        0x0800
+#define IXGBE_RXMTRL_V2_DELAY_RESP_MSG      0x0900
+#define IXGBE_RXMTRL_V2_PDELAY_FOLLOWUP_MSG 0x0A00
+#define IXGBE_RXMTRL_V2_ANNOUNCE_MSG        0x0B00
+#define IXGBE_RXMTRL_V2_SIGNALLING_MSG      0x0C00
+#define IXGBE_RXMTRL_V2_MGMT_MSG            0x0D00
+
+#define IXGBE_FCTRL_SBP 0x00000002 /* Store Bad Packet */
+#define IXGBE_FCTRL_MPE 0x00000100 /* Multicast Promiscuous Ena*/
+#define IXGBE_FCTRL_UPE 0x00000200 /* Unicast Promiscuous Ena */
+#define IXGBE_FCTRL_BAM 0x00000400 /* Broadcast Accept Mode */
+#define IXGBE_FCTRL_PMCF 0x00001000 /* Pass MAC Control Frames */
+#define IXGBE_FCTRL_DPF 0x00002000 /* Discard Pause Frame */
+/* Receive Priority Flow Control Enable */
+#define IXGBE_FCTRL_RPFCE 0x00004000
+#define IXGBE_FCTRL_RFCE 0x00008000 /* Receive Flow Control Ena */
+#define IXGBE_MFLCN_PMCF        0x00000001 /* Pass MAC Control Frames */
+#define IXGBE_MFLCN_DPF         0x00000002 /* Discard Pause Frame */
+#define IXGBE_MFLCN_RPFCE       0x00000004 /* Receive Priority FC Enable */
+#define IXGBE_MFLCN_RFCE        0x00000008 /* Receive FC Enable */
+#define IXGBE_MFLCN_RPFCE_SHIFT 4          /* Rx Priority FC bitmap shift */
+
+/* Multiple Receive Queue Control */
+#define IXGBE_MRQC_RSSEN                 0x00000001  /* RSS Enable */
+#define IXGBE_MRQC_MRQE_MASK                    0xF /* Bits 3:0 */
+#define IXGBE_MRQC_RT8TCEN               0x00000002 /* 8 TC no RSS */
+#define IXGBE_MRQC_RT4TCEN               0x00000003 /* 4 TC no RSS */
+#define IXGBE_MRQC_RTRSS8TCEN            0x00000004 /* 8 TC w/ RSS */
+#define IXGBE_MRQC_RTRSS4TCEN            0x00000005 /* 4 TC w/ RSS */
+#define IXGBE_MRQC_VMDQEN                0x00000008 /* VMDq2 64 pools no RSS */
+#define IXGBE_MRQC_VMDQRSS32EN           0x0000000A /* VMDq2 32 pools w/ RSS */
+#define IXGBE_MRQC_VMDQRSS64EN           0x0000000B /* VMDq2 64 pools w/ RSS */
+#define IXGBE_MRQC_VMDQRT8TCEN           0x0000000C /* VMDq2/RT 16 pool 8 TC */
+#define IXGBE_MRQC_VMDQRT4TCEN           0x0000000D /* VMDq2/RT 32 pool 4 TC */
+#define IXGBE_MRQC_RSS_FIELD_MASK        0xFFFF0000
+#define IXGBE_MRQC_RSS_FIELD_IPV4_TCP    0x00010000
+#define IXGBE_MRQC_RSS_FIELD_IPV4        0x00020000
+#define IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP 0x00040000
+#define IXGBE_MRQC_RSS_FIELD_IPV6_EX     0x00080000
+#define IXGBE_MRQC_RSS_FIELD_IPV6        0x00100000
+#define IXGBE_MRQC_RSS_FIELD_IPV6_TCP    0x00200000
+#define IXGBE_MRQC_RSS_FIELD_IPV4_UDP    0x00400000
+#define IXGBE_MRQC_RSS_FIELD_IPV6_UDP    0x00800000
+#define IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP 0x01000000
+#define IXGBE_MRQC_L3L4TXSWEN            0x00008000
+
+/* Queue Drop Enable */
+#define IXGBE_QDE_ENABLE     0x00000001
+#define IXGBE_QDE_IDX_MASK   0x00007F00
+#define IXGBE_QDE_IDX_SHIFT           8
+
+#define IXGBE_TXD_POPTS_IXSM 0x01       /* Insert IP checksum */
+#define IXGBE_TXD_POPTS_TXSM 0x02       /* Insert TCP/UDP checksum */
+#define IXGBE_TXD_CMD_EOP    0x01000000 /* End of Packet */
+#define IXGBE_TXD_CMD_IFCS   0x02000000 /* Insert FCS (Ethernet CRC) */
+#define IXGBE_TXD_CMD_IC     0x04000000 /* Insert Checksum */
+#define IXGBE_TXD_CMD_RS     0x08000000 /* Report Status */
+#define IXGBE_TXD_CMD_DEXT   0x20000000 /* Descriptor extension (0 = legacy) */
+#define IXGBE_TXD_CMD_VLE    0x40000000 /* Add VLAN tag */
+#define IXGBE_TXD_STAT_DD    0x00000001 /* Descriptor Done */
+
+#define IXGBE_RXDADV_IPSEC_STATUS_SECP                  0x00020000
+#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_PROTOCOL       0x08000000
+#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_LENGTH         0x10000000
+#define IXGBE_RXDADV_IPSEC_ERROR_AUTH_FAILED            0x18000000
+#define IXGBE_RXDADV_IPSEC_ERROR_BIT_MASK               0x18000000
+/* Multiple Transmit Queue Command Register */
+#define IXGBE_MTQC_RT_ENA       0x1 /* DCB Enable */
+#define IXGBE_MTQC_VT_ENA       0x2 /* VMDQ2 Enable */
+#define IXGBE_MTQC_64Q_1PB      0x0 /* 64 queues 1 packet buffer */
+#define IXGBE_MTQC_32VF         0x8 /* 4 TX Queues per pool w/32VF's */
+#define IXGBE_MTQC_64VF         0x4 /* 2 TX Queues per pool w/64VF's */
+#define IXGBE_MTQC_4TC_4TQ      0x8 /* 4 TC if RT_ENA and VT_ENA */
+#define IXGBE_MTQC_8TC_8TQ      0xC /* 8 TC if RT_ENA or 8 TQ if VT_ENA */
+
+/* Receive Descriptor bit definitions */
+#define IXGBE_RXD_STAT_DD       0x01    /* Descriptor Done */
+#define IXGBE_RXD_STAT_EOP      0x02    /* End of Packet */
+#define IXGBE_RXD_STAT_FLM      0x04    /* FDir Match */
+#define IXGBE_RXD_STAT_VP       0x08    /* IEEE VLAN Packet */
+#define IXGBE_RXDADV_NEXTP_MASK   0x000FFFF0 /* Next Descriptor Index */
+#define IXGBE_RXDADV_NEXTP_SHIFT  0x00000004
+#define IXGBE_RXD_STAT_UDPCS    0x10    /* UDP xsum calculated */
+#define IXGBE_RXD_STAT_L4CS     0x20    /* L4 xsum calculated */
+#define IXGBE_RXD_STAT_IPCS     0x40    /* IP xsum calculated */
+#define IXGBE_RXD_STAT_PIF      0x80    /* passed in-exact filter */
+#define IXGBE_RXD_STAT_CRCV     0x100   /* Speculative CRC Valid */
+#define IXGBE_RXD_STAT_VEXT     0x200   /* 1st VLAN found */
+#define IXGBE_RXD_STAT_UDPV     0x400   /* Valid UDP checksum */
+#define IXGBE_RXD_STAT_DYNINT   0x800   /* Pkt caused INT via DYNINT */
+#define IXGBE_RXD_STAT_LLINT    0x800   /* Pkt caused Low Latency Interrupt */
+#define IXGBE_RXD_STAT_TS       0x10000 /* Time Stamp */
+#define IXGBE_RXD_STAT_SECP     0x20000 /* Security Processing */
+#define IXGBE_RXD_STAT_LB       0x40000 /* Loopback Status */
+#define IXGBE_RXD_STAT_ACK      0x8000  /* ACK Packet indication */
+#define IXGBE_RXD_ERR_CE        0x01    /* CRC Error */
+#define IXGBE_RXD_ERR_LE        0x02    /* Length Error */
+#define IXGBE_RXD_ERR_PE        0x08    /* Packet Error */
+#define IXGBE_RXD_ERR_OSE       0x10    /* Oversize Error */
+#define IXGBE_RXD_ERR_USE       0x20    /* Undersize Error */
+#define IXGBE_RXD_ERR_TCPE      0x40    /* TCP/UDP Checksum Error */
+#define IXGBE_RXD_ERR_IPE       0x80    /* IP Checksum Error */
+#define IXGBE_RXDADV_ERR_MASK           0xfff00000 /* RDESC.ERRORS mask */
+#define IXGBE_RXDADV_ERR_SHIFT          20         /* RDESC.ERRORS shift */
+#define IXGBE_RXDADV_ERR_RXE            0x20000000 /* Any MAC Error */
+#define IXGBE_RXDADV_ERR_FCEOFE         0x80000000 /* FCoEFe/IPE */
+#define IXGBE_RXDADV_ERR_FCERR          0x00700000 /* FCERR/FDIRERR */
+#define IXGBE_RXDADV_ERR_FDIR_LEN       0x00100000 /* FDIR Length error */
+#define IXGBE_RXDADV_ERR_FDIR_DROP      0x00200000 /* FDIR Drop error */
+#define IXGBE_RXDADV_ERR_FDIR_COLL      0x00400000 /* FDIR Collision error */
+#define IXGBE_RXDADV_ERR_HBO    0x00800000 /*Header Buffer Overflow */
+#define IXGBE_RXDADV_ERR_CE     0x01000000 /* CRC Error */
+#define IXGBE_RXDADV_ERR_LE     0x02000000 /* Length Error */
+#define IXGBE_RXDADV_ERR_PE     0x08000000 /* Packet Error */
+#define IXGBE_RXDADV_ERR_OSE    0x10000000 /* Oversize Error */
+#define IXGBE_RXDADV_ERR_USE    0x20000000 /* Undersize Error */
+#define IXGBE_RXDADV_ERR_TCPE   0x40000000 /* TCP/UDP Checksum Error */
+#define IXGBE_RXDADV_ERR_IPE    0x80000000 /* IP Checksum Error */
+#define IXGBE_RXD_VLAN_ID_MASK  0x0FFF  /* VLAN ID is in lower 12 bits */
+#define IXGBE_RXD_PRI_MASK      0xE000  /* Priority is in upper 3 bits */
+#define IXGBE_RXD_PRI_SHIFT     13
+#define IXGBE_RXD_CFI_MASK      0x1000  /* CFI is bit 12 */
+#define IXGBE_RXD_CFI_SHIFT     12
+
+#define IXGBE_RXDADV_STAT_DD            IXGBE_RXD_STAT_DD  /* Done */
+#define IXGBE_RXDADV_STAT_EOP           IXGBE_RXD_STAT_EOP /* End of Packet */
+#define IXGBE_RXDADV_STAT_FLM           IXGBE_RXD_STAT_FLM /* FDir Match */
+#define IXGBE_RXDADV_STAT_VP            IXGBE_RXD_STAT_VP  /* IEEE VLAN Pkt */
+#define IXGBE_RXDADV_STAT_MASK          0x000fffff /* Stat/NEXTP: bit 0-19 */
+#define IXGBE_RXDADV_STAT_FCEOFS        0x00000040 /* FCoE EOF/SOF Stat */
+#define IXGBE_RXDADV_STAT_FCSTAT        0x00000030 /* FCoE Pkt Stat */
+#define IXGBE_RXDADV_STAT_FCSTAT_NOMTCH 0x00000000 /* 00: No Ctxt Match */
+#define IXGBE_RXDADV_STAT_FCSTAT_NODDP  0x00000010 /* 01: Ctxt w/o DDP */
+#define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP 0x00000020 /* 10: Recv. FCP_RSP */
+#define IXGBE_RXDADV_STAT_FCSTAT_DDP    0x00000030 /* 11: Ctxt w/ DDP */
+#define IXGBE_RXDADV_STAT_TS            0x00010000 /* IEEE1588 Time Stamp */
+
+/* PSRTYPE bit definitions */
+#define IXGBE_PSRTYPE_TCPHDR    0x00000010
+#define IXGBE_PSRTYPE_UDPHDR    0x00000020
+#define IXGBE_PSRTYPE_IPV4HDR   0x00000100
+#define IXGBE_PSRTYPE_IPV6HDR   0x00000200
+#define IXGBE_PSRTYPE_L2HDR     0x00001000
+
+/* SRRCTL bit definitions */
+#define IXGBE_SRRCTL_BSIZEPKT_SHIFT     10     /* so many KBs */
+#define IXGBE_SRRCTL_RDMTS_SHIFT        22
+#define IXGBE_SRRCTL_RDMTS_MASK         0x01C00000
+#define IXGBE_SRRCTL_DROP_EN            0x10000000
+#define IXGBE_SRRCTL_BSIZEPKT_MASK      0x0000007F
+#define IXGBE_SRRCTL_BSIZEHDR_MASK      0x00003F00
+#define IXGBE_SRRCTL_DESCTYPE_LEGACY    0x00000000
+#define IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000
+#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT  0x04000000
+#define IXGBE_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000
+#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000
+#define IXGBE_SRRCTL_DESCTYPE_MASK      0x0E000000
+
+#define IXGBE_RXDPS_HDRSTAT_HDRSP       0x00008000
+#define IXGBE_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF
+
+#define IXGBE_RXDADV_RSSTYPE_MASK       0x0000000F
+#define IXGBE_RXDADV_PKTTYPE_MASK       0x0000FFF0
+#define IXGBE_RXDADV_PKTTYPE_MASK_EX    0x0001FFF0
+#define IXGBE_RXDADV_HDRBUFLEN_MASK     0x00007FE0
+#define IXGBE_RXDADV_RSCCNT_MASK        0x001E0000
+#define IXGBE_RXDADV_RSCCNT_SHIFT       17
+#define IXGBE_RXDADV_HDRBUFLEN_SHIFT    5
+#define IXGBE_RXDADV_SPLITHEADER_EN     0x00001000
+#define IXGBE_RXDADV_SPH                0x8000
+
+/* RSS Hash results */
+#define IXGBE_RXDADV_RSSTYPE_NONE       0x00000000
+#define IXGBE_RXDADV_RSSTYPE_IPV4_TCP   0x00000001
+#define IXGBE_RXDADV_RSSTYPE_IPV4       0x00000002
+#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP   0x00000003
+#define IXGBE_RXDADV_RSSTYPE_IPV6_EX    0x00000004
+#define IXGBE_RXDADV_RSSTYPE_IPV6       0x00000005
+#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX 0x00000006
+#define IXGBE_RXDADV_RSSTYPE_IPV4_UDP   0x00000007
+#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP   0x00000008
+#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX 0x00000009
+
+/* RSS Packet Types as indicated in the receive descriptor. */
+#define IXGBE_RXDADV_PKTTYPE_NONE       0x00000000
+#define IXGBE_RXDADV_PKTTYPE_IPV4       0x00000010 /* IPv4 hdr present */
+#define IXGBE_RXDADV_PKTTYPE_IPV4_EX    0x00000020 /* IPv4 hdr + extensions */
+#define IXGBE_RXDADV_PKTTYPE_IPV6       0x00000040 /* IPv6 hdr present */
+#define IXGBE_RXDADV_PKTTYPE_IPV6_EX    0x00000080 /* IPv6 hdr + extensions */
+#define IXGBE_RXDADV_PKTTYPE_TCP        0x00000100 /* TCP hdr present */
+#define IXGBE_RXDADV_PKTTYPE_UDP        0x00000200 /* UDP hdr present */
+#define IXGBE_RXDADV_PKTTYPE_SCTP       0x00000400 /* SCTP hdr present */
+#define IXGBE_RXDADV_PKTTYPE_NFS        0x00000800 /* NFS hdr present */
+#define IXGBE_RXDADV_PKTTYPE_IPSEC_ESP  0x00001000 /* IPSec ESP */
+#define IXGBE_RXDADV_PKTTYPE_IPSEC_AH   0x00002000 /* IPSec AH */
+#define IXGBE_RXDADV_PKTTYPE_LINKSEC    0x00004000 /* LinkSec Encap */
+#define IXGBE_RXDADV_PKTTYPE_ETQF       0x00008000 /* PKTTYPE is ETQF index */
+#define IXGBE_RXDADV_PKTTYPE_ETQF_MASK  0x00000070 /* ETQF has 8 indices */
+#define IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT 4          /* Right-shift 4 bits */
+
+/* Security Processing bit Indication */
+#define IXGBE_RXDADV_LNKSEC_STATUS_SECP         0x00020000
+#define IXGBE_RXDADV_LNKSEC_ERROR_NO_SA_MATCH   0x08000000
+#define IXGBE_RXDADV_LNKSEC_ERROR_REPLAY_ERROR  0x10000000
+#define IXGBE_RXDADV_LNKSEC_ERROR_BIT_MASK      0x18000000
+#define IXGBE_RXDADV_LNKSEC_ERROR_BAD_SIG       0x18000000
+
+/* Masks to determine if packets should be dropped due to frame errors */
+#define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \
+                                      IXGBE_RXD_ERR_CE | \
+                                      IXGBE_RXD_ERR_LE | \
+                                      IXGBE_RXD_ERR_PE | \
+                                      IXGBE_RXD_ERR_OSE | \
+                                      IXGBE_RXD_ERR_USE)
+
+#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK ( \
+                                      IXGBE_RXDADV_ERR_CE | \
+                                      IXGBE_RXDADV_ERR_LE | \
+                                      IXGBE_RXDADV_ERR_PE | \
+                                      IXGBE_RXDADV_ERR_OSE | \
+                                      IXGBE_RXDADV_ERR_USE)
+
+#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK_82599   IXGBE_RXDADV_ERR_RXE
+
+/* Multicast bit mask */
+#define IXGBE_MCSTCTRL_MFE      0x4
+
+/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
+#define IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE  8
+#define IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE  8
+#define IXGBE_REQ_TX_BUFFER_GRANULARITY   1024
+
+/* Vlan-specific macros */
+#define IXGBE_RX_DESC_SPECIAL_VLAN_MASK  0x0FFF /* VLAN ID in lower 12 bits */
+#define IXGBE_RX_DESC_SPECIAL_PRI_MASK   0xE000 /* Priority in upper 3 bits */
+#define IXGBE_RX_DESC_SPECIAL_PRI_SHIFT  0x000D /* Priority in upper 3 of 16 */
+#define IXGBE_TX_DESC_SPECIAL_PRI_SHIFT  IXGBE_RX_DESC_SPECIAL_PRI_SHIFT
+
+/* SR-IOV specific macros.
+ * Macro arguments are fully parenthesized so callers may safely pass
+ * arbitrary expressions (e.g. IXGBE_MBVFICR(a + b)).
+ */
+#define IXGBE_MBVFICR_INDEX(vf_number)   ((vf_number) >> 4)
+#define IXGBE_MBVFICR(_i)                (0x00710 + ((_i) * 4))
+#define IXGBE_VFLRE(_i)                  ((((_i) & 1) ? 0x001C0 : 0x00600))
+#define IXGBE_VFLREC(_i)                 (0x00700 + ((_i) * 4))
+
+/* Little Endian defines.
+ * Each alias is guarded individually so that a platform which already
+ * provides one of these types does not suppress the definition of the
+ * others.
+ */
+#ifndef __le16
+#define __le16  u16
+#endif
+#ifndef __le32
+#define __le32  u32
+#endif
+#ifndef __le64
+#define __le64  u64
+#endif
+
+/* Big Endian defines */
+#ifndef __be16
+#define __be16  u16
+#endif
+#ifndef __be32
+#define __be32  u32
+#endif
+#ifndef __be64
+#define __be64  u64
+#endif
+/* Flow Director packet-buffer allocation levels.  The numeric values
+ * match the IXGBE_FDIRCTRL_PBALLOC_* register field encodings defined
+ * below (64K = 1, 128K = 2, 256K = 3), so they may be written to the
+ * register directly.
+ */
+enum ixgbe_fdir_pballoc_type {
+       IXGBE_FDIR_PBALLOC_NONE = 0,
+       IXGBE_FDIR_PBALLOC_64K  = 1,
+       IXGBE_FDIR_PBALLOC_128K = 2,
+       IXGBE_FDIR_PBALLOC_256K = 3,
+};
+
+/* Flow Director register values */
+#define IXGBE_FDIRCTRL_PBALLOC_64K              0x00000001
+#define IXGBE_FDIRCTRL_PBALLOC_128K             0x00000002
+#define IXGBE_FDIRCTRL_PBALLOC_256K             0x00000003
+#define IXGBE_FDIRCTRL_INIT_DONE                0x00000008
+#define IXGBE_FDIRCTRL_PERFECT_MATCH            0x00000010
+#define IXGBE_FDIRCTRL_REPORT_STATUS            0x00000020
+#define IXGBE_FDIRCTRL_REPORT_STATUS_ALWAYS     0x00000080
+#define IXGBE_FDIRCTRL_DROP_Q_SHIFT             8
+#define IXGBE_FDIRCTRL_FLEX_SHIFT               16
+#define IXGBE_FDIRCTRL_SEARCHLIM                0x00800000
+#define IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT         24
+#define IXGBE_FDIRCTRL_FULL_THRESH_MASK         0xF0000000
+#define IXGBE_FDIRCTRL_FULL_THRESH_SHIFT        28
+
+#define IXGBE_FDIRTCPM_DPORTM_SHIFT             16
+#define IXGBE_FDIRUDPM_DPORTM_SHIFT             16
+#define IXGBE_FDIRIP6M_DIPM_SHIFT               16
+#define IXGBE_FDIRM_VLANID                      0x00000001
+#define IXGBE_FDIRM_VLANP                       0x00000002
+#define IXGBE_FDIRM_POOL                        0x00000004
+#define IXGBE_FDIRM_L4P                         0x00000008
+#define IXGBE_FDIRM_FLEX                        0x00000010
+#define IXGBE_FDIRM_DIPv6                       0x00000020
+
+#define IXGBE_FDIRFREE_FREE_MASK                0xFFFF
+#define IXGBE_FDIRFREE_FREE_SHIFT               0
+#define IXGBE_FDIRFREE_COLL_MASK                0x7FFF0000
+#define IXGBE_FDIRFREE_COLL_SHIFT               16
+#define IXGBE_FDIRLEN_MAXLEN_MASK               0x3F
+#define IXGBE_FDIRLEN_MAXLEN_SHIFT              0
+#define IXGBE_FDIRLEN_MAXHASH_MASK              0x7FFF0000
+#define IXGBE_FDIRLEN_MAXHASH_SHIFT             16
+#define IXGBE_FDIRUSTAT_ADD_MASK                0xFFFF
+#define IXGBE_FDIRUSTAT_ADD_SHIFT               0
+#define IXGBE_FDIRUSTAT_REMOVE_MASK             0xFFFF0000
+#define IXGBE_FDIRUSTAT_REMOVE_SHIFT            16
+#define IXGBE_FDIRFSTAT_FADD_MASK               0x00FF
+#define IXGBE_FDIRFSTAT_FADD_SHIFT              0
+#define IXGBE_FDIRFSTAT_FREMOVE_MASK            0xFF00
+#define IXGBE_FDIRFSTAT_FREMOVE_SHIFT           8
+#define IXGBE_FDIRPORT_DESTINATION_SHIFT        16
+#define IXGBE_FDIRVLAN_FLEX_SHIFT               16
+#define IXGBE_FDIRHASH_BUCKET_VALID_SHIFT       15
+#define IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT       16
+
+#define IXGBE_FDIRCMD_CMD_MASK                  0x00000003
+#define IXGBE_FDIRCMD_CMD_ADD_FLOW              0x00000001
+#define IXGBE_FDIRCMD_CMD_REMOVE_FLOW           0x00000002
+#define IXGBE_FDIRCMD_CMD_QUERY_REM_FILT        0x00000003
+#define IXGBE_FDIRCMD_FILTER_VALID              0x00000004
+#define IXGBE_FDIRCMD_FILTER_UPDATE             0x00000008
+#define IXGBE_FDIRCMD_IPv6DMATCH                0x00000010
+#define IXGBE_FDIRCMD_L4TYPE_UDP                0x00000020
+#define IXGBE_FDIRCMD_L4TYPE_TCP                0x00000040
+#define IXGBE_FDIRCMD_L4TYPE_SCTP               0x00000060
+#define IXGBE_FDIRCMD_IPV6                      0x00000080
+#define IXGBE_FDIRCMD_CLEARHT                   0x00000100
+#define IXGBE_FDIRCMD_DROP                      0x00000200
+#define IXGBE_FDIRCMD_INT                       0x00000400
+#define IXGBE_FDIRCMD_LAST                      0x00000800
+#define IXGBE_FDIRCMD_COLLISION                 0x00001000
+#define IXGBE_FDIRCMD_QUEUE_EN                  0x00008000
+#define IXGBE_FDIRCMD_FLOW_TYPE_SHIFT           5
+#define IXGBE_FDIRCMD_RX_QUEUE_SHIFT            16
+#define IXGBE_FDIRCMD_VT_POOL_SHIFT             24
+#define IXGBE_FDIR_INIT_DONE_POLL               10
+#define IXGBE_FDIRCMD_CMD_POLL                  10
+
+#define IXGBE_FDIR_DROP_QUEUE                   127
+
+
+/* Manageability Host Interface defines */
+#define IXGBE_HI_MAX_BLOCK_BYTE_LENGTH       1792 /* Num of bytes in range */
+#define IXGBE_HI_MAX_BLOCK_DWORD_LENGTH      448 /* Num of dwords in range */
+#define IXGBE_HI_COMMAND_TIMEOUT             500 /* Process HI command limit */
+
+/* CEM Support.
+ * Hex constants consistently use a lowercase '0x' prefix, matching the
+ * rest of this file (FW_CEM_CMD_RESERVED previously used '0X0').
+ */
+#define FW_CEM_HDR_LEN                0x4
+#define FW_CEM_CMD_DRIVER_INFO        0xDD
+#define FW_CEM_CMD_DRIVER_INFO_LEN    0x5
+#define FW_CEM_CMD_RESERVED           0x0
+#define FW_CEM_UNUSED_VER             0x0
+#define FW_CEM_MAX_RETRIES            3
+#define FW_CEM_RESP_STATUS_SUCCESS    0x1
+
+/* Host Interface Command Structures */
+
+/* Host Interface command header.  Shared between commands sent to and
+ * responses received from firmware: the union selects the reserved
+ * byte (command direction) or the returned status (response direction).
+ */
+struct ixgbe_hic_hdr {
+       u8 cmd;                 /* command identifier */
+       u8 buf_len;             /* length of the buffer that follows */
+       union {
+               u8 cmd_resv;    /* reserved when used as a command */
+               u8 ret_status;  /* status when used as a response */
+       } cmd_or_resp;
+       u8 checksum;            /* message checksum */
+};
+
+/* Driver information message body.
+ * NOTE(review): presumably the payload for FW_CEM_CMD_DRIVER_INFO;
+ * version fields are ordered sub, build, minor, major -- confirm
+ * against the code that populates this structure.
+ */
+struct ixgbe_hic_drv_info {
+       struct ixgbe_hic_hdr hdr;
+       u8 port_num;
+       u8 ver_sub;
+       u8 ver_build;
+       u8 ver_min;
+       u8 ver_maj;
+       u8 pad; /* end spacing to ensure length is mult. of dword */
+       u16 pad2; /* end spacing to ensure length is mult. of dword2 */
+};
+
+/* Transmit Descriptor - Legacy.
+ * NOTE(review): field layout appears to mirror the hardware descriptor
+ * format (fixed-width, little-endian fields) -- do not reorder or
+ * resize fields; confirm against the controller datasheet.
+ */
+struct ixgbe_legacy_tx_desc {
+       u64 buffer_addr;       /* Address of the descriptor's data buffer */
+       union {
+               __le32 data;
+               struct {
+                       __le16 length;    /* Data buffer length */
+                       u8 cso;           /* Checksum offset */
+                       u8 cmd;           /* Descriptor control */
+               } flags;
+       } lower;
+       union {
+               __le32 data;
+               struct {
+                       u8 status;        /* Descriptor status */
+                       u8 css;           /* Checksum start */
+                       __le16 vlan;
+               } fields;
+       } upper;
+};
+
+/* Transmit Descriptor - Advanced.
+ * Two views of the same 16-byte descriptor: 'read' is the format
+ * software fills in; NOTE(review): 'wb' is presumably the hardware
+ * write-back format (cf. the explicit "writeback" tag on the advanced
+ * Rx descriptor below) -- confirm against the datasheet.
+ */
+union ixgbe_adv_tx_desc {
+       struct {
+               __le64 buffer_addr;      /* Address of descriptor's data buf */
+               __le32 cmd_type_len;
+               __le32 olinfo_status;
+       } read;
+       struct {
+               __le64 rsvd;       /* Reserved */
+               __le32 nxtseq_seed;
+               __le32 status;
+       } wb;
+};
+
+/* Receive Descriptor - Legacy.
+ * Data is DMAed into the buffer at buffer_addr; field layout mirrors
+ * the hardware descriptor -- do not reorder or resize fields.
+ */
+struct ixgbe_legacy_rx_desc {
+       __le64 buffer_addr; /* Address of the descriptor's data buffer */
+       __le16 length;      /* Length of data DMAed into data buffer */
+       __le16 csum;        /* Packet checksum */
+       u8 status;          /* Descriptor status */
+       u8 errors;          /* Descriptor Errors */
+       __le16 vlan;
+};
+
+/* Receive Descriptor - Advanced.
+ * Two views of the same 16-byte descriptor: 'read' carries the packet
+ * and header buffer addresses programmed by software; 'wb' is the
+ * write-back layout (RSS/packet-type info, checksum or RSS hash,
+ * status/error, length and VLAN tag).
+ */
+union ixgbe_adv_rx_desc {
+       struct {
+               __le64 pkt_addr; /* Packet buffer address */
+               __le64 hdr_addr; /* Header buffer address */
+       } read;
+       struct {
+               struct {
+                       union {
+                               __le32 data;
+                               struct {
+                                       __le16 pkt_info; /* RSS, Pkt type */
+                                       __le16 hdr_info; /* Splithdr, hdrlen */
+                               } hs_rss;
+                       } lo_dword;
+                       union {
+                               __le32 rss; /* RSS Hash */
+                               struct {
+                                       __le16 ip_id; /* IP id */
+                                       __le16 csum; /* Packet Checksum */
+                               } csum_ip;
+                       } hi_dword;
+               } lower;
+               struct {
+                       __le32 status_error; /* ext status/error */
+                       __le16 length; /* Packet length */
+                       __le16 vlan; /* VLAN tag */
+               } upper;
+       } wb;  /* writeback */
+};
+
+/* Context descriptors (Advanced Tx): offload parameters packed per the
+ * field names -- VLAN/MAC/IP header lengths, TU command/type, and
+ * MSS / L4 length / index.  Bit layout is given by the
+ * IXGBE_ADVTXD_* masks and shifts defined below.
+ */
+struct ixgbe_adv_tx_context_desc {
+       __le32 vlan_macip_lens;
+       __le32 seqnum_seed;
+       __le32 type_tucmd_mlhl;
+       __le32 mss_l4len_idx;
+};
+
+/* Adv Transmit Descriptor Config Masks */
+#define IXGBE_ADVTXD_DTALEN_MASK      0x0000FFFF /* Data buf length(bytes) */
+#define IXGBE_ADVTXD_MAC_LINKSEC      0x00040000 /* Insert LinkSec */
+#define IXGBE_ADVTXD_MAC_TSTAMP       0x00080000 /* IEEE1588 time stamp */
+#define IXGBE_ADVTXD_IPSEC_SA_INDEX_MASK   0x000003FF /* IPSec SA index */
+#define IXGBE_ADVTXD_IPSEC_ESP_LEN_MASK    0x000001FF /* IPSec ESP length */
+#define IXGBE_ADVTXD_DTYP_MASK  0x00F00000 /* DTYP mask */
+#define IXGBE_ADVTXD_DTYP_CTXT  0x00200000 /* Advanced Context Desc */
+#define IXGBE_ADVTXD_DTYP_DATA  0x00300000 /* Advanced Data Descriptor */
+#define IXGBE_ADVTXD_DCMD_EOP   IXGBE_TXD_CMD_EOP  /* End of Packet */
+#define IXGBE_ADVTXD_DCMD_IFCS  IXGBE_TXD_CMD_IFCS /* Insert FCS */
+#define IXGBE_ADVTXD_DCMD_RS    IXGBE_TXD_CMD_RS   /* Report Status */
+#define IXGBE_ADVTXD_DCMD_DDTYP_ISCSI 0x10000000    /* DDP hdr type or iSCSI */
+#define IXGBE_ADVTXD_DCMD_DEXT  IXGBE_TXD_CMD_DEXT /* Desc ext (1=Adv) */
+#define IXGBE_ADVTXD_DCMD_VLE   IXGBE_TXD_CMD_VLE  /* VLAN pkt enable */
+#define IXGBE_ADVTXD_DCMD_TSE   0x80000000 /* TCP Seg enable */
+#define IXGBE_ADVTXD_STAT_DD    IXGBE_TXD_STAT_DD  /* Descriptor Done */
+#define IXGBE_ADVTXD_STAT_SN_CRC      0x00000002 /* NXTSEQ/SEED pres in WB */
+#define IXGBE_ADVTXD_STAT_RSV   0x0000000C /* STA Reserved */
+#define IXGBE_ADVTXD_IDX_SHIFT  4 /* Adv desc Index shift */
+#define IXGBE_ADVTXD_CC         0x00000080 /* Check Context */
+#define IXGBE_ADVTXD_POPTS_SHIFT      8  /* Adv desc POPTS shift */
+#define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \
+                                 IXGBE_ADVTXD_POPTS_SHIFT)
+#define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \
+                                 IXGBE_ADVTXD_POPTS_SHIFT)
+#define IXGBE_ADVTXD_POPTS_ISCO_1ST  0x00000000 /* 1st TSO of iSCSI PDU */
+#define IXGBE_ADVTXD_POPTS_ISCO_MDL  0x00000800 /* Middle TSO of iSCSI PDU */
+#define IXGBE_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */
+#define IXGBE_ADVTXD_POPTS_ISCO_FULL 0x00001800 /* 1st&Last TSO-full iSCSI PDU */
+#define IXGBE_ADVTXD_POPTS_RSV       0x00002000 /* POPTS Reserved */
+#define IXGBE_ADVTXD_PAYLEN_SHIFT    14 /* Adv desc PAYLEN shift */
+#define IXGBE_ADVTXD_MACLEN_SHIFT    9  /* Adv ctxt desc mac len shift */
+#define IXGBE_ADVTXD_VLAN_SHIFT      16  /* Adv ctxt vlan tag shift */
+#define IXGBE_ADVTXD_TUCMD_IPV4      0x00000400  /* IP Packet Type: 1=IPv4 */
+#define IXGBE_ADVTXD_TUCMD_IPV6      0x00000000  /* IP Packet Type: 0=IPv6 */
+#define IXGBE_ADVTXD_TUCMD_L4T_UDP   0x00000000  /* L4 Packet TYPE of UDP */
+#define IXGBE_ADVTXD_TUCMD_L4T_TCP   0x00000800  /* L4 Packet TYPE of TCP */
+#define IXGBE_ADVTXD_TUCMD_L4T_SCTP  0x00001000  /* L4 Packet TYPE of SCTP */
+#define IXGBE_ADVTXD_TUCMD_MKRREQ    0x00002000 /*Req requires Markers and CRC*/
+#define IXGBE_ADVTXD_POPTS_IPSEC      0x00000400 /* IPSec offload request */
+#define IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */
+#define IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000/* ESP Encrypt Enable */
+#define IXGBE_ADVTXT_TUCMD_FCOE      0x00008000       /* FCoE Frame Type */
+#define IXGBE_ADVTXD_FCOEF_EOF_MASK  (0x3 << 10)      /* FC EOF index */
+#define IXGBE_ADVTXD_FCOEF_SOF       ((1 << 2) << 10) /* FC SOF index */
+#define IXGBE_ADVTXD_FCOEF_PARINC    ((1 << 3) << 10) /* Rel_Off in F_CTL */
+#define IXGBE_ADVTXD_FCOEF_ORIE      ((1 << 4) << 10) /* Orientation: End */
+#define IXGBE_ADVTXD_FCOEF_ORIS      ((1 << 5) << 10) /* Orientation: Start */
+#define IXGBE_ADVTXD_FCOEF_EOF_N     (0x0 << 10)      /* 00: EOFn */
+#define IXGBE_ADVTXD_FCOEF_EOF_T     (0x1 << 10)      /* 01: EOFt */
+#define IXGBE_ADVTXD_FCOEF_EOF_NI    (0x2 << 10)      /* 10: EOFni */
+#define IXGBE_ADVTXD_FCOEF_EOF_A     (0x3 << 10)      /* 11: EOFa */
+#define IXGBE_ADVTXD_L4LEN_SHIFT     8  /* Adv ctxt L4LEN shift */
+#define IXGBE_ADVTXD_MSS_SHIFT       16  /* Adv ctxt MSS shift */
+
+/* Autonegotiation advertised speeds */
+typedef u32 ixgbe_autoneg_advertised;
+/* Link speed */
+typedef u32 ixgbe_link_speed;
+#define IXGBE_LINK_SPEED_UNKNOWN   0
+#define IXGBE_LINK_SPEED_100_FULL  0x0008
+#define IXGBE_LINK_SPEED_1GB_FULL  0x0020
+#define IXGBE_LINK_SPEED_10GB_FULL 0x0080
+#define IXGBE_LINK_SPEED_82598_AUTONEG (IXGBE_LINK_SPEED_1GB_FULL | \
+                                        IXGBE_LINK_SPEED_10GB_FULL)
+#define IXGBE_LINK_SPEED_82599_AUTONEG (IXGBE_LINK_SPEED_100_FULL | \
+                                        IXGBE_LINK_SPEED_1GB_FULL | \
+                                        IXGBE_LINK_SPEED_10GB_FULL)
+
+
+/* Physical layer type */
+typedef u32 ixgbe_physical_layer;
+#define IXGBE_PHYSICAL_LAYER_UNKNOWN      0
+#define IXGBE_PHYSICAL_LAYER_10GBASE_T    0x0001
+#define IXGBE_PHYSICAL_LAYER_1000BASE_T   0x0002
+#define IXGBE_PHYSICAL_LAYER_100BASE_TX   0x0004
+#define IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU  0x0008
+#define IXGBE_PHYSICAL_LAYER_10GBASE_LR   0x0010
+#define IXGBE_PHYSICAL_LAYER_10GBASE_LRM  0x0020
+#define IXGBE_PHYSICAL_LAYER_10GBASE_SR   0x0040
+#define IXGBE_PHYSICAL_LAYER_10GBASE_KX4  0x0080
+#define IXGBE_PHYSICAL_LAYER_10GBASE_CX4  0x0100
+#define IXGBE_PHYSICAL_LAYER_1000BASE_KX  0x0200
+#define IXGBE_PHYSICAL_LAYER_1000BASE_BX  0x0400
+#define IXGBE_PHYSICAL_LAYER_10GBASE_KR   0x0800
+#define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x1000
+#define IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA 0x2000
+
+/* Flow Control Data Sheet defined values
+ * Calculation and defines taken from 802.1bb Annex O
+ */
+
+/* BitTimes (BT) conversion */
+/*
+ * Arguments are parenthesized so that expressions such as
+ * IXGBE_B2BT(a + b) expand correctly; the unparenthesized originals
+ * would let '*' and '/' bind to only part of the argument.
+ */
+#define IXGBE_BT2KB(BT) (((BT) + 1023) / (8 * 1024))
+#define IXGBE_B2BT(BT) ((BT) * 8)
+
+/* Calculate Delay to respond to PFC */
+#define IXGBE_PFC_D    672
+
+/* Calculate Cable Delay */
+#define IXGBE_CABLE_DC 5556 /* Delay Copper */
+#define IXGBE_CABLE_DO 5000 /* Delay Optical */
+
+/* Calculate Interface Delay X540 */
+#define IXGBE_PHY_DC   25600       /* Delay 10G BASET */
+#define IXGBE_MAC_DC   8192        /* Delay Copper XAUI interface */
+#define IXGBE_XAUI_DC  (2 * 2048) /* Delay Copper Phy */
+
+#define IXGBE_ID_X540  (IXGBE_MAC_DC + IXGBE_XAUI_DC + IXGBE_PHY_DC)
+
+/* Calculate Interface Delay 82598, 82599 */
+#define IXGBE_PHY_D    12800
+#define IXGBE_MAC_D    4096
+#define IXGBE_XAUI_D   (2 * 1024)
+
+#define IXGBE_ID       (IXGBE_MAC_D + IXGBE_XAUI_D + IXGBE_PHY_D)
+
+/* Calculate Delay incurred from higher layer */
+#define IXGBE_HD       6144
+
+/* Calculate PCI Bus delay for low thresholds */
+#define IXGBE_PCI_DELAY        10000
+
+/* Calculate X540 delay value in bit times */
+/* NOTE(review): (36 / 25) is integer division and evaluates to 1, not
+ * 1.44, so every delay formula below (IXGBE_DV, IXGBE_DV_X540,
+ * IXGBE_LOW_DV_X540) is effectively scaled by 1.  Confirm this matches
+ * the 802.1bb Annex O intent before changing it.
+ */
+#define IXGBE_FILL_RATE (36 / 25)
+
+#define IXGBE_DV_X540(LINK, TC) (IXGBE_FILL_RATE * \
+                                (IXGBE_B2BT(LINK) + IXGBE_PFC_D + \
+                                (2 * IXGBE_CABLE_DC) + \
+                                (2 * IXGBE_ID_X540) + \
+                                IXGBE_HD + IXGBE_B2BT(TC)))
+
+/* Calculate 82599, 82598 delay value in bit times */
+#define IXGBE_DV(LINK, TC) (IXGBE_FILL_RATE * \
+                           (IXGBE_B2BT(LINK) + IXGBE_PFC_D + \
+                           (2 * IXGBE_CABLE_DC) + (2 * IXGBE_ID) + \
+                           IXGBE_HD + IXGBE_B2BT(TC)))
+
+/* Calculate low threshold delay values */
+#define IXGBE_LOW_DV_X540(TC) (2 * IXGBE_B2BT(TC) + \
+                              (IXGBE_FILL_RATE * IXGBE_PCI_DELAY))
+#define IXGBE_LOW_DV(TC)      (2 * IXGBE_LOW_DV_X540(TC))
+
+/* Software ATR hash keys */
+#define IXGBE_ATR_BUCKET_HASH_KEY    0x3DAD14E2
+#define IXGBE_ATR_SIGNATURE_HASH_KEY 0x174D3614
+
+/* Software ATR input stream values and masks */
+#define IXGBE_ATR_HASH_MASK     0x7fff
+#define IXGBE_ATR_L4TYPE_MASK      0x3
+#define IXGBE_ATR_L4TYPE_UDP       0x1
+#define IXGBE_ATR_L4TYPE_TCP       0x2
+#define IXGBE_ATR_L4TYPE_SCTP      0x3
+#define IXGBE_ATR_L4TYPE_IPV6_MASK 0x4
+enum ixgbe_atr_flow_type {
+       IXGBE_ATR_FLOW_TYPE_IPV4   = 0x0,
+       IXGBE_ATR_FLOW_TYPE_UDPV4  = 0x1,
+       IXGBE_ATR_FLOW_TYPE_TCPV4  = 0x2,
+       IXGBE_ATR_FLOW_TYPE_SCTPV4 = 0x3,
+       IXGBE_ATR_FLOW_TYPE_IPV6   = 0x4,
+       IXGBE_ATR_FLOW_TYPE_UDPV6  = 0x5,
+       IXGBE_ATR_FLOW_TYPE_TCPV6  = 0x6,
+       IXGBE_ATR_FLOW_TYPE_SCTPV6 = 0x7,
+};
+
+/* Flow Director ATR input struct. */
+union ixgbe_atr_input {
+	/*
+	 * Byte layout in order, all values with MSB first:
+	 *
+	 * vm_pool    - 1 byte
+	 * flow_type  - 1 byte
+	 * vlan_id    - 2 bytes
+	 * dst_ip     - 16 bytes
+	 * src_ip     - 16 bytes
+	 * src_port   - 2 bytes
+	 * dst_port   - 2 bytes
+	 * flex_bytes - 2 bytes
+	 * bkt_hash   - 2 bytes
+	 */
+	struct {
+		u8     vm_pool;
+		u8     flow_type;
+		__be16 vlan_id;
+		__be32 dst_ip[4];
+		__be32 src_ip[4];
+		__be16 src_port;
+		__be16 dst_port;
+		__be16 flex_bytes;
+		__be16 bkt_hash;
+	} formatted;
+	/* raw view of the same 44 bytes as 11 big-endian dwords */
+	__be32 dword_stream[11];
+};
+
+/* Flow Director compressed ATR hash input struct */
+union ixgbe_atr_hash_dword {
+       struct {
+               u8 vm_pool;
+               u8 flow_type;
+               __be16 vlan_id;
+       } formatted;
+       __be32 ip;
+       struct {
+               __be16 src;
+               __be16 dst;
+       } port;
+       __be16 flex_bytes;
+       __be32 dword;
+};
+
+
+/*
+ * Unavailable: The FCoE Boot Option ROM is not present in the flash.
+ * Disabled: Present; boot order is not set for any targets on the port.
+ * Enabled: Present; boot order is set for at least one target on the port.
+ */
+enum ixgbe_fcoe_boot_status {
+	ixgbe_fcoe_bootstatus_disabled = 0,
+	ixgbe_fcoe_bootstatus_enabled = 1,
+	ixgbe_fcoe_bootstatus_unavailable = 0xFFFF
+};
+
+/* Kind of NVM backing store detected behind the EEPROM interface */
+enum ixgbe_eeprom_type {
+	ixgbe_eeprom_uninitialized = 0,
+	ixgbe_eeprom_spi,
+	ixgbe_flash,
+	ixgbe_eeprom_none /* No NVM support */
+};
+
+/* MAC silicon families handled by this shared code */
+enum ixgbe_mac_type {
+	ixgbe_mac_unknown = 0,
+	ixgbe_mac_82598EB,
+	ixgbe_mac_82599EB,
+	ixgbe_mac_82599_vf,
+	ixgbe_mac_X540,
+	ixgbe_mac_X540_vf,
+	ixgbe_num_macs
+};
+
+enum ixgbe_phy_type {
+       ixgbe_phy_unknown = 0,
+       ixgbe_phy_none,
+       ixgbe_phy_tn,
+       ixgbe_phy_aq,
+       ixgbe_phy_cu_unknown,
+       ixgbe_phy_qt,
+       ixgbe_phy_xaui,
+       ixgbe_phy_nl,
+       ixgbe_phy_sfp_passive_tyco,
+       ixgbe_phy_sfp_passive_unknown,
+       ixgbe_phy_sfp_active_unknown,
+       ixgbe_phy_sfp_avago,
+       ixgbe_phy_sfp_ftl,
+       ixgbe_phy_sfp_ftl_active,
+       ixgbe_phy_sfp_unknown,
+       ixgbe_phy_sfp_intel,
+       ixgbe_phy_sfp_unsupported, /*Enforce bit set with unsupported module*/
+       ixgbe_phy_generic
+};
+
+/*
+ * SFP+ module type IDs:
+ *
+ * ID  Module Type
+ * =============
+ * 0   SFP_DA_CU
+ * 1   SFP_SR
+ * 2   SFP_LR
+ * 3    SFP_DA_CU_CORE0 - 82599-specific
+ * 4    SFP_DA_CU_CORE1 - 82599-specific
+ * 5    SFP_SR/LR_CORE0 - 82599-specific
+ * 6    SFP_SR/LR_CORE1 - 82599-specific
+ * 7    SFP_act_lmt_DA_CORE0 - 82599-specific
+ * 8    SFP_act_lmt_DA_CORE1 - 82599-specific
+ * 9    SFP_1g_cu_CORE0 - 82599-specific
+ * 10   SFP_1g_cu_CORE1 - 82599-specific
+ */
+enum ixgbe_sfp_type {
+	ixgbe_sfp_type_da_cu = 0,
+	ixgbe_sfp_type_sr = 1,
+	ixgbe_sfp_type_lr = 2,
+	ixgbe_sfp_type_da_cu_core0 = 3,
+	ixgbe_sfp_type_da_cu_core1 = 4,
+	ixgbe_sfp_type_srlr_core0 = 5,
+	ixgbe_sfp_type_srlr_core1 = 6,
+	ixgbe_sfp_type_da_act_lmt_core0 = 7,
+	ixgbe_sfp_type_da_act_lmt_core1 = 8,
+	ixgbe_sfp_type_1g_cu_core0 = 9,
+	ixgbe_sfp_type_1g_cu_core1 = 10,
+	/* sentinel values, not real module IDs */
+	ixgbe_sfp_type_not_present = 0xFFFE,
+	ixgbe_sfp_type_unknown = 0xFFFF
+};
+
+/* Physical media attached to the port */
+enum ixgbe_media_type {
+	ixgbe_media_type_unknown = 0,
+	ixgbe_media_type_fiber,
+	ixgbe_media_type_copper,
+	ixgbe_media_type_backplane,
+	ixgbe_media_type_cx4,
+	ixgbe_media_type_virtual /* VF: no real media behind the function */
+};
+
+/* Flow Control Settings */
+enum ixgbe_fc_mode {
+	ixgbe_fc_none = 0,
+	ixgbe_fc_rx_pause, /* honor received PAUSE frames only */
+	ixgbe_fc_tx_pause, /* send PAUSE frames only */
+	ixgbe_fc_full,     /* both directions */
+	ixgbe_fc_default
+};
+
+/* Smart Speed Settings */
+#define IXGBE_SMARTSPEED_MAX_RETRIES   3
+enum ixgbe_smart_speed {
+       ixgbe_smart_speed_auto = 0,
+       ixgbe_smart_speed_on,
+       ixgbe_smart_speed_off
+};
+
+/* PCI bus types */
+enum ixgbe_bus_type {
+       ixgbe_bus_type_unknown = 0,
+       ixgbe_bus_type_pci,
+       ixgbe_bus_type_pcix,
+       ixgbe_bus_type_pci_express,
+       ixgbe_bus_type_reserved
+};
+
+/* PCI bus speeds */
+enum ixgbe_bus_speed {
+       ixgbe_bus_speed_unknown = 0,
+       ixgbe_bus_speed_33      = 33,
+       ixgbe_bus_speed_66      = 66,
+       ixgbe_bus_speed_100     = 100,
+       ixgbe_bus_speed_120     = 120,
+       ixgbe_bus_speed_133     = 133,
+       ixgbe_bus_speed_2500    = 2500,
+       ixgbe_bus_speed_5000    = 5000,
+       ixgbe_bus_speed_reserved
+};
+
+/* PCI bus widths */
+enum ixgbe_bus_width {
+       ixgbe_bus_width_unknown = 0,
+       ixgbe_bus_width_pcie_x1 = 1,
+       ixgbe_bus_width_pcie_x2 = 2,
+       ixgbe_bus_width_pcie_x4 = 4,
+       ixgbe_bus_width_pcie_x8 = 8,
+       ixgbe_bus_width_32      = 32,
+       ixgbe_bus_width_64      = 64,
+       ixgbe_bus_width_reserved
+};
+
+/* Software bookkeeping for the receive-address (RAR) and multicast
+ * table (MTA) filters.
+ */
+struct ixgbe_addr_filter_info {
+	u32 num_mc_addrs;
+	u32 rar_used_count;
+	u32 mta_in_use;
+	u32 overflow_promisc;
+	bool user_set_promisc;
+};
+
+/* Bus parameters */
+struct ixgbe_bus_info {
+       enum ixgbe_bus_speed speed;
+       enum ixgbe_bus_width width;
+       enum ixgbe_bus_type type;
+
+       u16 func;
+       u16 lan_id;
+};
+
+/* Flow control parameters */
+struct ixgbe_fc_info {
+       u32 high_water[MAX_TRAFFIC_CLASS]; /* Flow Control High-water */
+       u32 low_water; /* Flow Control Low-water */
+       u16 pause_time; /* Flow Control Pause timer */
+       bool send_xon; /* Flow control send XON */
+       bool strict_ieee; /* Strict IEEE mode */
+       bool disable_fc_autoneg; /* Do not autonegotiate FC */
+       bool fc_was_autonegged; /* Is current_mode the result of autonegging? */
+       enum ixgbe_fc_mode current_mode; /* FC mode in effect */
+       enum ixgbe_fc_mode requested_mode; /* FC mode requested by caller */
+};
+
+/* Statistics counters collected by the MAC */
+struct ixgbe_hw_stats {
+       u64 crcerrs;
+       u64 illerrc;
+       u64 errbc;
+       u64 mspdc;
+       u64 mpctotal;
+       u64 mpc[8];
+       u64 mlfc;
+       u64 mrfc;
+       u64 rlec;
+       u64 lxontxc;
+       u64 lxonrxc;
+       u64 lxofftxc;
+       u64 lxoffrxc;
+       u64 pxontxc[8];
+       u64 pxonrxc[8];
+       u64 pxofftxc[8];
+       u64 pxoffrxc[8];
+       u64 prc64;
+       u64 prc127;
+       u64 prc255;
+       u64 prc511;
+       u64 prc1023;
+       u64 prc1522;
+       u64 gprc;
+       u64 bprc;
+       u64 mprc;
+       u64 gptc;
+       u64 gorc;
+       u64 gotc;
+       u64 rnbc[8];
+       u64 ruc;
+       u64 rfc;
+       u64 roc;
+       u64 rjc;
+       u64 mngprc;
+       u64 mngpdc;
+       u64 mngptc;
+       u64 tor;
+       u64 tpr;
+       u64 tpt;
+       u64 ptc64;
+       u64 ptc127;
+       u64 ptc255;
+       u64 ptc511;
+       u64 ptc1023;
+       u64 ptc1522;
+       u64 mptc;
+       u64 bptc;
+       u64 xec;
+       u64 qprc[16];
+       u64 qptc[16];
+       u64 qbrc[16];
+       u64 qbtc[16];
+       u64 qprdc[16];
+       u64 pxon2offc[8];
+       u64 fdirustat_add;
+       u64 fdirustat_remove;
+       u64 fdirfstat_fadd;
+       u64 fdirfstat_fremove;
+       u64 fdirmatch;
+       u64 fdirmiss;
+       u64 fccrc;
+       u64 fclast;
+       u64 fcoerpdc;
+       u64 fcoeprc;
+       u64 fcoeptc;
+       u64 fcoedwrc;
+       u64 fcoedwtc;
+       u64 fcoe_noddp;
+       u64 fcoe_noddp_ext_buff;
+       u64 ldpcec;
+       u64 pcrc8ec;
+       u64 b2ospc;
+       u64 b2ogprc;
+       u64 o2bgptc;
+       u64 o2bspc;
+};
+
+/* forward declaration */
+struct ixgbe_hw;
+
+/* iterator type for walking multicast address lists */
+typedef u8* (*ixgbe_mc_addr_itr) (struct ixgbe_hw *hw, u8 **mc_addr_ptr,
+                                  u32 *vmdq);
+
+/* Function pointer table */
+struct ixgbe_eeprom_operations {
+       s32 (*init_params)(struct ixgbe_hw *);
+       s32 (*read)(struct ixgbe_hw *, u16, u16 *);
+       s32 (*read_buffer)(struct ixgbe_hw *, u16, u16, u16 *);
+       s32 (*write)(struct ixgbe_hw *, u16, u16);
+       s32 (*write_buffer)(struct ixgbe_hw *, u16, u16, u16 *);
+       s32 (*validate_checksum)(struct ixgbe_hw *, u16 *);
+       s32 (*update_checksum)(struct ixgbe_hw *);
+       u16 (*calc_checksum)(struct ixgbe_hw *);
+};
+
+struct ixgbe_mac_operations {
+       s32 (*init_hw)(struct ixgbe_hw *);
+       s32 (*reset_hw)(struct ixgbe_hw *);
+       s32 (*start_hw)(struct ixgbe_hw *);
+       s32 (*clear_hw_cntrs)(struct ixgbe_hw *);
+       void (*enable_relaxed_ordering)(struct ixgbe_hw *);
+       enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *);
+       u32 (*get_supported_physical_layer)(struct ixgbe_hw *);
+       s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *);
+       s32 (*get_san_mac_addr)(struct ixgbe_hw *, u8 *);
+       s32 (*set_san_mac_addr)(struct ixgbe_hw *, u8 *);
+       s32 (*get_device_caps)(struct ixgbe_hw *, u16 *);
+       s32 (*get_wwn_prefix)(struct ixgbe_hw *, u16 *, u16 *);
+       s32 (*get_fcoe_boot_status)(struct ixgbe_hw *, u16 *);
+       s32 (*stop_adapter)(struct ixgbe_hw *);
+       s32 (*get_bus_info)(struct ixgbe_hw *);
+       void (*set_lan_id)(struct ixgbe_hw *);
+       s32 (*read_analog_reg8)(struct ixgbe_hw*, u32, u8*);
+       s32 (*write_analog_reg8)(struct ixgbe_hw*, u32, u8);
+       s32 (*setup_sfp)(struct ixgbe_hw *);
+       s32 (*enable_rx_dma)(struct ixgbe_hw *, u32);
+       s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u16);
+       void (*release_swfw_sync)(struct ixgbe_hw *, u16);
+
+       /* Link */
+       void (*disable_tx_laser)(struct ixgbe_hw *);
+       void (*enable_tx_laser)(struct ixgbe_hw *);
+       void (*flap_tx_laser)(struct ixgbe_hw *);
+       s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool);
+       s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool);
+       s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *,
+                                    bool *);
+
+       /* Packet Buffer manipulation */
+       void (*setup_rxpba)(struct ixgbe_hw *, int, u32, int);
+
+       /* LED */
+       s32 (*led_on)(struct ixgbe_hw *, u32);
+       s32 (*led_off)(struct ixgbe_hw *, u32);
+       s32 (*blink_led_start)(struct ixgbe_hw *, u32);
+       s32 (*blink_led_stop)(struct ixgbe_hw *, u32);
+
+       /* RAR, Multicast, VLAN */
+       s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32, u32);
+       s32 (*set_uc_addr)(struct ixgbe_hw *, u32, u8 *);
+       s32 (*clear_rar)(struct ixgbe_hw *, u32);
+       s32 (*insert_mac_addr)(struct ixgbe_hw *, u8 *, u32);
+       s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32);
+       s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32);
+       s32 (*init_rx_addrs)(struct ixgbe_hw *);
+       s32 (*update_uc_addr_list)(struct ixgbe_hw *, u8 *, u32,
+                                  ixgbe_mc_addr_itr);
+       s32 (*update_mc_addr_list)(struct ixgbe_hw *, u8 *, u32,
+                                  ixgbe_mc_addr_itr, bool clear);
+       s32 (*enable_mc)(struct ixgbe_hw *);
+       s32 (*disable_mc)(struct ixgbe_hw *);
+       s32 (*clear_vfta)(struct ixgbe_hw *);
+       s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
+       s32 (*init_uta_tables)(struct ixgbe_hw *);
+       void (*set_mac_anti_spoofing)(struct ixgbe_hw *, bool, int);
+       void (*set_vlan_anti_spoofing)(struct ixgbe_hw *, bool, int);
+
+       /* Flow Control */
+       s32 (*fc_enable)(struct ixgbe_hw *, s32);
+
+       /* Manageability interface */
+       s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
+};
+
+struct ixgbe_phy_operations {
+       s32 (*identify)(struct ixgbe_hw *);
+       s32 (*identify_sfp)(struct ixgbe_hw *);
+       s32 (*init)(struct ixgbe_hw *);
+       s32 (*reset)(struct ixgbe_hw *);
+       s32 (*read_reg)(struct ixgbe_hw *, u32, u32, u16 *);
+       s32 (*write_reg)(struct ixgbe_hw *, u32, u32, u16);
+       s32 (*setup_link)(struct ixgbe_hw *);
+       s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool,
+                               bool);
+       s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *);
+       s32 (*get_firmware_version)(struct ixgbe_hw *, u16 *);
+       s32 (*read_i2c_byte)(struct ixgbe_hw *, u8, u8, u8 *);
+       s32 (*write_i2c_byte)(struct ixgbe_hw *, u8, u8, u8);
+       s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *);
+       s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
+       void (*i2c_bus_clear)(struct ixgbe_hw *);
+       s32 (*check_overtemp)(struct ixgbe_hw *);
+};
+
+struct ixgbe_eeprom_info {
+       struct ixgbe_eeprom_operations  ops;
+       enum ixgbe_eeprom_type          type;
+       u32                             semaphore_delay;
+       u16                             word_size;
+       u16                             address_bits;
+       u16                             word_page_size;
+};
+
+#define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED      0x01
+struct ixgbe_mac_info {
+       struct ixgbe_mac_operations     ops;
+       enum ixgbe_mac_type             type;
+       u8                              addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
+       u8                              perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
+       u8                              san_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
+       /* prefix for World Wide Node Name (WWNN) */
+       u16                             wwnn_prefix;
+       /* prefix for World Wide Port Name (WWPN) */
+       u16                             wwpn_prefix;
+#define IXGBE_MAX_MTA                  128
+       u32                             mta_shadow[IXGBE_MAX_MTA];
+       s32                             mc_filter_type;
+       u32                             mcft_size;
+       u32                             vft_size;
+       u32                             num_rar_entries;
+       u32                             rar_highwater;
+       u32                             rx_pb_size;
+       u32                             max_tx_queues;
+       u32                             max_rx_queues;
+       u32                             max_msix_vectors;
+       bool                            msix_vectors_from_pcie;
+       u32                             orig_autoc;
+       bool                            arc_subsystem_valid;
+       u32                             orig_autoc2;
+       bool                            orig_link_settings_stored;
+       bool                            autotry_restart;
+       u8                              flags;
+};
+
+struct ixgbe_phy_info {
+       struct ixgbe_phy_operations     ops;
+       enum ixgbe_phy_type             type;
+       u32                             addr;
+       u32                             id;
+       enum ixgbe_sfp_type             sfp_type;
+       bool                            sfp_setup_needed;
+       u32                             revision;
+       enum ixgbe_media_type           media_type;
+       bool                            reset_disable;
+       ixgbe_autoneg_advertised        autoneg_advertised;
+       enum ixgbe_smart_speed          smart_speed;
+       bool                            smart_speed_active;
+       bool                            multispeed_fiber;
+       bool                            reset_if_overtemp;
+};
+
+#include "ixgbe_mbx.h"
+
+struct ixgbe_mbx_operations {
+       void (*init_params)(struct ixgbe_hw *hw);
+       s32  (*read)(struct ixgbe_hw *, u32 *, u16,  u16);
+       s32  (*write)(struct ixgbe_hw *, u32 *, u16, u16);
+       s32  (*read_posted)(struct ixgbe_hw *, u32 *, u16,  u16);
+       s32  (*write_posted)(struct ixgbe_hw *, u32 *, u16, u16);
+       s32  (*check_for_msg)(struct ixgbe_hw *, u16);
+       s32  (*check_for_ack)(struct ixgbe_hw *, u16);
+       s32  (*check_for_rst)(struct ixgbe_hw *, u16);
+};
+
+/* Counters for mailbox traffic between this function and its peer */
+struct ixgbe_mbx_stats {
+	u32 msgs_tx; /* messages written */
+	u32 msgs_rx; /* messages read */
+
+	u32 acks;
+	u32 reqs;
+	u32 rsts;
+};
+
+struct ixgbe_mbx_info {
+	struct ixgbe_mbx_operations ops;
+	struct ixgbe_mbx_stats stats;
+	/* NOTE(review): left 0 until after a successful VF reset, when it
+	 * is set to IXGBE_VF_MBX_INIT_TIMEOUT ("mailbox timeout can now
+	 * become active") — appears to gate posted reads/writes; confirm.
+	 */
+	u32 timeout;
+	u32 usec_delay;
+	u32 v2p_mailbox;
+	u16 size;
+};
+
+/* Top-level adapter state: aggregates the MAC/PHY/EEPROM/bus/mailbox
+ * sub-structures and the PCI identity of the device.
+ */
+struct ixgbe_hw {
+	u8				*hw_addr;
+	void				*back;
+	struct ixgbe_mac_info		mac;
+	struct ixgbe_addr_filter_info	addr_ctrl;
+	struct ixgbe_fc_info		fc;
+	struct ixgbe_phy_info		phy;
+	struct ixgbe_eeprom_info	eeprom;
+	struct ixgbe_bus_info		bus;
+	struct ixgbe_mbx_info		mbx;
+	u16				device_id;
+	u16				vendor_id;
+	u16				subsystem_device_id;
+	u16				subsystem_vendor_id;
+	u8				revision_id;
+	/* TRUE after stop_adapter; shared code must not touch HW then */
+	bool				adapter_stopped;
+	bool				force_full_reset;
+};
+
+/*
+ * Invoke an optional function pointer, yielding 'error' when 'func' is
+ * NULL.  The whole expansion is parenthesized so the macro behaves as a
+ * single expression wherever it is used; the original bare ternary
+ * could associate incorrectly inside a larger expression.
+ */
+#define ixgbe_call_func(hw, func, params, error) \
+		(((func) != NULL) ? (func params) : (error))
+
+
+/* Error Codes */
+#define IXGBE_SUCCESS                           0
+#define IXGBE_ERR_EEPROM                        -1
+#define IXGBE_ERR_EEPROM_CHECKSUM               -2
+#define IXGBE_ERR_PHY                           -3
+#define IXGBE_ERR_CONFIG                        -4
+#define IXGBE_ERR_PARAM                         -5
+#define IXGBE_ERR_MAC_TYPE                      -6
+#define IXGBE_ERR_UNKNOWN_PHY                   -7
+#define IXGBE_ERR_LINK_SETUP                    -8
+#define IXGBE_ERR_ADAPTER_STOPPED               -9
+#define IXGBE_ERR_INVALID_MAC_ADDR              -10
+#define IXGBE_ERR_DEVICE_NOT_SUPPORTED          -11
+#define IXGBE_ERR_MASTER_REQUESTS_PENDING       -12
+#define IXGBE_ERR_INVALID_LINK_SETTINGS         -13
+#define IXGBE_ERR_AUTONEG_NOT_COMPLETE          -14
+#define IXGBE_ERR_RESET_FAILED                  -15
+#define IXGBE_ERR_SWFW_SYNC                     -16
+#define IXGBE_ERR_PHY_ADDR_INVALID              -17
+#define IXGBE_ERR_I2C                           -18
+#define IXGBE_ERR_SFP_NOT_SUPPORTED             -19
+#define IXGBE_ERR_SFP_NOT_PRESENT               -20
+#define IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT       -21
+#define IXGBE_ERR_NO_SAN_ADDR_PTR               -22
+#define IXGBE_ERR_FDIR_REINIT_FAILED            -23
+#define IXGBE_ERR_EEPROM_VERSION                -24
+#define IXGBE_ERR_NO_SPACE                      -25
+#define IXGBE_ERR_OVERTEMP                      -26
+#define IXGBE_ERR_FC_NOT_NEGOTIATED             -27
+#define IXGBE_ERR_FC_NOT_SUPPORTED              -28
+#define IXGBE_ERR_FLOW_CONTROL                  -29
+#define IXGBE_ERR_SFP_SETUP_NOT_COMPLETE        -30
+#define IXGBE_ERR_PBA_SECTION                   -31
+#define IXGBE_ERR_INVALID_ARGUMENT              -32
+#define IXGBE_ERR_HOST_INTERFACE_COMMAND        -33
+#define IXGBE_ERR_OUT_OF_MEM                    -34
+
+#define IXGBE_NOT_IMPLEMENTED                   0x7FFFFFFF
+
+
+#endif /* _IXGBE_TYPE_H_ */
diff --git a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_vf.c b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_vf.c
new file mode 100644 (file)
index 0000000..422c5c8
--- /dev/null
@@ -0,0 +1,524 @@
+/******************************************************************************
+
+  Copyright (c) 2001-2010, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+
+#include "ixgbe_api.h"
+#include "ixgbe_type.h"
+#include "ixgbe_vf.h"
+
+s32 ixgbe_init_ops_vf(struct ixgbe_hw *hw);
+s32 ixgbe_init_hw_vf(struct ixgbe_hw *hw);
+s32 ixgbe_start_hw_vf(struct ixgbe_hw *hw);
+s32 ixgbe_reset_hw_vf(struct ixgbe_hw *hw);
+s32 ixgbe_stop_adapter_vf(struct ixgbe_hw *hw);
+u32 ixgbe_get_num_of_tx_queues_vf(struct ixgbe_hw *hw);
+u32 ixgbe_get_num_of_rx_queues_vf(struct ixgbe_hw *hw);
+s32 ixgbe_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr);
+s32 ixgbe_setup_mac_link_vf(struct ixgbe_hw *hw,
+                                  ixgbe_link_speed speed, bool autoneg,
+                                  bool autoneg_wait_to_complete);
+s32 ixgbe_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+                            bool *link_up, bool autoneg_wait_to_complete);
+s32 ixgbe_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
+                     u32 enable_addr);
+s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr);
+s32 ixgbe_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list,
+                                u32 mc_addr_count, ixgbe_mc_addr_itr,
+                                bool clear);
+s32 ixgbe_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on);
+
+#ifndef IXGBE_VFWRITE_REG
+#define IXGBE_VFWRITE_REG IXGBE_WRITE_REG
+#endif
+#ifndef IXGBE_VFREAD_REG
+#define IXGBE_VFREAD_REG IXGBE_READ_REG
+#endif
+
+/**
+ *  ixgbe_init_ops_vf - Initialize the pointers for vf
+ *  @hw: pointer to hardware structure
+ *
+ *  This will assign function pointers, adapter-specific functions can
+ *  override the assignment of generic function pointers by assigning
+ *  their own adapter-specific function pointers.
+ *  Does not touch the hardware.
+ **/
+s32 ixgbe_init_ops_vf(struct ixgbe_hw *hw)
+{
+	/* MAC */
+	hw->mac.ops.init_hw = ixgbe_init_hw_vf;
+	hw->mac.ops.reset_hw = ixgbe_reset_hw_vf;
+	hw->mac.ops.start_hw = ixgbe_start_hw_vf;
+	/* Cannot clear stats on VF */
+	hw->mac.ops.clear_hw_cntrs = NULL;
+	/* PF-only operations stay NULL on the VF */
+	hw->mac.ops.get_media_type = NULL;
+	hw->mac.ops.get_mac_addr = ixgbe_get_mac_addr_vf;
+	hw->mac.ops.stop_adapter = ixgbe_stop_adapter_vf;
+	hw->mac.ops.get_bus_info = NULL;
+
+	/* Link */
+	hw->mac.ops.setup_link = ixgbe_setup_mac_link_vf;
+	hw->mac.ops.check_link = ixgbe_check_mac_link_vf;
+	hw->mac.ops.get_link_capabilities = NULL;
+
+	/* RAR, Multicast, VLAN */
+	hw->mac.ops.set_rar = ixgbe_set_rar_vf;
+	hw->mac.ops.set_uc_addr = ixgbevf_set_uc_addr_vf;
+	hw->mac.ops.init_rx_addrs = NULL;
+	hw->mac.ops.update_mc_addr_list = ixgbe_update_mc_addr_list_vf;
+	hw->mac.ops.enable_mc = NULL;
+	hw->mac.ops.disable_mc = NULL;
+	hw->mac.ops.clear_vfta = NULL;
+	hw->mac.ops.set_vfta = ixgbe_set_vfta_vf;
+
+	/* a VF exposes a single queue pair by default */
+	hw->mac.max_tx_queues = 1;
+	hw->mac.max_rx_queues = 1;
+
+	/* the VF reaches the PF through the mailbox */
+	hw->mbx.ops.init_params = ixgbe_init_mbx_params_vf;
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_start_hw_vf - Prepare hardware for Tx/Rx
+ *  @hw: pointer to hardware structure
+ *
+ *  On the VF there is no bus-info, counter, or address-table setup to
+ *  perform; starting the hardware only clears the adapter_stopped flag
+ *  so other shared-code routines may touch the device again.  (The
+ *  previous comment described the PF start sequence, none of which is
+ *  done here.)
+ **/
+s32 ixgbe_start_hw_vf(struct ixgbe_hw *hw)
+{
+	/* Clear adapter stopped flag */
+	hw->adapter_stopped = FALSE;
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_init_hw_vf - virtual function hardware initialization
+ *  @hw: pointer to hardware structure
+ *
+ *  Starts the hardware and caches the current MAC address in
+ *  hw->mac.addr.  No reset is issued here; callers wanting one use
+ *  mac.ops.reset_hw.
+ **/
+s32 ixgbe_init_hw_vf(struct ixgbe_hw *hw)
+{
+	s32 status = hw->mac.ops.start_hw(hw);
+
+	/* NOTE(review): start_hw's status is not checked before the MAC
+	 * address is fetched — confirm this is intentional.
+	 */
+	hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
+
+	return status;
+}
+
+/**
+ *  ixgbe_reset_hw_vf - Performs hardware reset
+ *  @hw: pointer to hardware structure
+ *
+ *  Resets the hardware by resetting the transmit and receive units,
+ *  masks and clears all interrupts, then retrieves the permanent MAC
+ *  address and multicast filter type from the PF over the mailbox.
+ *
+ *  Returns IXGBE_ERR_INVALID_MAC_ADDR if the RSTI/RSTD wait times out
+ *  or the PF's reply is not an ACKed IXGBE_VF_RESET message.
+ **/
+s32 ixgbe_reset_hw_vf(struct ixgbe_hw *hw)
+{
+	struct ixgbe_mbx_info *mbx = &hw->mbx;
+	u32 timeout = IXGBE_VF_INIT_TIMEOUT;
+	s32 ret_val = IXGBE_ERR_INVALID_MAC_ADDR;
+	u32 ctrl, msgbuf[IXGBE_VF_PERMADDR_MSG_LEN];
+	u8 *addr = (u8 *)(&msgbuf[1]);
+
+	/* trace tag fixed: previously said "ixgbevf_reset_hw_vf" */
+	DEBUGFUNC("ixgbe_reset_hw_vf");
+
+	/* Call adapter stop to disable tx/rx and clear interrupts */
+	hw->mac.ops.stop_adapter(hw);
+
+	DEBUGOUT("Issuing a function level reset to MAC\n");
+
+	ctrl = IXGBE_VFREAD_REG(hw, IXGBE_VFCTRL) | IXGBE_CTRL_RST;
+	IXGBE_VFWRITE_REG(hw, IXGBE_VFCTRL, ctrl);
+	IXGBE_WRITE_FLUSH(hw);
+
+	msec_delay(50);
+
+	/* we cannot reset while the RSTI / RSTD bits are asserted */
+	while (!mbx->ops.check_for_rst(hw, 0) && timeout) {
+		timeout--;
+		usec_delay(5);
+	}
+
+	if (timeout) {
+		/* mailbox timeout can now become active */
+		mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;
+
+		msgbuf[0] = IXGBE_VF_RESET;
+		mbx->ops.write_posted(hw, msgbuf, 1, 0);
+
+		msec_delay(10);
+
+		/*
+		 * set our "perm_addr" based on info provided by PF
+		 * also set up the mc_filter_type which is piggy backed
+		 * on the mac address in word 3
+		 */
+		ret_val = mbx->ops.read_posted(hw, msgbuf,
+					       IXGBE_VF_PERMADDR_MSG_LEN, 0);
+		if (!ret_val) {
+			if (msgbuf[0] == (IXGBE_VF_RESET |
+					  IXGBE_VT_MSGTYPE_ACK)) {
+				memcpy(hw->mac.perm_addr, addr,
+				       IXGBE_ETH_LENGTH_OF_ADDRESS);
+				hw->mac.mc_filter_type =
+					msgbuf[IXGBE_VF_MC_TYPE_WORD];
+			} else {
+				ret_val = IXGBE_ERR_INVALID_MAC_ADDR;
+			}
+		}
+	}
+
+	return ret_val;
+}
+
+/**
+ *  ixgbe_stop_adapter_vf - Generic stop Tx/Rx units
+ *  @hw: pointer to hardware structure
+ *
+ *  Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
+ *  disables transmit and receive units. The adapter_stopped flag is used by
+ *  the shared code and drivers to determine if the adapter is in a stopped
+ *  state and should not touch the hardware.
+ **/
+s32 ixgbe_stop_adapter_vf(struct ixgbe_hw *hw)
+{
+       u32 reg_val;
+       u16 i;
+
+       /*
+        * Set the adapter_stopped flag so other driver functions stop touching
+        * the hardware
+        */
+       hw->adapter_stopped = TRUE;
+
+       /* Clear interrupt mask to stop from interrupts being generated */
+       IXGBE_VFWRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);
+
+       /* Clear any pending interrupts, flush previous writes */
+       IXGBE_VFREAD_REG(hw, IXGBE_VTEICR);
+
+       /* Disable the transmit unit.  Each queue must be disabled. */
+       for (i = 0; i < hw->mac.max_tx_queues; i++)
+               IXGBE_VFWRITE_REG(hw, IXGBE_VFTXDCTL(i), IXGBE_TXDCTL_SWFLSH);
+
+       /* Disable the receive unit by stopping each queue */
+       for (i = 0; i < hw->mac.max_rx_queues; i++) {
+               reg_val = IXGBE_VFREAD_REG(hw, IXGBE_VFRXDCTL(i));
+               reg_val &= ~IXGBE_RXDCTL_ENABLE;
+               IXGBE_VFWRITE_REG(hw, IXGBE_VFRXDCTL(i), reg_val);
+       }
+
+       /* flush all queues disables */
+       IXGBE_WRITE_FLUSH(hw);
+       msec_delay(2);
+
+       return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_mta_vector - Determines bit-vector in multicast table to set
+ *  @hw: pointer to hardware structure
+ *  @mc_addr: the multicast address
+ *
+ *  Extracts the 12 bits, from a multicast address, to determine which
+ *  bit-vector to set in the multicast table. The hardware uses 12 bits, from
+ *  incoming rx multicast addresses, to determine the bit-vector to check in
+ *  the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set
+ *  by the MO field of the MCSTCTRL. The MO field is set during initialization
+ *  to mc_filter_type.
+ **/
+static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
+{
+       u32 vector = 0;
+
+       switch (hw->mac.mc_filter_type) {
+       case 0:   /* use bits [47:36] of the address */
+               vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
+               break;
+       case 1:   /* use bits [46:35] of the address */
+               vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
+               break;
+       case 2:   /* use bits [45:34] of the address */
+               vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
+               break;
+       case 3:   /* use bits [43:32] of the address */
+               vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
+               break;
+       default:  /* Invalid mc_filter_type */
+               DEBUGOUT("MC filter type param set incorrectly\n");
+               ASSERT(0);
+               break;
+       }
+
+       /* vector can only be 12-bits or boundary will be exceeded */
+       vector &= 0xFFF;
+       return vector;
+}
+
+/**
+ *  ixgbe_set_rar_vf - set device MAC address
+ *  @hw: pointer to hardware structure
+ *  @index: Receive address register to write
+ *  @addr: Address to put into receive address register
+ *  @vmdq: VMDq "set" or "pool" index
+ *  @enable_addr: set flag that address is active
+ **/
+s32 ixgbe_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
+                     u32 enable_addr)
+{
+       struct ixgbe_mbx_info *mbx = &hw->mbx;
+       u32 msgbuf[3];
+       u8 *msg_addr = (u8 *)(&msgbuf[1]);
+       s32 ret_val;
+       UNREFERENCED_3PARAMETER(vmdq, enable_addr, index);
+
+       memset(msgbuf, 0, 12);
+       msgbuf[0] = IXGBE_VF_SET_MAC_ADDR;
+       memcpy(msg_addr, addr, 6);
+       ret_val = mbx->ops.write_posted(hw, msgbuf, 3, 0);
+
+       if (!ret_val)
+               ret_val = mbx->ops.read_posted(hw, msgbuf, 3, 0);
+
+       msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
+
+       /* if nacked the address was rejected, use "perm_addr" */
+       if (!ret_val &&
+           (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK)))
+               ixgbe_get_mac_addr_vf(hw, hw->mac.addr);
+
+       return ret_val;
+}
+
+/**
+ *  ixgbe_update_mc_addr_list_vf - Update Multicast addresses
+ *  @hw: pointer to the HW structure
+ *  @mc_addr_list: array of multicast addresses to program
+ *  @mc_addr_count: number of multicast addresses to program
+ *  @next: caller supplied function to return next address in list
+ *
+ *  Updates the Multicast Table Array.
+ **/
+s32 ixgbe_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list,
+                                 u32 mc_addr_count, ixgbe_mc_addr_itr next,
+                                 bool clear)
+{
+       struct ixgbe_mbx_info *mbx = &hw->mbx;
+       u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
+       /* hash values are packed as 16-bit words after the msg-type word */
+       u16 *vector_list = (u16 *)&msgbuf[1];
+       u32 vector;
+       u32 cnt, i;
+       /* scratch out-parameter required by the iterator; unused here */
+       u32 vmdq;
+
+       UNREFERENCED_1PARAMETER(clear);
+
+       DEBUGFUNC("ixgbe_update_mc_addr_list_vf");
+
+       /* Each entry in the list uses 1 16 bit word.  We have 30
+        * 16 bit words available in our HW msg buffer (minus 1 for the
+        * msg type).  That's 30 hash values if we pack 'em right.  If
+        * there are more than 30 MC addresses to add then punt the
+        * extras for now and then add code to handle more than 30 later.
+        * It would be unusual for a server to request that many multi-cast
+        * addresses except for in large enterprise network environments.
+        */
+
+       DEBUGOUT1("MC Addr Count = %d\n", mc_addr_count);
+
+       /* silently truncates to the first 30 addresses (see note above) */
+       cnt = (mc_addr_count > 30) ? 30 : mc_addr_count;
+       msgbuf[0] = IXGBE_VF_SET_MULTICAST;
+       msgbuf[0] |= cnt << IXGBE_VT_MSGINFO_SHIFT;
+
+       for (i = 0; i < cnt; i++) {
+               vector = ixgbe_mta_vector(hw, next(hw, &mc_addr_list, &vmdq));
+               DEBUGOUT1("Hash value = 0x%03X\n", vector);
+               vector_list[i] = (u16)vector;
+       }
+
+       return mbx->ops.write_posted(hw, msgbuf, IXGBE_VFMAILBOX_SIZE, 0);
+}
+
+/**
+ *  ixgbe_set_vfta_vf - Set/Unset vlan filter table address
+ *  @hw: pointer to the HW structure
+ *  @vlan: 12 bit VLAN ID
+ *  @vind: unused by VF drivers
+ *  @vlan_on: if TRUE then set bit, else clear bit
+ **/
+s32 ixgbe_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on)
+{
+       struct ixgbe_mbx_info *mbx = &hw->mbx;
+       u32 msgbuf[2];
+       UNREFERENCED_1PARAMETER(vind);
+
+       msgbuf[0] = IXGBE_VF_SET_VLAN;
+       msgbuf[1] = vlan;
+       /* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
+       msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT;
+
+       return(mbx->ops.write_posted(hw, msgbuf, 2, 0));
+}
+
+/**
+ *  ixgbe_get_num_of_tx_queues_vf - Get number of TX queues
+ *  @hw: pointer to hardware structure
+ *
+ *  Returns the number of transmit queues for the given adapter.
+ **/
+u32 ixgbe_get_num_of_tx_queues_vf(struct ixgbe_hw *hw)
+{
+       UNREFERENCED_1PARAMETER(hw);
+       return IXGBE_VF_MAX_TX_QUEUES;
+}
+
+/**
+ *  ixgbe_get_num_of_rx_queues_vf - Get number of RX queues
+ *  @hw: pointer to hardware structure
+ *
+ *  Returns the number of receive queues for the given adapter.
+ **/
+u32 ixgbe_get_num_of_rx_queues_vf(struct ixgbe_hw *hw)
+{
+       UNREFERENCED_1PARAMETER(hw);
+       return IXGBE_VF_MAX_RX_QUEUES;
+}
+
+/**
+ *  ixgbe_get_mac_addr_vf - Read device MAC address
+ *  @hw: pointer to the HW structure
+ **/
+s32 ixgbe_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr)
+{
+       int i;
+
+       for (i = 0; i < IXGBE_ETH_LENGTH_OF_ADDRESS; i++)
+               mac_addr[i] = hw->mac.perm_addr[i];
+
+       return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbevf_set_uc_addr_vf - Set/clear a unicast MAC filter via the PF
+ *  @hw: pointer to the HW structure
+ *  @index: 1-based position in the caller's list (starts a new list);
+ *          0 means "clear all of this VF's macvlans"
+ *  @addr: MAC address to program, or NULL when only clearing
+ *
+ *  Returns IXGBE_SUCCESS, a mailbox error, or IXGBE_ERR_OUT_OF_MEM when
+ *  the PF NACKs the request.
+ **/
+s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
+{
+       struct ixgbe_mbx_info *mbx = &hw->mbx;
+       u32 msgbuf[3];
+       /* byte view of msgbuf word 1 onward for the MAC address */
+       u8 *msg_addr = (u8 *)(&msgbuf[1]);
+       s32 ret_val;
+
+       memset(msgbuf, 0, sizeof(msgbuf));
+       /*
+        * If index is one then this is the start of a new list and needs
+        * indication to the PF so it can do it's own list management.
+        * If it is zero then that tells the PF to just clear all of
+        * this VF's macvlans and there is no new list.
+        */
+       msgbuf[0] |= index << IXGBE_VT_MSGINFO_SHIFT;
+       msgbuf[0] |= IXGBE_VF_SET_MACVLAN;
+       if (addr)
+               memcpy(msg_addr, addr, 6);
+       ret_val = mbx->ops.write_posted(hw, msgbuf, 3, 0);
+
+       if (!ret_val)
+               ret_val = mbx->ops.read_posted(hw, msgbuf, 3, 0);
+
+       /* strip the CTS bit before comparing the reply opcode */
+       msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
+
+       /* a NACK from the PF is reported as out-of-memory (no free slot) */
+       if (!ret_val)
+               if (msgbuf[0] ==
+                   (IXGBE_VF_SET_MACVLAN | IXGBE_VT_MSGTYPE_NACK))
+                       ret_val = IXGBE_ERR_OUT_OF_MEM;
+
+       return ret_val;
+}
+
+/**
+ *  ixgbe_setup_mac_link_vf - Setup MAC link settings
+ *  @hw: pointer to hardware structure
+ *  @speed: new link speed
+ *  @autoneg: TRUE if autonegotiation enabled
+ *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
+ *
+ *  Set the link speed in the AUTOC register and restarts link.
+ **/
+s32 ixgbe_setup_mac_link_vf(struct ixgbe_hw *hw,
+                                  ixgbe_link_speed speed, bool autoneg,
+                                  bool autoneg_wait_to_complete)
+{
+       UNREFERENCED_4PARAMETER(hw, speed, autoneg, autoneg_wait_to_complete);
+       return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_check_mac_link_vf - Get link/speed status
+ *  @hw: pointer to hardware structure
+ *  @speed: pointer to link speed
+ *  @link_up: TRUE is link is up, FALSE otherwise
+ *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
+ *
+ *  Reads the links register to determine if link is up and the current speed
+ **/
+s32 ixgbe_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+                            bool *link_up, bool autoneg_wait_to_complete)
+{
+       u32 links_reg;
+       UNREFERENCED_1PARAMETER(autoneg_wait_to_complete);
+
+       if (!(hw->mbx.ops.check_for_rst(hw, 0))) {
+               *link_up = FALSE;
+               *speed = 0;
+               return -1;
+       }
+
+       links_reg = IXGBE_VFREAD_REG(hw, IXGBE_VFLINKS);
+
+       if (links_reg & IXGBE_LINKS_UP)
+               *link_up = TRUE;
+       else
+               *link_up = FALSE;
+
+       if ((links_reg & IXGBE_LINKS_SPEED_10G_82599) ==
+           IXGBE_LINKS_SPEED_10G_82599)
+               *speed = IXGBE_LINK_SPEED_10GB_FULL;
+       else
+               *speed = IXGBE_LINK_SPEED_1GB_FULL;
+
+       return IXGBE_SUCCESS;
+}
+
diff --git a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_vf.h b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_vf.h
new file mode 100644 (file)
index 0000000..d0c4b34
--- /dev/null
@@ -0,0 +1,113 @@
+/******************************************************************************
+
+  Copyright (c) 2001-2010, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#ifndef __IXGBE_VF_H__
+#define __IXGBE_VF_H__
+
+#define IXGBE_VF_IRQ_CLEAR_MASK     7
+#define IXGBE_VF_MAX_TX_QUEUES      8
+#define IXGBE_VF_MAX_RX_QUEUES      8
+
+/* VF-relative register offsets.  The function-like macros fully
+ * parenthesize their argument so expressions such as
+ * IXGBE_VFRDT(i + 1) expand correctly. */
+#define IXGBE_VFCTRL           0x00000
+#define IXGBE_VFSTATUS         0x00008
+#define IXGBE_VFLINKS          0x00010
+#define IXGBE_VFFRTIMER        0x00048
+#define IXGBE_VFRXMEMWRAP      0x03190
+#define IXGBE_VTEICR           0x00100
+#define IXGBE_VTEICS           0x00104
+#define IXGBE_VTEIMS           0x00108
+#define IXGBE_VTEIMC           0x0010C
+#define IXGBE_VTEIAC           0x00110
+#define IXGBE_VTEIAM           0x00114
+#define IXGBE_VTEITR(x)        (0x00820 + (4 * (x)))
+#define IXGBE_VTIVAR(x)        (0x00120 + (4 * (x)))
+#define IXGBE_VTIVAR_MISC      0x00140
+#define IXGBE_VTRSCINT(x)      (0x00180 + (4 * (x)))
+/* define IXGBE_VFPBACL  still says TBD in EAS */
+#define IXGBE_VFRDBAL(x)       (0x01000 + (0x40 * (x)))
+#define IXGBE_VFRDBAH(x)       (0x01004 + (0x40 * (x)))
+#define IXGBE_VFRDLEN(x)       (0x01008 + (0x40 * (x)))
+#define IXGBE_VFRDH(x)         (0x01010 + (0x40 * (x)))
+#define IXGBE_VFRDT(x)         (0x01018 + (0x40 * (x)))
+#define IXGBE_VFRXDCTL(x)      (0x01028 + (0x40 * (x)))
+#define IXGBE_VFSRRCTL(x)      (0x01014 + (0x40 * (x)))
+#define IXGBE_VFRSCCTL(x)      (0x0102C + (0x40 * (x)))
+#define IXGBE_VFPSRTYPE        0x00300
+#define IXGBE_VFTDBAL(x)       (0x02000 + (0x40 * (x)))
+#define IXGBE_VFTDBAH(x)       (0x02004 + (0x40 * (x)))
+#define IXGBE_VFTDLEN(x)       (0x02008 + (0x40 * (x)))
+#define IXGBE_VFTDH(x)         (0x02010 + (0x40 * (x)))
+#define IXGBE_VFTDT(x)         (0x02018 + (0x40 * (x)))
+#define IXGBE_VFTXDCTL(x)      (0x02028 + (0x40 * (x)))
+#define IXGBE_VFTDWBAL(x)      (0x02038 + (0x40 * (x)))
+#define IXGBE_VFTDWBAH(x)      (0x0203C + (0x40 * (x)))
+#define IXGBE_VFDCA_RXCTRL(x)  (0x0100C + (0x40 * (x)))
+#define IXGBE_VFDCA_TXCTRL(x)  (0x0200c + (0x40 * (x)))
+#define IXGBE_VFGPRC           0x0101C
+#define IXGBE_VFGPTC           0x0201C
+#define IXGBE_VFGORC_LSB       0x01020
+#define IXGBE_VFGORC_MSB       0x01024
+#define IXGBE_VFGOTC_LSB       0x02020
+#define IXGBE_VFGOTC_MSB       0x02024
+#define IXGBE_VFMPRC           0x01034
+
+
+/* Per-VF statistics bookkeeping.  NOTE(review): the base_*/last_*
+ * groups are presumably snapshots used to turn the wrapping hardware
+ * counters into monotonic 64-bit totals -- confirm against the
+ * driver's stats-update path. */
+struct ixgbevf_hw_stats {
+       /* baseline snapshots */
+       u64 base_vfgprc;
+       u64 base_vfgptc;
+       u64 base_vfgorc;
+       u64 base_vfgotc;
+       u64 base_vfmprc;
+
+       /* most recent raw register readings */
+       u64 last_vfgprc;
+       u64 last_vfgptc;
+       u64 last_vfgorc;
+       u64 last_vfgotc;
+       u64 last_vfmprc;
+
+       /* accumulated totals */
+       u64 vfgprc;
+       u64 vfgptc;
+       u64 vfgorc;
+       u64 vfgotc;
+       u64 vfmprc;
+
+       /* values preserved across a reset */
+       u64 saved_reset_vfgprc;
+       u64 saved_reset_vfgptc;
+       u64 saved_reset_vfgorc;
+       u64 saved_reset_vfgotc;
+       u64 saved_reset_vfmprc;
+};
+
+#endif /* __IXGBE_VF_H__ */
+
diff --git a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_x540.c b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_x540.c
new file mode 100644 (file)
index 0000000..d6fdcc4
--- /dev/null
@@ -0,0 +1,989 @@
+/******************************************************************************
+
+  Copyright (c) 2001-2010, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#include "ixgbe_x540.h"
+#include "ixgbe_type.h"
+#include "ixgbe_api.h"
+#include "ixgbe_common.h"
+#include "ixgbe_phy.h"
+
+s32 ixgbe_init_ops_X540(struct ixgbe_hw *hw);
+s32 ixgbe_get_link_capabilities_X540(struct ixgbe_hw *hw,
+                                      ixgbe_link_speed *speed,
+                                      bool *autoneg);
+enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw);
+s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw,
+                               ixgbe_link_speed speed,
+                               bool autoneg, bool link_up_wait_to_complete);
+s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw);
+s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw);
+u32 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw);
+
+s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw);
+s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data);
+s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw,
+                                u16 offset, u16 words, u16 *data);
+s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data);
+s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw,
+                                 u16 offset, u16 words, u16 *data);
+s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw);
+s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw, u16 *checksum_val);
+u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw);
+
+s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask);
+void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask);
+
+static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw);
+static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw);
+static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw);
+static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw);
+
+/**
+ *  ixgbe_init_ops_X540 - Inits func ptrs and MAC type
+ *  @hw: pointer to hardware structure
+ *
+ *  Initialize the function pointers and assign the MAC type for X540.
+ *  Does not touch the hardware.
+ **/
+s32 ixgbe_init_ops_X540(struct ixgbe_hw *hw)
+{
+       struct ixgbe_mac_info *mac = &hw->mac;
+       struct ixgbe_phy_info *phy = &hw->phy;
+       struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+       s32 ret_val;
+
+       DEBUGFUNC("ixgbe_init_ops_X540");
+
+       ret_val = ixgbe_init_phy_ops_generic(hw);
+       /* NOTE(review): the phy-ops init status above is immediately
+        * overwritten here, so a failure there is silently dropped --
+        * confirm this is intentional */
+       ret_val = ixgbe_init_ops_generic(hw);
+
+
+       /* EEPROM */
+       eeprom->ops.init_params = &ixgbe_init_eeprom_params_X540;
+       eeprom->ops.read = &ixgbe_read_eerd_X540;
+       eeprom->ops.read_buffer = &ixgbe_read_eerd_buffer_X540;
+       eeprom->ops.write = &ixgbe_write_eewr_X540;
+       eeprom->ops.write_buffer = &ixgbe_write_eewr_buffer_X540;
+       eeprom->ops.update_checksum = &ixgbe_update_eeprom_checksum_X540;
+       eeprom->ops.validate_checksum = &ixgbe_validate_eeprom_checksum_X540;
+       eeprom->ops.calc_checksum = &ixgbe_calc_eeprom_checksum_X540;
+
+       /* PHY */
+       phy->ops.init = &ixgbe_init_phy_ops_generic;
+       phy->ops.reset = NULL;
+
+       /* MAC */
+       mac->ops.reset_hw = &ixgbe_reset_hw_X540;
+       mac->ops.enable_relaxed_ordering = &ixgbe_enable_relaxed_ordering_gen2;
+       mac->ops.get_media_type = &ixgbe_get_media_type_X540;
+       mac->ops.get_supported_physical_layer =
+                                   &ixgbe_get_supported_physical_layer_X540;
+       /* X540 has no analog registers, so these ops stay unset */
+       mac->ops.read_analog_reg8 = NULL;
+       mac->ops.write_analog_reg8 = NULL;
+       mac->ops.start_hw = &ixgbe_start_hw_X540;
+       mac->ops.get_san_mac_addr = &ixgbe_get_san_mac_addr_generic;
+       mac->ops.set_san_mac_addr = &ixgbe_set_san_mac_addr_generic;
+       mac->ops.get_device_caps = &ixgbe_get_device_caps_generic;
+       mac->ops.get_wwn_prefix = &ixgbe_get_wwn_prefix_generic;
+       mac->ops.get_fcoe_boot_status = &ixgbe_get_fcoe_boot_status_generic;
+       mac->ops.acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540;
+       mac->ops.release_swfw_sync = &ixgbe_release_swfw_sync_X540;
+
+       /* RAR, Multicast, VLAN */
+       mac->ops.set_vmdq = &ixgbe_set_vmdq_generic;
+       mac->ops.clear_vmdq = &ixgbe_clear_vmdq_generic;
+       mac->ops.insert_mac_addr = &ixgbe_insert_mac_addr_generic;
+       mac->rar_highwater = 1;
+       mac->ops.set_vfta = &ixgbe_set_vfta_generic;
+       mac->ops.clear_vfta = &ixgbe_clear_vfta_generic;
+       mac->ops.init_uta_tables = &ixgbe_init_uta_tables_generic;
+       mac->ops.set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing;
+       mac->ops.set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing;
+
+       /* Link */
+       mac->ops.get_link_capabilities = &ixgbe_get_copper_link_capabilities_generic;
+       mac->ops.setup_link = &ixgbe_setup_mac_link_X540;
+       mac->ops.setup_rxpba = &ixgbe_set_rxpba_generic;
+       mac->ops.check_link = &ixgbe_check_mac_link_generic;
+
+       /* device capacities for this MAC */
+       mac->mcft_size        = 128;
+       mac->vft_size         = 128;
+       mac->num_rar_entries  = 128;
+       mac->rx_pb_size       = 384;
+       mac->max_tx_queues    = 128;
+       mac->max_rx_queues    = 128;
+       mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
+
+       /*
+        * FWSM register
+        * ARC supported; valid only if manageability features are
+        * enabled.
+        */
+       mac->arc_subsystem_valid = (IXGBE_READ_REG(hw, IXGBE_FWSM) &
+                                  IXGBE_FWSM_MODE_MASK) ? TRUE : FALSE;
+
+       hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf;
+
+       /* LEDs */
+       mac->ops.blink_led_start = ixgbe_blink_led_start_X540;
+       mac->ops.blink_led_stop = ixgbe_blink_led_stop_X540;
+
+       /* Manageability interface */
+       mac->ops.set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic;
+
+       return ret_val;
+}
+
+/**
+ *  ixgbe_get_link_capabilities_X540 - Determines link capabilities
+ *  @hw: pointer to hardware structure
+ *  @speed: pointer to link speed
+ *  @negotiation: TRUE when autoneg or autotry is enabled
+ *
+ *  Determines the link capabilities by reading the AUTOC register.
+ **/
+s32 ixgbe_get_link_capabilities_X540(struct ixgbe_hw *hw,
+                                     ixgbe_link_speed *speed,
+                                     bool *negotiation)
+{
+       ixgbe_get_copper_link_capabilities_generic(hw, speed, negotiation);
+
+       return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_get_media_type_X540 - Get media type
+ *  @hw: pointer to hardware structure
+ *
+ *  Returns the media type (fiber, copper, backplane)
+ **/
+enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw)
+{
+       UNREFERENCED_1PARAMETER(hw);
+       return ixgbe_media_type_copper;
+}
+
+/**
+ *  ixgbe_setup_mac_link_X540 - Sets the auto advertised capabilities
+ *  @hw: pointer to hardware structure
+ *  @speed: new link speed
+ *  @autoneg: TRUE if autonegotiation enabled
+ *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
+ **/
+s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw,
+                                     ixgbe_link_speed speed, bool autoneg,
+                                     bool autoneg_wait_to_complete)
+{
+       DEBUGFUNC("ixgbe_setup_mac_link_X540");
+       return hw->phy.ops.setup_link_speed(hw, speed, autoneg,
+                                           autoneg_wait_to_complete);
+}
+
+/**
+ *  ixgbe_reset_hw_X540 - Perform hardware reset
+ *  @hw: pointer to hardware structure
+ *
+ *  Resets the hardware by resetting the transmit and receive units, masks
+ *  and clears all interrupts, and perform a reset.
+ **/
+s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
+{
+       s32 status;
+       u32 ctrl, i;
+
+       DEBUGFUNC("ixgbe_reset_hw_X540");
+
+       /* Call adapter stop to disable tx/rx and clear interrupts */
+       status = hw->mac.ops.stop_adapter(hw);
+       if (status != IXGBE_SUCCESS)
+               goto reset_hw_out;
+
+       /* flush pending Tx transactions */
+       ixgbe_clear_tx_pending(hw);
+
+mac_reset_top:
+       ctrl = IXGBE_CTRL_RST;
+       ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
+       IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
+       IXGBE_WRITE_FLUSH(hw);
+
+       /* Poll for reset bit to self-clear indicating reset is complete.
+        * NOTE(review): the poll window is only ~10us before the 100ms
+        * settle delay below -- confirm this matches the datasheet. */
+       for (i = 0; i < 10; i++) {
+               usec_delay(1);
+               ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
+               if (!(ctrl & IXGBE_CTRL_RST_MASK))
+                       break;
+       }
+
+       /* reset failure is recorded but execution still continues */
+       if (ctrl & IXGBE_CTRL_RST_MASK) {
+               status = IXGBE_ERR_RESET_FAILED;
+               DEBUGOUT("Reset polling failed to complete.\n");
+       }
+       msec_delay(100);
+
+       /*
+        * Double resets are required for recovery from certain error
+        * conditions.  Between resets, it is necessary to stall to allow time
+        * for any pending HW events to complete.  The flag is cleared first
+        * so the loop runs at most twice.
+        */
+       if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
+               hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
+               goto mac_reset_top;
+       }
+
+       /* Set the Rx packet buffer size. */
+       IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), 384 << IXGBE_RXPBSIZE_SHIFT);
+
+       /* Store the permanent mac address */
+       hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
+
+       /*
+        * Store MAC address from RAR0, clear receive address registers, and
+        * clear the multicast table.  Also reset num_rar_entries to 128,
+        * since we modify this value when programming the SAN MAC address.
+        */
+       hw->mac.num_rar_entries = 128;
+       hw->mac.ops.init_rx_addrs(hw);
+
+       /* Store the permanent SAN mac address */
+       hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
+
+       /* Add the SAN MAC address to the RAR only if it's a valid address */
+       if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
+               hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
+                                   hw->mac.san_addr, 0, IXGBE_RAH_AV);
+
+               /* Reserve the last RAR for the SAN MAC address */
+               hw->mac.num_rar_entries--;
+       }
+
+       /* Store the alternative WWNN/WWPN prefix */
+       hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
+                                      &hw->mac.wwpn_prefix);
+
+reset_hw_out:
+       return status;
+}
+
+/**
+ *  ixgbe_start_hw_X540 - Prepare hardware for Tx/Rx
+ *  @hw: pointer to hardware structure
+ *
+ *  Starts the hardware using the generic start_hw function
+ *  and the generation start_hw function.
+ *  Then performs revision-specific operations, if any.
+ **/
+s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw)
+{
+       s32 ret_val = IXGBE_SUCCESS;
+
+       DEBUGFUNC("ixgbe_start_hw_X540");
+
+       ret_val = ixgbe_start_hw_generic(hw);
+       if (ret_val != IXGBE_SUCCESS)
+               goto out;
+
+       ret_val = ixgbe_start_hw_gen2(hw);
+
+out:
+       return ret_val;
+}
+
+/**
+ *  ixgbe_get_supported_physical_layer_X540 - Returns physical layer type
+ *  @hw: pointer to hardware structure
+ *
+ *  Determines physical layer capabilities of the current configuration.
+ **/
+u32 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw)
+{
+       u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+       u16 ext_ability = 0;
+
+       DEBUGFUNC("ixgbe_get_supported_physical_layer_X540");
+
+       hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
+       IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
+       if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
+               physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
+       if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
+               physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
+       if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
+               physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
+
+       return physical_layer;
+}
+
+/**
+ *  ixgbe_init_eeprom_params_X540 - Initialize EEPROM params
+ *  @hw: pointer to hardware structure
+ *
+ *  Initializes the EEPROM parameters ixgbe_eeprom_info within the
+ *  ixgbe_hw struct in order to set up EEPROM access.
+ **/
+s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
+{
+       struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+       u32 eec;
+       u16 eeprom_size;
+
+       DEBUGFUNC("ixgbe_init_eeprom_params_X540");
+
+       /* only initialize once; later calls are no-ops */
+       if (eeprom->type == ixgbe_eeprom_uninitialized) {
+               /* NOTE(review): units of semaphore_delay not visible
+                * here -- confirm (likely ms) */
+               eeprom->semaphore_delay = 10;
+               eeprom->type = ixgbe_flash;
+
+               /* word count is derived from the SIZE field of EEC */
+               eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+               eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
+                                   IXGBE_EEC_SIZE_SHIFT);
+               eeprom->word_size = 1 << (eeprom_size +
+                                         IXGBE_EEPROM_WORD_SIZE_SHIFT);
+
+               DEBUGOUT2("Eeprom params: type = %d, size = %d\n",
+                         eeprom->type, eeprom->word_size);
+       }
+
+       return IXGBE_SUCCESS;
+}
+
+/**
+ *  ixgbe_read_eerd_X540- Read EEPROM word using EERD
+ *  @hw: pointer to hardware structure
+ *  @offset: offset of  word in the EEPROM to read
+ *  @data: word read from the EEPROM
+ *
+ *  Reads a 16 bit word from the EEPROM using the EERD register.
+ **/
+s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data)
+{
+       s32 status = IXGBE_SUCCESS;
+
+       DEBUGFUNC("ixgbe_read_eerd_X540");
+       if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
+           IXGBE_SUCCESS)
+               status = ixgbe_read_eerd_generic(hw, offset, data);
+       else
+               status = IXGBE_ERR_SWFW_SYNC;
+
+       hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+       return status;
+}
+
+/**
+ *  ixgbe_read_eerd_buffer_X540- Read EEPROM word(s) using EERD
+ *  @hw: pointer to hardware structure
+ *  @offset: offset of  word in the EEPROM to read
+ *  @words: number of words
+ *  @data: word(s) read from the EEPROM
+ *
+ *  Reads a 16 bit word(s) from the EEPROM using the EERD register.
+ **/
+s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw,
+                                u16 offset, u16 words, u16 *data)
+{
+       s32 status = IXGBE_SUCCESS;
+
+       DEBUGFUNC("ixgbe_read_eerd_buffer_X540");
+       if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
+           IXGBE_SUCCESS)
+               status = ixgbe_read_eerd_buffer_generic(hw, offset,
+                                                       words, data);
+       else
+               status = IXGBE_ERR_SWFW_SYNC;
+
+       hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+       return status;
+}
+
+/**
+ *  ixgbe_write_eewr_X540 - Write EEPROM word using EEWR
+ *  @hw: pointer to hardware structure
+ *  @offset: offset of  word in the EEPROM to write
+ *  @data: word write to the EEPROM
+ *
+ *  Write a 16 bit word to the EEPROM using the EEWR register.
+ **/
+s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data)
+{
+       s32 status = IXGBE_SUCCESS;
+
+       DEBUGFUNC("ixgbe_write_eewr_X540");
+       if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
+           IXGBE_SUCCESS)
+               status = ixgbe_write_eewr_generic(hw, offset, data);
+       else
+               status = IXGBE_ERR_SWFW_SYNC;
+
+       hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+       return status;
+}
+
+/**
+ *  ixgbe_write_eewr_buffer_X540 - Write EEPROM word(s) using EEWR
+ *  @hw: pointer to hardware structure
+ *  @offset: offset of  word in the EEPROM to write
+ *  @words: number of words
+ *  @data: word(s) write to the EEPROM
+ *
+ *  Write a 16 bit word(s) to the EEPROM using the EEWR register.
+ **/
+s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw,
+                                 u16 offset, u16 words, u16 *data)
+{
+       s32 status = IXGBE_SUCCESS;
+
+       DEBUGFUNC("ixgbe_write_eewr_buffer_X540");
+       if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
+           IXGBE_SUCCESS)
+               status = ixgbe_write_eewr_buffer_generic(hw, offset,
+                                                        words, data);
+       else
+               status = IXGBE_ERR_SWFW_SYNC;
+
+       hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+       return status;
+}
+
+/**
+ *  ixgbe_calc_eeprom_checksum_X540 - Calculates and returns the checksum
+ *
+ *  This function does not use synchronization for EERD and EEWR. It can
+ *  be used internally by function which utilize ixgbe_acquire_swfw_sync_X540.
+ *
+ *  @hw: pointer to hardware structure
+ **/
+u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
+{
+       u16 i;
+       u16 j;
+       u16 checksum = 0;
+       u16 length = 0;
+       u16 pointer = 0;
+       u16 word = 0;
+
+       /*
+        * Do not use hw->eeprom.ops.read because we do not want to take
+        * the synchronization semaphores here. Instead use
+        * ixgbe_read_eerd_generic
+        */
+
+       DEBUGFUNC("ixgbe_calc_eeprom_checksum_X540");
+
+       /* Include 0x0-0x3F in the checksum */
+       for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
+               if (ixgbe_read_eerd_generic(hw, i, &word) != IXGBE_SUCCESS) {
+                       DEBUGOUT("EEPROM read failed\n");
+                       break;
+               }
+               checksum += word;
+       }
+
+       /*
+        * Include all data from pointers 0x3, 0x6-0xE.  This excludes the
+        * FW, PHY module, and PCIe Expansion/Option ROM pointers.
+        */
+       for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
+               if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR)
+                       continue;
+
+               if (ixgbe_read_eerd_generic(hw, i, &pointer) != IXGBE_SUCCESS) {
+                       DEBUGOUT("EEPROM read failed\n");
+                       break;
+               }
+
+               /* Skip pointer section if the pointer is invalid. */
+               if (pointer == 0xFFFF || pointer == 0 ||
+                   pointer >= hw->eeprom.word_size)
+                       continue;
+
+               if (ixgbe_read_eerd_generic(hw, pointer, &length)!=
+                   IXGBE_SUCCESS) {
+                       DEBUGOUT("EEPROM read failed\n");
+                       break;
+               }
+
+               /* Skip pointer section if length is invalid. */
+               if (length == 0xFFFF || length == 0 ||
+                   (pointer + length) >= hw->eeprom.word_size)
+                       continue;
+
+               for (j = pointer+1; j <= pointer+length; j++) {
+                       if (ixgbe_read_eerd_generic(hw, j, &word) !=
+                           IXGBE_SUCCESS) {
+                               DEBUGOUT("EEPROM read failed\n");
+                               break;
+                       }
+                       checksum += word;
+               }
+       }
+
+       checksum = (u16)IXGBE_EEPROM_SUM - checksum;
+
+       return checksum;
+}
+
+/**
+ *  ixgbe_validate_eeprom_checksum_X540 - Validate EEPROM checksum
+ *  @hw: pointer to hardware structure
+ *  @checksum_val: calculated checksum
+ *
+ *  Performs checksum calculation and validates the EEPROM checksum.  If the
+ *  caller does not need checksum_val, the value can be NULL.
+ **/
+s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw,
+                                        u16 *checksum_val)
+{
+       s32 status;
+       u16 checksum;
+       u16 read_checksum = 0;
+
+       DEBUGFUNC("ixgbe_validate_eeprom_checksum_X540");
+
+       /*
+        * Read the first word from the EEPROM. If this times out or fails, do
+        * not continue or we could be in for a very long wait while every
+        * EEPROM read fails
+        */
+       status = hw->eeprom.ops.read(hw, 0, &checksum);
+
+       if (status != IXGBE_SUCCESS) {
+               DEBUGOUT("EEPROM read failed\n");
+               goto out;
+       }
+
+       if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
+           IXGBE_SUCCESS) {
+               checksum = hw->eeprom.ops.calc_checksum(hw);
+
+               /*
+                * Do not use hw->eeprom.ops.read because we do not want to take
+                * the synchronization semaphores twice here.
+               */
+               ixgbe_read_eerd_generic(hw, IXGBE_EEPROM_CHECKSUM,
+                                       &read_checksum);
+
+               /*
+                * Verify read checksum from EEPROM is the same as
+                * calculated checksum
+                */
+               if (read_checksum != checksum)
+                       status = IXGBE_ERR_EEPROM_CHECKSUM;
+
+               /* If the user cares, return the calculated checksum */
+               if (checksum_val)
+                       *checksum_val = checksum;
+       } else {
+               status = IXGBE_ERR_SWFW_SYNC;
+       }
+
+       hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+out:
+       return status;
+}
+
+/**
+ * ixgbe_update_eeprom_checksum_X540 - Updates the EEPROM checksum and flash
+ * @hw: pointer to hardware structure
+ *
+ * After writing EEPROM to shadow RAM using EEWR register, software calculates
+ * checksum and updates the EEPROM and instructs the hardware to update
+ * the flash.
+ **/
+s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw)
+{
+       s32 status;
+       u16 checksum;
+
+       DEBUGFUNC("ixgbe_update_eeprom_checksum_X540");
+
+       /*
+        * Read the first word from the EEPROM. If this times out or fails, do
+        * not continue or we could be in for a very long wait while every
+        * EEPROM read fails
+        */
+       status = hw->eeprom.ops.read(hw, 0, &checksum);
+
+       if (status != IXGBE_SUCCESS)
+               DEBUGOUT("EEPROM read failed\n");
+
+       if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
+           IXGBE_SUCCESS) {
+               checksum = hw->eeprom.ops.calc_checksum(hw);
+
+               /*
+                * Do not use hw->eeprom.ops.write because we do not want to
+                * take the synchronization semaphores twice here.
+               */
+               status = ixgbe_write_eewr_generic(hw, IXGBE_EEPROM_CHECKSUM,
+                                                 checksum);
+
+       if (status == IXGBE_SUCCESS)
+               status = ixgbe_update_flash_X540(hw);
+       else
+               status = IXGBE_ERR_SWFW_SYNC;
+       }
+
+       hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+
+       return status;
+}
+
+/**
+ *  ixgbe_update_flash_X540 - Instruct HW to copy EEPROM to Flash device
+ *  @hw: pointer to hardware structure
+ *
+ *  Set FLUP (bit 23) of the EEC register to instruct Hardware to copy
+ *  EEPROM from shadow RAM to the flash device.
+ **/
+static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw)
+{
+       u32 flup;
+       s32 status = IXGBE_ERR_EEPROM;
+
+       DEBUGFUNC("ixgbe_update_flash_X540");
+
+       status = ixgbe_poll_flash_update_done_X540(hw);
+       if (status == IXGBE_ERR_EEPROM) {
+               DEBUGOUT("Flash update time out\n");
+               goto out;
+       }
+
+       flup = IXGBE_READ_REG(hw, IXGBE_EEC) | IXGBE_EEC_FLUP;
+       IXGBE_WRITE_REG(hw, IXGBE_EEC, flup);
+
+       status = ixgbe_poll_flash_update_done_X540(hw);
+       if (status == IXGBE_SUCCESS)
+               DEBUGOUT("Flash update complete\n");
+       else
+               DEBUGOUT("Flash update time out\n");
+
+       if (hw->revision_id == 0) {
+               flup = IXGBE_READ_REG(hw, IXGBE_EEC);
+
+               if (flup & IXGBE_EEC_SEC1VAL) {
+                       flup |= IXGBE_EEC_FLUP;
+                       IXGBE_WRITE_REG(hw, IXGBE_EEC, flup);
+               }
+
+               status = ixgbe_poll_flash_update_done_X540(hw);
+               if (status == IXGBE_SUCCESS)
+                       DEBUGOUT("Flash update complete\n");
+               else
+                       DEBUGOUT("Flash update time out\n");
+       }
+out:
+       return status;
+}
+
+/**
+ *  ixgbe_poll_flash_update_done_X540 - Poll flash update status
+ *  @hw: pointer to hardware structure
+ *
+ *  Polls the FLUDONE (bit 26) of the EEC Register to determine when the
+ *  flash update is done.
+ **/
+static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw)
+{
+       u32 i;
+       u32 reg;
+       s32 status = IXGBE_ERR_EEPROM;
+
+       DEBUGFUNC("ixgbe_poll_flash_update_done_X540");
+
+       for (i = 0; i < IXGBE_FLUDONE_ATTEMPTS; i++) {
+               reg = IXGBE_READ_REG(hw, IXGBE_EEC);
+               if (reg & IXGBE_EEC_FLUDONE) {
+                       status = IXGBE_SUCCESS;
+                       break;
+               }
+               usec_delay(5);
+       }
+       return status;
+}
+
+/**
+ *  ixgbe_acquire_swfw_sync_X540 - Acquire SWFW semaphore
+ *  @hw: pointer to hardware structure
+ *  @mask: Mask to specify which semaphore to acquire
+ *
+ *  Acquires the SWFW semaphore thought the SW_FW_SYNC register for
+ *  the specified function (CSR, PHY0, PHY1, NVM, Flash)
+ **/
+s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask)
+{
+       u32 swfw_sync;
+       u32 swmask = mask;
+       u32 fwmask = mask << 5;
+       u32 hwmask = 0;
+       u32 timeout = 200;
+       u32 i;
+       s32 ret_val = IXGBE_SUCCESS;
+
+       DEBUGFUNC("ixgbe_acquire_swfw_sync_X540");
+
+       if (swmask == IXGBE_GSSR_EEP_SM)
+               hwmask = IXGBE_GSSR_FLASH_SM;
+
+       /* SW only mask doesn't have FW bit pair */
+       if (swmask == IXGBE_GSSR_SW_MNG_SM)
+               fwmask = 0;
+
+       for (i = 0; i < timeout; i++) {
+               /*
+                * SW NVM semaphore bit is used for access to all
+                * SW_FW_SYNC bits (not just NVM)
+                */
+               if (ixgbe_get_swfw_sync_semaphore(hw)) {
+                       ret_val = IXGBE_ERR_SWFW_SYNC;
+                       goto out;
+               }
+
+               swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+               if (!(swfw_sync & (fwmask | swmask | hwmask))) {
+                       swfw_sync |= swmask;
+                       IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
+                       ixgbe_release_swfw_sync_semaphore(hw);
+                       msec_delay(5);
+                       goto out;
+               } else {
+                       /*
+                        * Firmware currently using resource (fwmask), hardware currently
+                        * using resource (hwmask), or other software thread currently
+                        * using resource (swmask)
+                        */
+                       ixgbe_release_swfw_sync_semaphore(hw);
+                       msec_delay(5);
+               }
+       }
+
+       /* Failed to get SW only semaphore */
+       if (swmask == IXGBE_GSSR_SW_MNG_SM) {
+               ret_val = IXGBE_ERR_SWFW_SYNC;
+               goto out;
+       }
+
+       /* If the resource is not released by the FW/HW the SW can assume that
+        * the FW/HW malfunctions. In that case the SW should sets the SW bit(s)
+        * of the requested resource(s) while ignoring the corresponding FW/HW
+        * bits in the SW_FW_SYNC register.
+        */
+       swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+       if (swfw_sync & (fwmask| hwmask)) {
+               if (ixgbe_get_swfw_sync_semaphore(hw)) {
+                       ret_val = IXGBE_ERR_SWFW_SYNC;
+                       goto out;
+               }
+
+               swfw_sync |= swmask;
+               IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
+               ixgbe_release_swfw_sync_semaphore(hw);
+               msec_delay(5);
+       }
+
+out:
+       return ret_val;
+}
+
+/**
+ *  ixgbe_release_swfw_sync_X540 - Release SWFW semaphore
+ *  @hw: pointer to hardware structure
+ *  @mask: Mask to specify which semaphore to release
+ *
+ *  Releases the SWFW semaphore throught the SW_FW_SYNC register
+ *  for the specified function (CSR, PHY0, PHY1, EVM, Flash)
+ **/
+void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask)
+{
+       u32 swfw_sync;
+       u32 swmask = mask;
+
+       DEBUGFUNC("ixgbe_release_swfw_sync_X540");
+
+       ixgbe_get_swfw_sync_semaphore(hw);
+
+       swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+       swfw_sync &= ~swmask;
+       IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
+
+       ixgbe_release_swfw_sync_semaphore(hw);
+       msec_delay(5);
+}
+
+/**
+ *  ixgbe_get_nvm_semaphore - Get hardware semaphore
+ *  @hw: pointer to hardware structure
+ *
+ *  Sets the hardware semaphores so SW/FW can gain control of shared resources
+ **/
+static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw)
+{
+       s32 status = IXGBE_ERR_EEPROM;
+       u32 timeout = 2000;
+       u32 i;
+       u32 swsm;
+
+       DEBUGFUNC("ixgbe_get_swfw_sync_semaphore");
+
+       /* Get SMBI software semaphore between device drivers first */
+       for (i = 0; i < timeout; i++) {
+               /*
+                * If the SMBI bit is 0 when we read it, then the bit will be
+                * set and we have the semaphore
+                */
+               swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+               if (!(swsm & IXGBE_SWSM_SMBI)) {
+                       status = IXGBE_SUCCESS;
+                       break;
+               }
+               usec_delay(50);
+       }
+
+       /* Now get the semaphore between SW/FW through the REGSMP bit */
+       if (status == IXGBE_SUCCESS) {
+               for (i = 0; i < timeout; i++) {
+                       swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+                       if (!(swsm & IXGBE_SWFW_REGSMP))
+                               break;
+
+                       usec_delay(50);
+               }
+
+               /*
+                * Release semaphores and return error if SW NVM semaphore
+                * was not granted because we don't have access to the EEPROM
+                */
+               if (i >= timeout) {
+                       DEBUGOUT("REGSMP Software NVM semaphore not granted.\n");
+                       ixgbe_release_swfw_sync_semaphore(hw);
+                       status = IXGBE_ERR_EEPROM;
+               }
+       } else {
+               DEBUGOUT("Software semaphore SMBI between device drivers "
+                        "not granted.\n");
+       }
+
+       return status;
+}
+
+/**
+ *  ixgbe_release_nvm_semaphore - Release hardware semaphore
+ *  @hw: pointer to hardware structure
+ *
+ *  This function clears hardware semaphore bits.
+ **/
+static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw)
+{
+       u32 swsm;
+
+       DEBUGFUNC("ixgbe_release_swfw_sync_semaphore");
+
+       /* Release both semaphores by writing 0 to the bits REGSMP and SMBI */
+
+       swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+       swsm &= ~IXGBE_SWSM_SMBI;
+       IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
+
+       swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+       swsm &= ~IXGBE_SWFW_REGSMP;
+       IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swsm);
+
+       IXGBE_WRITE_FLUSH(hw);
+}
+
+/**
+ * ixgbe_blink_led_start_X540 - Blink LED based on index.
+ * @hw: pointer to hardware structure
+ * @index: led number to blink
+ *
+ * Devices that implement the version 2 interface:
+ *   X540
+ **/
+s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index)
+{
+       u32 macc_reg;
+       u32 ledctl_reg;
+
+       DEBUGFUNC("ixgbe_blink_led_start_X540");
+
+       /*
+        * In order for the blink bit in the LED control register
+        * to work, link and speed must be forced in the MAC. We
+        * will reverse this when we stop the blinking.
+        */
+       macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC);
+       macc_reg |= IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS;
+       IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg);
+
+       /* Set the LED to LINK_UP + BLINK. */
+       ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+       ledctl_reg &= ~IXGBE_LED_MODE_MASK(index);
+       ledctl_reg |= IXGBE_LED_BLINK(index);
+       IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, ledctl_reg);
+       IXGBE_WRITE_FLUSH(hw);
+
+       return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_blink_led_stop_X540 - Stop blinking LED based on index.
+ * @hw: pointer to hardware structure
+ * @index: led number to stop blinking
+ *
+ * Devices that implement the version 2 interface:
+ *   X540
+ **/
+s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index)
+{
+       u32 macc_reg;
+       u32 ledctl_reg;
+
+       DEBUGFUNC("ixgbe_blink_led_stop_X540");
+
+       /* Restore the LED to its default value. */
+       ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+       ledctl_reg &= ~IXGBE_LED_MODE_MASK(index);
+       ledctl_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index);
+       ledctl_reg &= ~IXGBE_LED_BLINK(index);
+       IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, ledctl_reg);
+
+       /* Unforce link and speed in the MAC. */
+       macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC);
+       macc_reg &= ~(IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS);
+       IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg);
+       IXGBE_WRITE_FLUSH(hw);
+
+       return IXGBE_SUCCESS;
+}
+
+
diff --git a/lib/librte_pmd_ixgbe/ixgbe/ixgbe_x540.h b/lib/librte_pmd_ixgbe/ixgbe/ixgbe_x540.h
new file mode 100644 (file)
index 0000000..0939449
--- /dev/null
@@ -0,0 +1,42 @@
+/******************************************************************************
+
+  Copyright (c) 2001-2010, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
#ifndef _IXGBE_X540_H_
#define _IXGBE_X540_H_

#include "ixgbe_type.h"

/*
 * LED blink control for X540 devices (version 2 LED interface).
 * NOTE(review): the other public X540 functions defined in ixgbe_x540.c
 * (EEPROM read/write, checksum, SWFW sync) are presumably declared
 * elsewhere (e.g. ixgbe_api.h) — verify they are exported somewhere.
 */
s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index);
#endif /* _IXGBE_X540_H_ */
diff --git a/lib/librte_pmd_ixgbe/ixgbe/ixv.c b/lib/librte_pmd_ixgbe/ixgbe/ixv.c
new file mode 100644 (file)
index 0000000..93b25be
--- /dev/null
@@ -0,0 +1,4010 @@
+/******************************************************************************
+
+  Copyright (c) 2001-2010, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#ifdef HAVE_KERNEL_OPTION_HEADERS
+#include "opt_inet.h"
+#include "opt_inet6.h"
+#endif
+
+#include "ixv.h"
+
+/*********************************************************************
+ *  Driver version
+ *********************************************************************/
+char ixv_driver_version[] = "1.1.2";
+
+/*********************************************************************
+ *  PCI Device ID Table
+ *
+ *  Used by probe to select devices to load on
+ *  Last field stores an index into ixv_strings
+ *  Last entry must be all 0s
+ *
+ *  { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index }
+ *********************************************************************/
+
+static ixv_vendor_info_t ixv_vendor_info_array[] =
+{
+       {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, 0, 0, 0},
+       {IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, 0, 0, 0},
+       /* required last entry */
+       {0, 0, 0, 0, 0}
+};
+
+/*********************************************************************
+ *  Table of branding strings
+ *********************************************************************/
+
+static char    *ixv_strings[] = {
+       "Intel(R) PRO/10GbE Virtual Function Network Driver"
+};
+
+/*********************************************************************
+ *  Function prototypes
+ *********************************************************************/
+static int      ixv_probe(device_t);
+static int      ixv_attach(device_t);
+static int      ixv_detach(device_t);
+static int      ixv_shutdown(device_t);
+#if __FreeBSD_version < 800000
+static void     ixv_start(struct ifnet *);
+static void     ixv_start_locked(struct tx_ring *, struct ifnet *);
+#else
+static int     ixv_mq_start(struct ifnet *, struct mbuf *);
+static int     ixv_mq_start_locked(struct ifnet *,
+                   struct tx_ring *, struct mbuf *);
+static void    ixv_qflush(struct ifnet *);
+#endif
+static int      ixv_ioctl(struct ifnet *, u_long, caddr_t);
+static void    ixv_init(void *);
+static void    ixv_init_locked(struct adapter *);
+static void     ixv_stop(void *);
+static void     ixv_media_status(struct ifnet *, struct ifmediareq *);
+static int      ixv_media_change(struct ifnet *);
+static void     ixv_identify_hardware(struct adapter *);
+static int      ixv_allocate_pci_resources(struct adapter *);
+static int      ixv_allocate_msix(struct adapter *);
+static int     ixv_allocate_queues(struct adapter *);
+static int     ixv_setup_msix(struct adapter *);
+static void    ixv_free_pci_resources(struct adapter *);
+static void     ixv_local_timer(void *);
+static void     ixv_setup_interface(device_t, struct adapter *);
+static void     ixv_config_link(struct adapter *);
+
+static int      ixv_allocate_transmit_buffers(struct tx_ring *);
+static int     ixv_setup_transmit_structures(struct adapter *);
+static void    ixv_setup_transmit_ring(struct tx_ring *);
+static void     ixv_initialize_transmit_units(struct adapter *);
+static void     ixv_free_transmit_structures(struct adapter *);
+static void     ixv_free_transmit_buffers(struct tx_ring *);
+
+static int      ixv_allocate_receive_buffers(struct rx_ring *);
+static int      ixv_setup_receive_structures(struct adapter *);
+static int     ixv_setup_receive_ring(struct rx_ring *);
+static void     ixv_initialize_receive_units(struct adapter *);
+static void     ixv_free_receive_structures(struct adapter *);
+static void     ixv_free_receive_buffers(struct rx_ring *);
+
+static void     ixv_enable_intr(struct adapter *);
+static void     ixv_disable_intr(struct adapter *);
+static bool    ixv_txeof(struct tx_ring *);
+static bool    ixv_rxeof(struct ix_queue *, int);
+static void    ixv_rx_checksum(u32, struct mbuf *, u32);
+static void     ixv_set_multi(struct adapter *);
+static void     ixv_update_link_status(struct adapter *);
+static void    ixv_refresh_mbufs(struct rx_ring *, int);
+static int      ixv_xmit(struct tx_ring *, struct mbuf **);
+static int     ixv_sysctl_stats(SYSCTL_HANDLER_ARGS);
+static int     ixv_sysctl_debug(SYSCTL_HANDLER_ARGS);
+static int     ixv_set_flowcntl(SYSCTL_HANDLER_ARGS);
+static int     ixv_dma_malloc(struct adapter *, bus_size_t,
+                   struct ixv_dma_alloc *, int);
+static void     ixv_dma_free(struct adapter *, struct ixv_dma_alloc *);
+static void    ixv_add_rx_process_limit(struct adapter *, const char *,
+                   const char *, int *, int);
+static bool    ixv_tx_ctx_setup(struct tx_ring *, struct mbuf *);
+static bool    ixv_tso_setup(struct tx_ring *, struct mbuf *, u32 *);
+static void    ixv_set_ivar(struct adapter *, u8, u8, s8);
+static void    ixv_configure_ivars(struct adapter *);
+static u8 *    ixv_mc_array_itr(struct ixgbe_hw *, u8 **, u32 *);
+
+static void    ixv_setup_vlan_support(struct adapter *);
+static void    ixv_register_vlan(void *, struct ifnet *, u16);
+static void    ixv_unregister_vlan(void *, struct ifnet *, u16);
+
+static void    ixv_save_stats(struct adapter *);
+static void    ixv_init_stats(struct adapter *);
+static void    ixv_update_stats(struct adapter *);
+
+static __inline void ixv_rx_discard(struct rx_ring *, int);
+static __inline void ixv_rx_input(struct rx_ring *, struct ifnet *,
+                   struct mbuf *, u32);
+
+/* The MSI/X Interrupt handlers */
+static void    ixv_msix_que(void *);
+static void    ixv_msix_mbx(void *);
+
+/* Deferred interrupt tasklets */
+static void    ixv_handle_que(void *, int);
+static void    ixv_handle_mbx(void *, int);
+
+/*********************************************************************
+ *  FreeBSD Device Interface Entry Points
+ *********************************************************************/
+
+static device_method_t ixv_methods[] = {
+       /* Device interface */
+       DEVMETHOD(device_probe, ixv_probe),
+       DEVMETHOD(device_attach, ixv_attach),
+       DEVMETHOD(device_detach, ixv_detach),
+       DEVMETHOD(device_shutdown, ixv_shutdown),
+       {0, 0}
+};
+
+static driver_t ixv_driver = {
+       "ix", ixv_methods, sizeof(struct adapter),
+};
+
+extern devclass_t ixgbe_devclass;
+DRIVER_MODULE(ixv, pci, ixv_driver, ixgbe_devclass, 0, 0);
+MODULE_DEPEND(ixv, pci, 1, 1, 1);
+MODULE_DEPEND(ixv, ether, 1, 1, 1);
+
+/*
+** TUNEABLE PARAMETERS:
+*/
+
+/*
+** AIM: Adaptive Interrupt Moderation
+** which means that the interrupt rate
+** is varied over time based on the
+** traffic for that interrupt vector
+*/
+static int ixv_enable_aim = FALSE;
+TUNABLE_INT("hw.ixv.enable_aim", &ixv_enable_aim);
+
+/* How many packets rxeof tries to clean at a time */
+static int ixv_rx_process_limit = 128;
+TUNABLE_INT("hw.ixv.rx_process_limit", &ixv_rx_process_limit);
+
+/* Flow control setting, default to full */
+static int ixv_flow_control = ixgbe_fc_full;
+TUNABLE_INT("hw.ixv.flow_control", &ixv_flow_control);
+
+/*
+ * Header split: this causes the hardware to DMA
+ * the header into a seperate mbuf from the payload,
+ * it can be a performance win in some workloads, but
+ * in others it actually hurts, its off by default.
+ */
+static bool ixv_header_split = FALSE;
+TUNABLE_INT("hw.ixv.hdr_split", &ixv_header_split);
+
+/*
+** Number of TX descriptors per ring,
+** setting higher than RX as this seems
+** the better performing choice.
+*/
+static int ixv_txd = DEFAULT_TXD;
+TUNABLE_INT("hw.ixv.txd", &ixv_txd);
+
+/* Number of RX descriptors per ring */
+static int ixv_rxd = DEFAULT_RXD;
+TUNABLE_INT("hw.ixv.rxd", &ixv_rxd);
+
+/*
+** Shadow VFTA table, this is needed because
+** the real filter table gets cleared during
+** a soft reset and we need to repopulate it.
+*/
+static u32 ixv_shadow_vfta[VFTA_SIZE];
+
+/*********************************************************************
+ *  Device identification routine
+ *
+ *  ixv_probe determines if the driver should be loaded on
+ *  adapter based on PCI vendor/device id of the adapter.
+ *
+ *  return 0 on success, positive on failure
+ *********************************************************************/
+
+static int
+ixv_probe(device_t dev)
+{
+       ixv_vendor_info_t *ent;
+
+       u16     pci_vendor_id = 0;
+       u16     pci_device_id = 0;
+       u16     pci_subvendor_id = 0;
+       u16     pci_subdevice_id = 0;
+       char    adapter_name[256];
+
+
+       /* Only Intel devices are candidates; bail out early otherwise. */
+       pci_vendor_id = pci_get_vendor(dev);
+       if (pci_vendor_id != IXGBE_INTEL_VENDOR_ID)
+               return (ENXIO);
+
+       pci_device_id = pci_get_device(dev);
+       pci_subvendor_id = pci_get_subvendor(dev);
+       pci_subdevice_id = pci_get_subdevice(dev);
+
+       /*
+       ** Walk the supported-device table (terminated by a zero
+       ** vendor_id).  A subvendor/subdevice of 0 in the table acts
+       ** as a wildcard that matches any subsystem ID.
+       */
+       ent = ixv_vendor_info_array;
+       while (ent->vendor_id != 0) {
+               if ((pci_vendor_id == ent->vendor_id) &&
+                   (pci_device_id == ent->device_id) &&
+
+                   ((pci_subvendor_id == ent->subvendor_id) ||
+                    (ent->subvendor_id == 0)) &&
+
+                   ((pci_subdevice_id == ent->subdevice_id) ||
+                    (ent->subdevice_id == 0))) {
+                       /* Advertise the matched adapter + driver version. */
+                       sprintf(adapter_name, "%s, Version - %s",
+                               ixv_strings[ent->index],
+                               ixv_driver_version);
+                       device_set_desc_copy(dev, adapter_name);
+                       return (0);
+               }
+               ent++;
+       }
+       return (ENXIO);
+}
+
+/*********************************************************************
+ *  Device initialization routine
+ *
+ *  The attach entry point is called when the driver is being loaded.
+ *  This routine identifies the type of hardware, allocates all resources
+ *  and initializes the hardware.
+ *
+ *  return 0 on success, positive on failure
+ *********************************************************************/
+
+static int
+ixv_attach(device_t dev)
+{
+       struct adapter *adapter;
+       struct ixgbe_hw *hw;
+       int             error = 0;
+
+       INIT_DEBUGOUT("ixv_attach: begin");
+
+       /* Allocate, clear, and link in our adapter structure */
+       adapter = device_get_softc(dev);
+       adapter->dev = adapter->osdep.dev = dev;
+       hw = &adapter->hw;
+
+       /* Core Lock Init*/
+       IXV_CORE_LOCK_INIT(adapter, device_get_nameunit(dev));
+
+       /* SYSCTL APIs */
+       SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+                       SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+                       OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW,
+                       adapter, 0, ixv_sysctl_stats, "I", "Statistics");
+
+       SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+                       SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+                       OID_AUTO, "debug", CTLTYPE_INT | CTLFLAG_RW,
+                       adapter, 0, ixv_sysctl_debug, "I", "Debug Info");
+
+       SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+                       SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+                       OID_AUTO, "flow_control", CTLTYPE_INT | CTLFLAG_RW,
+                       adapter, 0, ixv_set_flowcntl, "I", "Flow Control");
+
+       SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
+                       SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+                       OID_AUTO, "enable_aim", CTLTYPE_INT|CTLFLAG_RW,
+                       &ixv_enable_aim, 1, "Interrupt Moderation");
+
+       /* Set up the timer callout */
+       callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0);
+
+       /* Determine hardware revision */
+       ixv_identify_hardware(adapter);
+
+       /* Do base PCI setup - map BAR0 */
+       if (ixv_allocate_pci_resources(adapter)) {
+               device_printf(dev, "Allocation of PCI resources failed\n");
+               error = ENXIO;
+               goto err_out;
+       }
+
+       /*
+       ** Descriptor count sanity checks: the ring byte size must be
+       ** a multiple of DBA_ALIGN and the count within hardware limits,
+       ** otherwise fall back to the compile-time default.
+       */
+       if (((ixv_txd * sizeof(union ixgbe_adv_tx_desc)) % DBA_ALIGN) != 0 ||
+           ixv_txd < MIN_TXD || ixv_txd > MAX_TXD) {
+               device_printf(dev, "TXD config issue, using default!\n");
+               adapter->num_tx_desc = DEFAULT_TXD;
+       } else
+               adapter->num_tx_desc = ixv_txd;
+
+       /*
+       ** BUGFIX: the RX bounds previously used MIN_TXD/MAX_TXD
+       ** (copy-paste from the TX check above); validate against
+       ** the RX limits instead.
+       */
+       if (((ixv_rxd * sizeof(union ixgbe_adv_rx_desc)) % DBA_ALIGN) != 0 ||
+           ixv_rxd < MIN_RXD || ixv_rxd > MAX_RXD) {
+               device_printf(dev, "RXD config issue, using default!\n");
+               adapter->num_rx_desc = DEFAULT_RXD;
+       } else
+               adapter->num_rx_desc = ixv_rxd;
+
+       /* Allocate our TX/RX Queues */
+       if (ixv_allocate_queues(adapter)) {
+               error = ENOMEM;
+               goto err_out;
+       }
+
+       /*
+       ** Initialize the shared code: its
+       ** at this point the mac type is set.
+       */
+       error = ixgbe_init_shared_code(hw);
+       if (error) {
+               device_printf(dev,"Shared Code Initialization Failure\n");
+               error = EIO;
+               goto err_late;
+       }
+
+       /* Setup the mailbox */
+       ixgbe_init_mbx_params_vf(hw);
+
+       ixgbe_reset_hw(hw);
+
+       /* Get Hardware Flow Control setting */
+       hw->fc.requested_mode = ixgbe_fc_full;
+       hw->fc.pause_time = IXV_FC_PAUSE;
+       hw->fc.low_water = IXV_FC_LO;
+       hw->fc.high_water[0] = IXV_FC_HI;
+       hw->fc.send_xon = TRUE;
+
+       error = ixgbe_init_hw(hw);
+       if (error) {
+               device_printf(dev,"Hardware Initialization Failure\n");
+               error = EIO;
+               goto err_late;
+       }
+
+       error = ixv_allocate_msix(adapter);
+       if (error)
+               goto err_late;
+
+       /* Setup OS specific network interface */
+       ixv_setup_interface(dev, adapter);
+
+       /* Sysctl for limiting the amount of work done in the taskqueue */
+       ixv_add_rx_process_limit(adapter, "rx_processing_limit",
+           "max number of rx packets to process", &adapter->rx_process_limit,
+           ixv_rx_process_limit);
+
+       /* Do the stats setup */
+       ixv_save_stats(adapter);
+       ixv_init_stats(adapter);
+
+       /* Register for VLAN events */
+       adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
+           ixv_register_vlan, adapter, EVENTHANDLER_PRI_FIRST);
+       adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
+           ixv_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST);
+
+       INIT_DEBUGOUT("ixv_attach: end");
+       return (0);
+
+err_late:
+       ixv_free_transmit_structures(adapter);
+       ixv_free_receive_structures(adapter);
+err_out:
+       ixv_free_pci_resources(adapter);
+       return (error);
+
+}
+
+/*********************************************************************
+ *  Device removal routine
+ *
+ *  The detach entry point is called when the driver is being removed.
+ *  This routine stops the adapter and deallocates all the resources
+ *  that were allocated for driver operation.
+ *
+ *  return 0 on success, positive on failure
+ *********************************************************************/
+
+static int
+ixv_detach(device_t dev)
+{
+       struct adapter *adapter = device_get_softc(dev);
+       struct ix_queue *que = adapter->queues;
+
+       INIT_DEBUGOUT("ixv_detach: begin");
+
+       /* Make sure VLANS are not using driver */
+       if (adapter->ifp->if_vlantrunk != NULL) {
+               device_printf(dev,"Vlan in use, detach first\n");
+               return (EBUSY);
+       }
+
+       /* Quiesce the hardware under the core lock */
+       IXV_CORE_LOCK(adapter);
+       ixv_stop(adapter);
+       IXV_CORE_UNLOCK(adapter);
+
+       /* Drain and free the per-queue taskqueues */
+       for (int i = 0; i < adapter->num_queues; i++, que++) {
+               if (que->tq) {
+                       taskqueue_drain(que->tq, &que->que_task);
+                       taskqueue_free(que->tq);
+               }
+       }
+
+       /* Drain the Link queue */
+       if (adapter->tq) {
+               taskqueue_drain(adapter->tq, &adapter->mbx_task);
+               taskqueue_free(adapter->tq);
+       }
+
+       /* Unregister VLAN events */
+       if (adapter->vlan_attach != NULL)
+               EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
+       if (adapter->vlan_detach != NULL)
+               EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);
+
+       /* Tear down the interface, timer, and bus resources */
+       ether_ifdetach(adapter->ifp);
+       callout_drain(&adapter->timer);
+       ixv_free_pci_resources(adapter);
+       bus_generic_detach(dev);
+       if_free(adapter->ifp);
+
+       ixv_free_transmit_structures(adapter);
+       ixv_free_receive_structures(adapter);
+
+       IXV_CORE_LOCK_DESTROY(adapter);
+       return (0);
+}
+
+/*********************************************************************
+ *
+ *  Shutdown entry point
+ *
+ **********************************************************************/
+static int
+ixv_shutdown(device_t dev)
+{
+       struct adapter *adapter = device_get_softc(dev);
+
+       /* Just stop the adapter; no resources are released on shutdown. */
+       IXV_CORE_LOCK(adapter);
+       ixv_stop(adapter);
+       IXV_CORE_UNLOCK(adapter);
+       return (0);
+}
+
+#if __FreeBSD_version < 800000
+/*********************************************************************
+ *  Transmit entry point
+ *
+ *  ixv_start is called by the stack to initiate a transmit.
+ *  The driver will remain in this routine as long as there are
+ *  packets to transmit and transmit resources are available.
+ *  In case resources are not available stack is notified and
+ *  the packet is requeued.
+ **********************************************************************/
+static void
+ixv_start_locked(struct tx_ring *txr, struct ifnet * ifp)
+{
+       struct mbuf    *m_head;
+       struct adapter *adapter = txr->adapter;
+
+       IXV_TX_LOCK_ASSERT(txr);
+
+       /* Only transmit when running, not flow-blocked, and link is up. */
+       if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
+           IFF_DRV_RUNNING)
+               return;
+       if (!adapter->link_active)
+               return;
+
+       while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
+
+               IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
+               if (m_head == NULL)
+                       break;
+
+               /*
+               ** On xmit failure: if the mbuf survived, mark the
+               ** interface flow-blocked and requeue it at the head
+               ** so ordering is preserved.
+               */
+               if (ixv_xmit(txr, &m_head)) {
+                       if (m_head == NULL)
+                               break;
+                       ifp->if_drv_flags |= IFF_DRV_OACTIVE;
+                       IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
+                       break;
+               }
+               /* Send a copy of the frame to the BPF listener */
+               ETHER_BPF_MTAP(ifp, m_head);
+
+               /* Set watchdog on */
+               txr->watchdog_check = TRUE;
+               txr->watchdog_time = ticks;
+
+       }
+       return;
+}
+
+/*
+ * Legacy TX start - called by the stack, this
+ * always uses the first tx ring, and should
+ * not be used with multiqueue tx enabled.
+ */
+static void
+ixv_start(struct ifnet *ifp)
+{
+       struct adapter *adapter = ifp->if_softc;
+       struct tx_ring  *txr = adapter->tx_rings;
+
+       /* Take the first ring's TX lock and hand off to the worker. */
+       if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+               IXV_TX_LOCK(txr);
+               ixv_start_locked(txr, ifp);
+               IXV_TX_UNLOCK(txr);
+       }
+       return;
+}
+
+#else
+
+/*
+** Multiqueue Transmit driver
+**
+*/
+static int
+ixv_mq_start(struct ifnet *ifp, struct mbuf *m)
+{
+       struct adapter  *adapter = ifp->if_softc;
+       struct ix_queue *que;
+       struct tx_ring  *txr;
+       int             i = 0, err = 0;
+
+       /* Which queue to use: spread by flow id when the stack set one. */
+       if ((m->m_flags & M_FLOWID) != 0)
+               i = m->m_pkthdr.flowid % adapter->num_queues;
+
+       txr = &adapter->tx_rings[i];
+       que = &adapter->queues[i];
+
+       /*
+       ** Try the ring lock; if contended, buffer the mbuf in the
+       ** ring's drbr and let the queue task transmit it later
+       ** instead of blocking here.
+       */
+       if (IXV_TX_TRYLOCK(txr)) {
+               err = ixv_mq_start_locked(ifp, txr, m);
+               IXV_TX_UNLOCK(txr);
+       } else {
+               err = drbr_enqueue(ifp, txr->br, m);
+               taskqueue_enqueue(que->tq, &que->que_task);
+       }
+
+       return (err);
+}
+
+static int
+ixv_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m)
+{
+       struct adapter  *adapter = txr->adapter;
+        struct mbuf     *next;
+        int             enqueued, err = 0;
+
+       /* Not running or no link: just buffer the mbuf (if any). */
+       if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
+           IFF_DRV_RUNNING || adapter->link_active == 0) {
+               if (m != NULL)
+                       err = drbr_enqueue(ifp, txr->br, m);
+               return (err);
+       }
+
+       /* Do a clean if descriptors are low */
+       if (txr->tx_avail <= IXV_TX_CLEANUP_THRESHOLD)
+               ixv_txeof(txr);
+
+       /*
+       ** Choose the first mbuf to send: drain the buf_ring first
+       ** when it is non-empty (preserves ordering), otherwise send
+       ** the caller's mbuf directly.
+       */
+       enqueued = 0;
+       if (m == NULL) {
+               next = drbr_dequeue(ifp, txr->br);
+       } else if (drbr_needs_enqueue(ifp, txr->br)) {
+               if ((err = drbr_enqueue(ifp, txr->br, m)) != 0)
+                       return (err);
+               next = drbr_dequeue(ifp, txr->br);
+       } else
+               next = m;
+
+       /* Process the queue */
+       while (next != NULL) {
+               /* On failure re-buffer the surviving mbuf and stop. */
+               if ((err = ixv_xmit(txr, &next)) != 0) {
+                       if (next != NULL)
+                               err = drbr_enqueue(ifp, txr->br, next);
+                       break;
+               }
+               enqueued++;
+               drbr_stats_update(ifp, next->m_pkthdr.len, next->m_flags);
+               /* Send a copy of the frame to the BPF listener */
+               ETHER_BPF_MTAP(ifp, next);
+               if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
+                       break;
+               /* Running low on descriptors: flow-block the interface. */
+               if (txr->tx_avail <= IXV_TX_OP_THRESHOLD) {
+                       ifp->if_drv_flags |= IFF_DRV_OACTIVE;
+                       break;
+               }
+               next = drbr_dequeue(ifp, txr->br);
+       }
+
+       if (enqueued > 0) {
+               /* Set watchdog on */
+               txr->watchdog_check = TRUE;
+               txr->watchdog_time = ticks;
+       }
+
+       return (err);
+}
+
+/*
+** Flush all ring buffers
+*/
+static void
+ixv_qflush(struct ifnet *ifp)
+{
+       struct adapter  *adapter = ifp->if_softc;
+       struct tx_ring  *txr = adapter->tx_rings;
+       struct mbuf     *m;
+
+       /* Free every buffered mbuf on every ring's buf_ring. */
+       for (int i = 0; i < adapter->num_queues; i++, txr++) {
+               IXV_TX_LOCK(txr);
+               while ((m = buf_ring_dequeue_sc(txr->br)) != NULL)
+                       m_freem(m);
+               IXV_TX_UNLOCK(txr);
+       }
+       /* Then let the stack flush its own send queue. */
+       if_qflush(ifp);
+}
+
+#endif
+
+/*********************************************************************
+ *  Ioctl entry point
+ *
+ *  ixv_ioctl is called when the user wants to configure the
+ *  interface.
+ *
+ *  return 0 on success, positive on failure
+ **********************************************************************/
+
+static int
+ixv_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
+{
+       struct adapter  *adapter = ifp->if_softc;
+       struct ifreq    *ifr = (struct ifreq *) data;
+#if defined(INET) || defined(INET6)
+       struct ifaddr *ifa = (struct ifaddr *)data;
+#endif
+       int             error = 0;
+       bool            avoid_reset = FALSE;
+
+       switch (command) {
+
+        case SIOCSIFADDR:
+#ifdef INET
+               if (ifa->ifa_addr->sa_family == AF_INET)
+                       avoid_reset = TRUE;
+#endif
+#ifdef INET6
+               if (ifa->ifa_addr->sa_family == AF_INET6)
+                       avoid_reset = TRUE;
+#endif
+               /*
+               ** Calling init results in link renegotiation,
+               ** so we avoid doing it when possible.
+               */
+               if (avoid_reset) {
+                       ifp->if_flags |= IFF_UP;
+                       if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
+                               ixv_init(adapter);
+                       if (!(ifp->if_flags & IFF_NOARP))
+                               arp_ifinit(ifp, ifa);
+               } else
+                       error = ether_ioctl(ifp, command, data);
+               break;
+
+       case SIOCSIFMTU:
+               IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
+               /* Reject MTUs the hardware frame size cannot carry. */
+               if (ifr->ifr_mtu > IXV_MAX_FRAME_SIZE - ETHER_HDR_LEN) {
+                       error = EINVAL;
+               } else {
+                       IXV_CORE_LOCK(adapter);
+                       ifp->if_mtu = ifr->ifr_mtu;
+                       adapter->max_frame_size =
+                               ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+                       /* Re-init so rings/buffers match the new size. */
+                       ixv_init_locked(adapter);
+                       IXV_CORE_UNLOCK(adapter);
+               }
+               break;
+       case SIOCSIFFLAGS:
+               IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
+               IXV_CORE_LOCK(adapter);
+               /* Bring the interface up or down to match IFF_UP. */
+               if (ifp->if_flags & IFF_UP) {
+                       if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
+                               ixv_init_locked(adapter);
+               } else
+                       if (ifp->if_drv_flags & IFF_DRV_RUNNING)
+                               ixv_stop(adapter);
+               adapter->if_flags = ifp->if_flags;
+               IXV_CORE_UNLOCK(adapter);
+               break;
+       case SIOCADDMULTI:
+       case SIOCDELMULTI:
+               IOCTL_DEBUGOUT("ioctl: SIOC(ADD|DEL)MULTI");
+               /* Reprogram the multicast filter with interrupts masked. */
+               if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+                       IXV_CORE_LOCK(adapter);
+                       ixv_disable_intr(adapter);
+                       ixv_set_multi(adapter);
+                       ixv_enable_intr(adapter);
+                       IXV_CORE_UNLOCK(adapter);
+               }
+               break;
+       case SIOCSIFMEDIA:
+       case SIOCGIFMEDIA:
+               IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
+               error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
+               break;
+       case SIOCSIFCAP:
+       {
+               /* Toggle only the capability bits the caller changed. */
+               int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
+               IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
+               if (mask & IFCAP_HWCSUM)
+                       ifp->if_capenable ^= IFCAP_HWCSUM;
+               if (mask & IFCAP_TSO4)
+                       ifp->if_capenable ^= IFCAP_TSO4;
+               if (mask & IFCAP_LRO)
+                       ifp->if_capenable ^= IFCAP_LRO;
+               if (mask & IFCAP_VLAN_HWTAGGING)
+                       ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
+               /* Re-init so hardware offloads match the new settings. */
+               if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+                       IXV_CORE_LOCK(adapter);
+                       ixv_init_locked(adapter);
+                       IXV_CORE_UNLOCK(adapter);
+               }
+               VLAN_CAPABILITIES(ifp);
+               break;
+       }
+
+       default:
+               IOCTL_DEBUGOUT1("ioctl: UNKNOWN (0x%X)\n", (int)command);
+               error = ether_ioctl(ifp, command, data);
+               break;
+       }
+
+       return (error);
+}
+
+/*********************************************************************
+ *  Init entry point
+ *
+ *  This routine is used in two ways. It is used by the stack as
+ *  init entry point in network interface structure. It is also used
+ *  by the driver as a hw/sw initialization routine to get to a
+ *  consistent state.
+ *
+ *  return 0 on success, positive on failure
+ **********************************************************************/
+#define IXGBE_MHADD_MFS_SHIFT 16
+
+static void
+ixv_init_locked(struct adapter *adapter)
+{
+       struct ifnet    *ifp = adapter->ifp;
+       device_t        dev = adapter->dev;
+       struct ixgbe_hw *hw = &adapter->hw;
+       u32             mhadd, gpie;
+
+       INIT_DEBUGOUT("ixv_init: begin");
+       mtx_assert(&adapter->core_mtx, MA_OWNED);
+       /* Stop any in-progress activity before reconfiguring. */
+       hw->adapter_stopped = FALSE;
+       ixgbe_stop_adapter(hw);
+        callout_stop(&adapter->timer);
+
+        /* reprogram the RAR[0] in case user changed it. */
+        ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
+
+       /* Get the latest mac address, User can use a LAA */
+       bcopy(IF_LLADDR(adapter->ifp), hw->mac.addr,
+            IXGBE_ETH_LENGTH_OF_ADDRESS);
+        ixgbe_set_rar(hw, 0, hw->mac.addr, 0, 1);
+       hw->addr_ctrl.rar_used_count = 1;
+
+       /* Prepare transmit descriptors and buffers */
+       if (ixv_setup_transmit_structures(adapter)) {
+               device_printf(dev,"Could not setup transmit structures\n");
+               ixv_stop(adapter);
+               return;
+       }
+
+       /* Reset, then program the TX side of the hardware. */
+       ixgbe_reset_hw(hw);
+       ixv_initialize_transmit_units(adapter);
+
+       /* Setup Multicast table */
+       ixv_set_multi(adapter);
+
+       /*
+       ** Determine the correct mbuf pool
+       ** for doing jumbo/headersplit
+       */
+       if (ifp->if_mtu > ETHERMTU)
+               adapter->rx_mbuf_sz = MJUMPAGESIZE;
+       else
+               adapter->rx_mbuf_sz = MCLBYTES;
+
+       /* Prepare receive descriptors and buffers */
+       if (ixv_setup_receive_structures(adapter)) {
+               device_printf(dev,"Could not setup receive structures\n");
+               ixv_stop(adapter);
+               return;
+       }
+
+       /* Configure RX settings */
+       ixv_initialize_receive_units(adapter);
+
+       /* Enable Enhanced MSIX mode */
+       gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE);
+       gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME;
+       gpie |= IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD;
+        IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
+
+       /* Set the various hardware offload abilities */
+       ifp->if_hwassist = 0;
+       if (ifp->if_capenable & IFCAP_TSO4)
+               ifp->if_hwassist |= CSUM_TSO;
+       if (ifp->if_capenable & IFCAP_TXCSUM) {
+               ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
+#if __FreeBSD_version >= 800000
+               ifp->if_hwassist |= CSUM_SCTP;
+#endif
+       }
+
+       /* Set MTU size: program MHADD only for jumbo frames. */
+       if (ifp->if_mtu > ETHERMTU) {
+               mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
+               mhadd &= ~IXGBE_MHADD_MFS_MASK;
+               mhadd |= adapter->max_frame_size << IXGBE_MHADD_MFS_SHIFT;
+               IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
+       }
+
+       /* Set up VLAN offload and filter */
+       ixv_setup_vlan_support(adapter);
+
+       /* Restart the watchdog/stats timer (one-second period). */
+       callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
+
+       /* Set up MSI/X routing */
+       ixv_configure_ivars(adapter);
+
+       /* Set up auto-mask */
+       IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, IXGBE_EICS_RTX_QUEUE);
+
+        /* Set moderation on the Link interrupt */
+        IXGBE_WRITE_REG(hw, IXGBE_VTEITR(adapter->mbxvec), IXV_LINK_ITR);
+
+       /* Stats init */
+       ixv_init_stats(adapter);
+
+       /* Config/Enable Link */
+       ixv_config_link(adapter);
+
+       /* And now turn on interrupts */
+       ixv_enable_intr(adapter);
+
+       /* Now inform the stack we're ready */
+       ifp->if_drv_flags |= IFF_DRV_RUNNING;
+       ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+
+       return;
+}
+
+static void
+ixv_init(void *arg)
+{
+       struct adapter *adapter = arg;
+
+       /* Locked wrapper around ixv_init_locked() for if_init callers. */
+       IXV_CORE_LOCK(adapter);
+       ixv_init_locked(adapter);
+       IXV_CORE_UNLOCK(adapter);
+       return;
+}
+
+
+/*
+**
+** MSIX Interrupt Handlers and Tasklets
+**
+*/
+
+static inline void
+ixv_enable_queue(struct adapter *adapter, u32 vector)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       u32     queue = 1 << vector;
+       u32     mask;
+
+       /* Unmask this vector's interrupt in the VF interrupt mask set. */
+       mask = (IXGBE_EIMS_RTX_QUEUE & queue);
+       IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
+}
+
+static inline void
+ixv_disable_queue(struct adapter *adapter, u32 vector)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       /*
+       ** NOTE(review): (1 << vector) is evaluated in int before the
+       ** cast widens it, so vector >= 31 would overflow — presumably
+       ** VF vectors stay small enough; confirm.
+       */
+       u64     queue = (u64)(1 << vector);
+       u32     mask;
+
+       /* Mask this vector's interrupt via the interrupt mask clear. */
+       mask = (IXGBE_EIMS_RTX_QUEUE & queue);
+       IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
+}
+
+static inline void
+ixv_rearm_queues(struct adapter *adapter, u64 queues)
+{
+       /* Trigger (re-fire) interrupts for the given queue bitmap. */
+       u32 mask = (IXGBE_EIMS_RTX_QUEUE & queues);
+       IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEICS, mask);
+}
+
+
+static void
+ixv_handle_que(void *context, int pending)
+{
+       struct ix_queue *que = context;
+       struct adapter  *adapter = que->adapter;
+       struct tx_ring  *txr = que->txr;
+       struct ifnet    *ifp = adapter->ifp;
+       bool            more;
+
+       if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+               /* Drain RX up to the configured work limit, then TX. */
+               more = ixv_rxeof(que, adapter->rx_process_limit);
+               IXV_TX_LOCK(txr);
+               ixv_txeof(txr);
+#if __FreeBSD_version >= 800000
+               if (!drbr_empty(ifp, txr->br))
+                       ixv_mq_start_locked(ifp, txr, NULL);
+#else
+               if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
+                       ixv_start_locked(txr, ifp);
+#endif
+               IXV_TX_UNLOCK(txr);
+               /* RX work remains: reschedule ourselves, leave IRQ masked. */
+               if (more) {
+                       taskqueue_enqueue(que->tq, &que->que_task);
+                       return;
+               }
+       }
+
+       /* Reenable this interrupt */
+       ixv_enable_queue(adapter, que->msix);
+       return;
+}
+
+/*********************************************************************
+ *
+ *  MSI Queue Interrupt Service routine
+ *
+ **********************************************************************/
+void
+ixv_msix_que(void *arg)
+{
+       struct ix_queue *que = arg;
+       struct adapter  *adapter = que->adapter;
+       struct tx_ring  *txr = que->txr;
+       struct rx_ring  *rxr = que->rxr;
+       bool            more_tx, more_rx;
+       u32             newitr = 0;
+
+       /* Mask this vector while we service it. */
+       ixv_disable_queue(adapter, que->msix);
+       ++que->irqs;
+
+       more_rx = ixv_rxeof(que, adapter->rx_process_limit);
+
+       IXV_TX_LOCK(txr);
+       more_tx = ixv_txeof(txr);
+       /*
+       ** Make certain that if the stack
+       ** has anything queued the task gets
+       ** scheduled to handle it.
+       */
+#if __FreeBSD_version < 800000
+       if (!IFQ_DRV_IS_EMPTY(&adapter->ifp->if_snd))
+#else
+       if (!drbr_empty(adapter->ifp, txr->br))
+#endif
+                more_tx = 1;
+       IXV_TX_UNLOCK(txr);
+
+       /*
+       ** NOTE(review): second rxeof pass — this overwrites the first
+       ** more_rx result after TX cleanup; presumably a deliberate
+       ** re-poll, but confirm it is not a duplicated line.
+       */
+       more_rx = ixv_rxeof(que, adapter->rx_process_limit);
+
+       /* Do AIM now? */
+
+       if (ixv_enable_aim == FALSE)
+               goto no_calc;
+       /*
+       ** Do Adaptive Interrupt Moderation:
+        **  - Write out last calculated setting
+       **  - Calculate based on average size over
+       **    the last interval.
+       */
+        if (que->eitr_setting)
+                IXGBE_WRITE_REG(&adapter->hw,
+                    IXGBE_VTEITR(que->msix),
+                   que->eitr_setting);
+        que->eitr_setting = 0;
+
+        /* Idle, do nothing */
+        if ((txr->bytes == 0) && (rxr->bytes == 0))
+                goto no_calc;
+
+       /* Average bytes/packet over the interval drives the new ITR. */
+       if ((txr->bytes) && (txr->packets))
+                       newitr = txr->bytes/txr->packets;
+       if ((rxr->bytes) && (rxr->packets))
+               newitr = max(newitr,
+                   (rxr->bytes / rxr->packets));
+       newitr += 24; /* account for hardware frame, crc */
+
+       /* set an upper boundary */
+       newitr = min(newitr, 3000);
+
+       /* Be nice to the mid range */
+       if ((newitr > 300) && (newitr < 1200))
+               newitr = (newitr / 3);
+       else
+               newitr = (newitr / 2);
+
+       /* Duplicate into the high half-word of the register value. */
+       newitr |= newitr << 16;
+
+        /* save for next interrupt */
+        que->eitr_setting = newitr;
+
+        /* Reset state */
+        txr->bytes = 0;
+        txr->packets = 0;
+        rxr->bytes = 0;
+        rxr->packets = 0;
+
+no_calc:
+       /* More work pending: defer to the taskqueue, else unmask. */
+       if (more_tx || more_rx)
+               taskqueue_enqueue(que->tq, &que->que_task);
+       else /* Reenable this interrupt */
+               ixv_enable_queue(adapter, que->msix);
+       return;
+}
+
+static void
+ixv_msix_mbx(void *arg)
+{
+       struct adapter  *adapter = arg;
+       struct ixgbe_hw *hw = &adapter->hw;
+       u32             reg;
+
+       ++adapter->mbx_irq;
+
+       /* First get the cause */
+       reg = IXGBE_READ_REG(hw, IXGBE_VTEICS);
+       /* Clear interrupt with write */
+       IXGBE_WRITE_REG(hw, IXGBE_VTEICR, reg);
+
+       /* Link status change: defer handling to the mailbox task. */
+       if (reg & IXGBE_EICR_LSC)
+               taskqueue_enqueue(adapter->tq, &adapter->mbx_task);
+
+       /* Re-enable the "other" (mailbox/link) interrupt. */
+       IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_EIMS_OTHER);
+       return;
+}
+
+/*********************************************************************
+ *
+ *  Media Ioctl callback
+ *
+ *  This routine is called whenever the user queries the status of
+ *  the interface using ifconfig.
+ *
+ **********************************************************************/
+static void
+ixv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
+{
+       struct adapter *adapter = ifp->if_softc;
+
+       INIT_DEBUGOUT("ixv_media_status: begin");
+       IXV_CORE_LOCK(adapter);
+       /* Refresh link state before reporting. */
+       ixv_update_link_status(adapter);
+
+       ifmr->ifm_status = IFM_AVALID;
+       ifmr->ifm_active = IFM_ETHER;
+
+       /* No link: report valid-but-inactive and stop. */
+       if (!adapter->link_active) {
+               IXV_CORE_UNLOCK(adapter);
+               return;
+       }
+
+       ifmr->ifm_status |= IFM_ACTIVE;
+
+       switch (adapter->link_speed) {
+               case IXGBE_LINK_SPEED_1GB_FULL:
+                       ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
+                       break;
+               case IXGBE_LINK_SPEED_10GB_FULL:
+                       /* 10G: only full-duplex is flagged here. */
+                       ifmr->ifm_active |= IFM_FDX;
+                       break;
+       }
+
+       IXV_CORE_UNLOCK(adapter);
+
+       return;
+}
+
+/*********************************************************************
+ *
+ *  Media Ioctl callback
+ *
+ *  This routine is called when the user changes speed/duplex using
+ *  media/mediopt option with ifconfig.
+ *
+ **********************************************************************/
+static int
+ixv_media_change(struct ifnet * ifp)
+{
+       struct adapter *adapter = ifp->if_softc;
+       struct ifmedia *ifm = &adapter->media;
+
+       INIT_DEBUGOUT("ixv_media_change: begin");
+
+       if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
+               return (EINVAL);
+
+       /* The VF cannot select speed/duplex; only autoselect is valid. */
+        switch (IFM_SUBTYPE(ifm->ifm_media)) {
+        case IFM_AUTO:
+                break;
+        default:
+                device_printf(adapter->dev, "Only auto media type\n");
+               return (EINVAL);
+        }
+
+       return (0);
+}
+
+/*********************************************************************
+ *
+ *  This routine maps the mbufs to tx descriptors, allowing the
+ *  TX engine to transmit the packets. 
+ *     - return 0 on success, positive on failure
+ *
+ **********************************************************************/
+
+static int
+ixv_xmit(struct tx_ring *txr, struct mbuf **m_headp)
+{
+       struct adapter  *adapter = txr->adapter;
+       u32             olinfo_status = 0, cmd_type_len;
+       u32             paylen = 0;
+       int             i, j, error, nsegs;
+       int             first, last = 0;
+       struct mbuf     *m_head;
+       bus_dma_segment_t segs[32];
+       bus_dmamap_t    map;
+       struct ixv_tx_buf *txbuf, *txbuf_mapped;
+       union ixgbe_adv_tx_desc *txd = NULL;
+
+       m_head = *m_headp;
+
+       /* Basic descriptor defines */
+        cmd_type_len = (IXGBE_ADVTXD_DTYP_DATA |
+           IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);
+
+       if (m_head->m_flags & M_VLANTAG)
+               cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
+
+        /*
+         * Important to capture the first descriptor
+         * used because it will contain the index of
+         * the one we tell the hardware to report back
+         */
+        first = txr->next_avail_desc;
+       txbuf = &txr->tx_buffers[first];
+       txbuf_mapped = txbuf;
+       map = txbuf->map;
+
+       /*
+        * Map the packet for DMA.
+        */
+       error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
+           *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
+
+       if (error == EFBIG) {
+               struct mbuf *m;
+
+               m = m_defrag(*m_headp, M_DONTWAIT);
+               if (m == NULL) {
+                       adapter->mbuf_defrag_failed++;
+                       m_freem(*m_headp);
+                       *m_headp = NULL;
+                       return (ENOBUFS);
+               }
+               *m_headp = m;
+
+               /* Try it again */
+               error = bus_dmamap_load_mbuf_sg(txr->txtag, map,
+                   *m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
+
+               if (error == ENOMEM) {
+                       adapter->no_tx_dma_setup++;
+                       return (error);
+               } else if (error != 0) {
+                       adapter->no_tx_dma_setup++;
+                       m_freem(*m_headp);
+                       *m_headp = NULL;
+                       return (error);
+               }
+       } else if (error == ENOMEM) {
+               adapter->no_tx_dma_setup++;
+               return (error);
+       } else if (error != 0) {
+               adapter->no_tx_dma_setup++;
+               m_freem(*m_headp);
+               *m_headp = NULL;
+               return (error);
+       }
+
+       /* Make certain there are enough descriptors */
+       if (nsegs > txr->tx_avail - 2) {
+               txr->no_desc_avail++;
+               error = ENOBUFS;
+               goto xmit_fail;
+       }
+       m_head = *m_headp;
+
+       /*
+       ** Set up the appropriate offload context
+       ** this becomes the first descriptor of 
+       ** a packet.
+       */
+       if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
+               if (ixv_tso_setup(txr, m_head, &paylen)) {
+                       cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
+                       olinfo_status |= IXGBE_TXD_POPTS_IXSM << 8;
+                       olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
+                       olinfo_status |= paylen << IXGBE_ADVTXD_PAYLEN_SHIFT;
+                       ++adapter->tso_tx;
+               } else
+                       return (ENXIO);
+       } else if (ixv_tx_ctx_setup(txr, m_head))
+               olinfo_status |= IXGBE_TXD_POPTS_TXSM << 8;
+
+        /* Record payload length */
+       if (paylen == 0)
+               olinfo_status |= m_head->m_pkthdr.len <<
+                   IXGBE_ADVTXD_PAYLEN_SHIFT;
+
+       i = txr->next_avail_desc;
+       for (j = 0; j < nsegs; j++) {
+               bus_size_t seglen;
+               bus_addr_t segaddr;
+
+               txbuf = &txr->tx_buffers[i];
+               txd = &txr->tx_base[i];
+               seglen = segs[j].ds_len;
+               segaddr = htole64(segs[j].ds_addr);
+
+               txd->read.buffer_addr = segaddr;
+               txd->read.cmd_type_len = htole32(txr->txd_cmd |
+                   cmd_type_len |seglen);
+               txd->read.olinfo_status = htole32(olinfo_status);
+               last = i; /* descriptor that will get completion IRQ */
+
+               if (++i == adapter->num_tx_desc)
+                       i = 0;
+
+               txbuf->m_head = NULL;
+               txbuf->eop_index = -1;
+       }
+
+       txd->read.cmd_type_len |=
+           htole32(IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS);
+       txr->tx_avail -= nsegs;
+       txr->next_avail_desc = i;
+
+       txbuf->m_head = m_head;
+       txbuf->map = map;
+       bus_dmamap_sync(txr->txtag, map, BUS_DMASYNC_PREWRITE);
+
+        /* Set the index of the descriptor that will be marked done */
+        txbuf = &txr->tx_buffers[first];
+       txbuf->eop_index = last;
+
+        bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
+            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+       /*
+        * Advance the Transmit Descriptor Tail (Tdt), this tells the
+        * hardware that this frame is available to transmit.
+        */
+       ++txr->total_packets;
+       IXGBE_WRITE_REG(&adapter->hw, IXGBE_VFTDT(txr->me), i);
+
+       return (0);
+
+xmit_fail:
+       bus_dmamap_unload(txr->txtag, txbuf->map);
+       return (error);
+
+}
+
+
+/*********************************************************************
+ *  Multicast Update
+ *
+ *  This routine is called whenever multicast address list is updated.
+ *
+ **********************************************************************/
+#define IXGBE_RAR_ENTRIES 16
+
+static void
+ixv_set_multi(struct adapter *adapter)
+{
+       u8      mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
+       u8      *update_ptr;
+       struct  ifmultiaddr *ifma;
+       int     mcnt = 0;
+       struct ifnet   *ifp = adapter->ifp;
+
+       IOCTL_DEBUGOUT("ixv_set_multi: begin");
+
+#if __FreeBSD_version < 800000
+       IF_ADDR_LOCK(ifp);
+#else
+       if_maddr_rlock(ifp);
+#endif
+       TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
+               if (ifma->ifma_addr->sa_family != AF_LINK)
+                       continue;
+               bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr),
+                   &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
+                   IXGBE_ETH_LENGTH_OF_ADDRESS);
+               mcnt++;
+       }
+#if __FreeBSD_version < 800000
+       IF_ADDR_UNLOCK(ifp);
+#else
+       if_maddr_runlock(ifp);
+#endif
+
+       update_ptr = mta;
+
+       ixgbe_update_mc_addr_list(&adapter->hw,
+           update_ptr, mcnt, ixv_mc_array_itr, TRUE);
+
+       return;
+}
+
+/*
+ * This is an iterator function now needed by the multicast
+ * shared code. It simply feeds the shared code routine the
+ * addresses in the array of ixv_set_multi() one by one.
+ */
+static u8 *
+ixv_mc_array_itr(struct ixgbe_hw *hw, u8 **update_ptr, u32 *vmdq)
+{
+       u8 *current = *update_ptr;
+
+       *vmdq = 0;
+       /* Advance the caller's cursor one MAC address forward */
+       *update_ptr = current + IXGBE_ETH_LENGTH_OF_ADDRESS;
+
+       return (current);
+}
+
+/*********************************************************************
+ *  Timer routine
+ *
+ *  This routine checks for link status,updates statistics,
+ *  and runs the watchdog check.
+ *
+ **********************************************************************/
+
+static void
+ixv_local_timer(void *arg)
+{
+       struct adapter  *adapter = arg;
+       device_t        dev = adapter->dev;
+       struct tx_ring  *txr = adapter->tx_rings;
+       int             i;
+
+       /* Runs from the callout; the core lock must already be held */
+       mtx_assert(&adapter->core_mtx, MA_OWNED);
+
+       ixv_update_link_status(adapter);
+
+       /* Stats Update */
+       ixv_update_stats(adapter);
+
+       /*
+        * If the interface has been paused
+        * then don't do the watchdog check
+        */
+       /* NOTE(review): IXGBE_TFCS is read through this adapter's register
+        * mapping -- confirm the offset is valid from a VF context */
+       if (IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & IXGBE_TFCS_TXOFF)
+               goto out;
+       /*
+       ** Check for time since any descriptor was cleaned
+       */
+        for (i = 0; i < adapter->num_queues; i++, txr++) {
+               IXV_TX_LOCK(txr);
+               if (txr->watchdog_check == FALSE) {
+                       IXV_TX_UNLOCK(txr);
+                       continue;
+               }
+               /* Jumps out with this txr's TX lock held; it is released
+                * at the "hung" label below */
+               if ((ticks - txr->watchdog_time) > IXV_WATCHDOG)
+                       goto hung;
+               IXV_TX_UNLOCK(txr);
+       }
+out:
+               ixv_rearm_queues(adapter, adapter->que_mask);
+       /* Re-schedule ourselves to run again in one second */
+       callout_reset(&adapter->timer, hz, ixv_local_timer, adapter);
+       return;
+
+hung:
+       /* txr and i still index the queue that tripped the watchdog */
+       device_printf(adapter->dev, "Watchdog timeout -- resetting\n");
+       device_printf(dev,"Queue(%d) tdh = %d, hw tdt = %d\n", txr->me,
+           IXGBE_READ_REG(&adapter->hw, IXGBE_VFTDH(i)),
+           IXGBE_READ_REG(&adapter->hw, IXGBE_VFTDT(i)));
+       device_printf(dev,"TX(%d) desc avail = %d,"
+           "Next TX to Clean = %d\n",
+           txr->me, txr->tx_avail, txr->next_to_clean);
+       /* Mark the interface down, count the event, and reinitialize */
+       adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
+       adapter->watchdog_events++;
+       IXV_TX_UNLOCK(txr);
+       ixv_init_locked(adapter);
+}
+
+/*
+** Note: this routine updates the OS on the link state
+**     the real check of the hardware only happens with
+**     a link interrupt.
+*/
+static void
+ixv_update_link_status(struct adapter *adapter)
+{
+       struct ifnet    *ifp = adapter->ifp;
+       struct tx_ring  *txr = adapter->tx_rings;
+       device_t        dev = adapter->dev;
+       int             i;
+
+       if (adapter->link_up) {
+               /* Report a down -> up transition to the stack exactly once */
+               if (adapter->link_active == FALSE) {
+                       if (bootverbose)
+                               device_printf(dev,"Link is up %d Gbps %s \n",
+                                   ((adapter->link_speed == 128)? 10:1),
+                                   "Full Duplex");
+                       adapter->link_active = TRUE;
+                       if_link_state_change(ifp, LINK_STATE_UP);
+               }
+               return;
+       }
+
+       /* Link down: report the up -> down transition exactly once */
+       if (adapter->link_active == TRUE) {
+               if (bootverbose)
+                       device_printf(dev,"Link is Down\n");
+               if_link_state_change(ifp, LINK_STATE_DOWN);
+               adapter->link_active = FALSE;
+               /* Disarm every per-queue TX watchdog while link is gone */
+               for (i = 0; i < adapter->num_queues; i++, txr++)
+                       txr->watchdog_check = FALSE;
+       }
+}
+
+
+/*********************************************************************
+ *
+ *  This routine disables all traffic on the adapter by issuing a
+ *  global reset on the MAC and deallocates TX/RX buffers.
+ *
+ **********************************************************************/
+
+static void
+ixv_stop(void *arg)
+{
+       struct ifnet   *ifp;
+       struct adapter *adapter = arg;
+       struct ixgbe_hw *hw = &adapter->hw;
+       ifp = adapter->ifp;
+
+       /* Must be called with the core lock held */
+       mtx_assert(&adapter->core_mtx, MA_OWNED);
+
+       INIT_DEBUGOUT("ixv_stop: begin\n");
+       ixv_disable_intr(adapter);
+
+       /* Tell the stack that the interface is no longer active */
+       ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
+
+       ixgbe_reset_hw(hw);
+       /* NOTE(review): adapter_stopped is cleared before the stop call,
+        * presumably so a guard inside ixgbe_stop_adapter() does not
+        * short-circuit -- confirm against the shared code */
+       adapter->hw.adapter_stopped = FALSE;
+       ixgbe_stop_adapter(hw);
+       /* Halt the periodic ixv_local_timer callout */
+       callout_stop(&adapter->timer);
+
+       /* reprogram the RAR[0] in case user changed it. */
+       ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
+
+       return;
+}
+
+
+/*********************************************************************
+ *
+ *  Determine hardware revision.
+ *
+ **********************************************************************/
+static void
+ixv_identify_hardware(struct adapter *adapter)
+{
+       device_t        dev = adapter->dev;
+       u16             cmd;
+
+       /*
+       ** Make sure BUSMASTER is set, on a VM under
+       ** KVM it may not be and will break things.
+       */
+       cmd = pci_read_config(dev, PCIR_COMMAND, 2);
+       if (!(cmd & PCIM_CMD_BUSMASTEREN) || !(cmd & PCIM_CMD_MEMEN)) {
+               INIT_DEBUGOUT("Memory Access and/or Bus Master "
+                   "bits were not set!\n");
+               cmd |= (PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
+               pci_write_config(dev, PCIR_COMMAND, cmd, 2);
+       }
+
+       /* Cache the PCI identity of this board for the shared code */
+       adapter->hw.vendor_id = pci_get_vendor(dev);
+       adapter->hw.device_id = pci_get_device(dev);
+       adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
+       adapter->hw.subsystem_vendor_id =
+           pci_read_config(dev, PCIR_SUBVEND_0, 2);
+       adapter->hw.subsystem_device_id =
+           pci_read_config(dev, PCIR_SUBDEV_0, 2);
+}
+
+/*********************************************************************
+ *
+ *  Setup MSIX Interrupt resources and handlers 
+ *
+ **********************************************************************/
+static int
+ixv_allocate_msix(struct adapter *adapter)
+{
+       device_t        dev = adapter->dev;
+       struct          ix_queue *que = adapter->queues;
+       int             error, rid, vector = 0;
+
+       /* One IRQ resource + handler per queue; rid for vector N is N + 1 */
+       for (int i = 0; i < adapter->num_queues; i++, vector++, que++) {
+               rid = vector + 1;
+               que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
+                   RF_SHAREABLE | RF_ACTIVE);
+               if (que->res == NULL) {
+                       device_printf(dev,"Unable to allocate"
+                           " bus resource: que interrupt [%d]\n", vector);
+                       return (ENXIO);
+               }
+               /* Set the handler function */
+               error = bus_setup_intr(dev, que->res,
+                   INTR_TYPE_NET | INTR_MPSAFE, NULL,
+                   ixv_msix_que, que, &que->tag);
+               if (error) {
+                       /* NOTE(review): que->res is NULLed without a matching
+                        * bus_release_resource() -- looks like a leak on this
+                        * error path */
+                       que->res = NULL;
+                       device_printf(dev, "Failed to register QUE handler");
+                       return (error);
+               }
+#if __FreeBSD_version >= 800504
+               bus_describe_intr(dev, que->res, que->tag, "que %d", i);
+#endif
+               que->msix = vector;
+               /* NOTE(review): "1 << msix" is int arithmetic; the u64 cast
+                * happens after the shift, so vectors >= 31 would overflow --
+                * harmless while vector counts stay small */
+               adapter->que_mask |= (u64)(1 << que->msix);
+               /*
+               ** Bind the msix vector, and thus the
+               ** ring to the corresponding cpu.
+               */
+               if (adapter->num_queues > 1)
+                       bus_bind_intr(dev, que->res, i);
+
+               /* Deferred-work task and fast taskqueue for this queue */
+               TASK_INIT(&que->que_task, 0, ixv_handle_que, que);
+               que->tq = taskqueue_create_fast("ixv_que", M_NOWAIT,
+                   taskqueue_thread_enqueue, &que->tq);
+               taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
+                   device_get_nameunit(adapter->dev));
+       }
+
+       /* and Mailbox */
+       rid = vector + 1;
+       adapter->res = bus_alloc_resource_any(dev,
+           SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
+       if (!adapter->res) {
+               device_printf(dev,"Unable to allocate"
+           " bus resource: MBX interrupt [%d]\n", rid);
+               return (ENXIO);
+       }
+       /* Set the mbx handler function */
+       error = bus_setup_intr(dev, adapter->res,
+           INTR_TYPE_NET | INTR_MPSAFE, NULL,
+           ixv_msix_mbx, adapter, &adapter->tag);
+       if (error) {
+               /* NOTE(review): adapter->res is NULLed without release here
+                * as well -- same pattern as the queue path above */
+               adapter->res = NULL;
+               device_printf(dev, "Failed to register LINK handler");
+               return (error);
+       }
+#if __FreeBSD_version >= 800504
+       bus_describe_intr(dev, adapter->res, adapter->tag, "mbx");
+#endif
+       adapter->mbxvec = vector;
+       /* Tasklets for Mailbox */
+       TASK_INIT(&adapter->mbx_task, 0, ixv_handle_mbx, adapter);
+       adapter->tq = taskqueue_create_fast("ixv_mbx", M_NOWAIT,
+           taskqueue_thread_enqueue, &adapter->tq);
+       taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s mbxq",
+           device_get_nameunit(adapter->dev));
+       /*
+       ** XXX - remove this when KVM/QEMU fix gets in...
+       ** Due to a broken design QEMU will fail to properly
+       ** enable the guest for MSIX unless the vectors in
+       ** the table are all set up, so we must rewrite the
+       ** ENABLE in the MSIX control register again at this
+       ** point to cause it to successfully initialize us.
+       */
+       if (adapter->hw.mac.type == ixgbe_mac_82599_vf) {
+               int msix_ctrl;
+               pci_find_extcap(dev, PCIY_MSIX, &rid);
+               rid += PCIR_MSIX_CTRL;
+               msix_ctrl = pci_read_config(dev, rid, 2);
+               msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
+               pci_write_config(dev, rid, msix_ctrl, 2);
+       }
+
+       return (0);
+}
+
+/*
+ * Setup MSIX resources, note that the VF
+ * device MUST use MSIX, there is no fallback.
+ */
+static int
+ixv_setup_msix(struct adapter *adapter)
+{
+       device_t dev = adapter->dev;
+       int rid, vectors, want = 2;
+
+       /* First try MSI/X: map the MSIX table BAR */
+       rid = PCIR_BAR(3);
+       adapter->msix_mem = bus_alloc_resource_any(dev,
+           SYS_RES_MEMORY, &rid, RF_ACTIVE);
+       if (!adapter->msix_mem) {
+               device_printf(adapter->dev,
+                   "Unable to map MSIX table \n");
+               goto out;
+       }
+
+       vectors = pci_msix_count(dev);
+       if (vectors < 2) {
+               bus_release_resource(dev, SYS_RES_MEMORY,
+                   rid, adapter->msix_mem);
+               adapter->msix_mem = NULL;
+               goto out;
+       }
+
+       /*
+       ** Want two vectors: one for a queue,
+       ** plus an additional for mailbox.
+       */
+       if (pci_alloc_msix(dev, &want) == 0) {
+               device_printf(adapter->dev,
+                   "Using MSIX interrupts with %d vectors\n", want);
+               return (want);
+       }
+       /*
+       ** Allocation failed: release the mapped table so this
+       ** path matches the vectors < 2 path above and does not
+       ** leak the BAR mapping.
+       */
+       bus_release_resource(dev, SYS_RES_MEMORY, rid, adapter->msix_mem);
+       adapter->msix_mem = NULL;
+out:
+       device_printf(adapter->dev,"MSIX config error\n");
+       return (ENXIO);
+}
+
+
+static int
+ixv_allocate_pci_resources(struct adapter *adapter)
+{
+       device_t        dev = adapter->dev;
+       int             rid = PCIR_BAR(0);
+
+       /* Map the device register BAR */
+       adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
+           &rid, RF_ACTIVE);
+       if (adapter->pci_mem == NULL) {
+               device_printf(dev,"Unable to allocate bus resource: memory\n");
+               return (ENXIO);
+       }
+
+       /* Hand the bus-space tag/handle to the shared-code osdep layer */
+       adapter->osdep.mem_bus_space_tag =
+               rman_get_bustag(adapter->pci_mem);
+       adapter->osdep.mem_bus_space_handle =
+               rman_get_bushandle(adapter->pci_mem);
+       adapter->hw.hw_addr = (u8 *) &adapter->osdep.mem_bus_space_handle;
+
+       adapter->num_queues = 1;
+       adapter->hw.back = &adapter->osdep;
+
+       /*
+       ** Now setup MSI/X, should
+       ** return us the number of
+       ** configured vectors.
+       */
+       adapter->msix = ixv_setup_msix(adapter);
+       return ((adapter->msix == ENXIO) ? ENXIO : 0);
+}
+
+static void
+ixv_free_pci_resources(struct adapter * adapter)
+{
+       struct          ix_queue *que = adapter->queues;
+       device_t        dev = adapter->dev;
+       int             rid, memrid;
+
+       /* NOTE(review): ixv_setup_msix mapped the table at PCIR_BAR(3);
+        * this assumes MSIX_BAR == 3 -- confirm the define matches */
+       memrid = PCIR_BAR(MSIX_BAR);
+
+       /*
+       ** There is a slight possibility of a failure mode
+       ** in attach that will result in entering this function
+       ** before interrupt resources have been initialized, and
+       ** in that case we do not want to execute the loops below
+       ** We can detect this reliably by the state of the adapter
+       ** res pointer.
+       */
+       if (adapter->res == NULL)
+               goto mem;
+
+       /*
+       **  Release all msix queue resources:
+       */
+       for (int i = 0; i < adapter->num_queues; i++, que++) {
+               rid = que->msix + 1;
+               /* Teardown the handler before releasing its IRQ resource */
+               if (que->tag != NULL) {
+                       bus_teardown_intr(dev, que->res, que->tag);
+                       que->tag = NULL;
+               }
+               if (que->res != NULL)
+                       bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
+       }
+
+
+       /* Clean the Legacy or Link interrupt last */
+       if (adapter->mbxvec) /* we are doing MSIX */
+               rid = adapter->mbxvec + 1;
+       else
+               (adapter->msix != 0) ? (rid = 1):(rid = 0);
+
+       if (adapter->tag != NULL) {
+               bus_teardown_intr(dev, adapter->res, adapter->tag);
+               adapter->tag = NULL;
+       }
+       if (adapter->res != NULL)
+               bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res);
+
+mem:
+       /* Give back the MSIX vectors, then the table BAR, then BAR(0) */
+       if (adapter->msix)
+               pci_release_msi(dev);
+
+       if (adapter->msix_mem != NULL)
+               bus_release_resource(dev, SYS_RES_MEMORY,
+                   memrid, adapter->msix_mem);
+
+       if (adapter->pci_mem != NULL)
+               bus_release_resource(dev, SYS_RES_MEMORY,
+                   PCIR_BAR(0), adapter->pci_mem);
+
+       return;
+}
+
+/*********************************************************************
+ *
+ *  Setup networking device structure and register an interface.
+ *
+ **********************************************************************/
+static void
+ixv_setup_interface(device_t dev, struct adapter *adapter)
+{
+       struct ifnet   *ifp;
+
+       INIT_DEBUGOUT("ixv_setup_interface: begin");
+
+       /* if_alloc failure is unrecoverable at attach time */
+       ifp = adapter->ifp = if_alloc(IFT_ETHER);
+       if (ifp == NULL)
+               panic("%s: can not if_alloc()\n", device_get_nameunit(dev));
+       if_initname(ifp, device_get_name(dev), device_get_unit(dev));
+       ifp->if_mtu = ETHERMTU;
+       ifp->if_baudrate = 1000000000;
+       ifp->if_init = ixv_init;
+       ifp->if_softc = adapter;
+       ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
+       ifp->if_ioctl = ixv_ioctl;
+/* Multiqueue transmit entry points exist only on FreeBSD 8+ */
+#if __FreeBSD_version >= 800000
+       ifp->if_transmit = ixv_mq_start;
+       ifp->if_qflush = ixv_qflush;
+#else
+       ifp->if_start = ixv_start;
+#endif
+       ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 2;
+
+       ether_ifattach(ifp, adapter->hw.mac.addr);
+
+       adapter->max_frame_size =
+           ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+
+       /*
+        * Tell the upper layer(s) we support long frames.
+        */
+       ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
+
+       /* Advertise checksum/TSO/VLAN/jumbo/LRO capabilities, all enabled */
+       ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4 | IFCAP_VLAN_HWCSUM;
+       ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
+       ifp->if_capabilities |= IFCAP_JUMBO_MTU | IFCAP_LRO;
+
+       ifp->if_capenable = ifp->if_capabilities;
+
+       /*
+        * Specify the media types supported by this adapter and register
+        * callbacks to update media and link information
+        */
+       ifmedia_init(&adapter->media, IFM_IMASK, ixv_media_change,
+                    ixv_media_status);
+       ifmedia_add(&adapter->media, IFM_ETHER | IFM_FDX, 0, NULL);
+       ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
+       ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
+
+       return;
+}
+       
+static void
+ixv_config_link(struct adapter *adapter)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       u32     autoneg, err = 0;
+       bool    negotiate = TRUE;
+
+       /* Query current link state through the shared-code hook, if any */
+       if (hw->mac.ops.check_link)
+               err = hw->mac.ops.check_link(hw, &autoneg,
+                   &adapter->link_up, FALSE);
+       if (err)
+               return;
+
+       /* Only attempt link setup when the check succeeded */
+       if (hw->mac.ops.setup_link)
+               err = hw->mac.ops.setup_link(hw, autoneg,
+                   negotiate, adapter->link_up);
+}
+
+/********************************************************************
+ * Manage DMA'able memory.
+ *******************************************************************/
+static void
+ixv_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error)
+{
+       bus_addr_t *paddr = arg;
+
+       /* On a failed load leave the caller's address untouched */
+       if (error != 0)
+               return;
+       /* Single-segment mapping: record the bus address of segment 0 */
+       *paddr = segs->ds_addr;
+}
+
+static int
+ixv_dma_malloc(struct adapter *adapter, bus_size_t size,
+               struct ixv_dma_alloc *dma, int mapflags)
+{
+       device_t dev = adapter->dev;
+       int             r;
+
+       /* Create a single-segment tag sized exactly for this allocation */
+       r = bus_dma_tag_create(bus_get_dma_tag(adapter->dev),   /* parent */
+                              DBA_ALIGN, 0,    /* alignment, bounds */
+                              BUS_SPACE_MAXADDR,       /* lowaddr */
+                              BUS_SPACE_MAXADDR,       /* highaddr */
+                              NULL, NULL,      /* filter, filterarg */
+                              size,    /* maxsize */
+                              1,       /* nsegments */
+                              size,    /* maxsegsize */
+                              BUS_DMA_ALLOCNOW,        /* flags */
+                              NULL,    /* lockfunc */
+                              NULL,    /* lockfuncarg */
+                              &dma->dma_tag);
+       if (r != 0) {
+               device_printf(dev,"ixv_dma_malloc: bus_dma_tag_create failed; "
+                      "error %u\n", r);
+               goto fail_0;
+       }
+       r = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
+                            BUS_DMA_NOWAIT, &dma->dma_map);
+       if (r != 0) {
+               device_printf(dev,"ixv_dma_malloc: bus_dmamem_alloc failed; "
+                      "error %u\n", r);
+               goto fail_1;
+       }
+       /* ixv_dmamap_cb stores the bus address into dma->dma_paddr */
+       r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
+                           size,
+                           ixv_dmamap_cb,
+                           &dma->dma_paddr,
+                           mapflags | BUS_DMA_NOWAIT);
+       if (r != 0) {
+               device_printf(dev,"ixv_dma_malloc: bus_dmamap_load failed; "
+                      "error %u\n", r);
+               goto fail_2;
+       }
+       dma->dma_size = size;
+       return (0);
+/* Unwind in reverse order of the steps above */
+fail_2:
+       bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
+fail_1:
+       bus_dma_tag_destroy(dma->dma_tag);
+fail_0:
+       dma->dma_map = NULL;
+       dma->dma_tag = NULL;
+       return (r);
+}
+
+static void
+ixv_dma_free(struct adapter *adapter, struct ixv_dma_alloc *dma)
+{
+       bus_dma_tag_t tag = dma->dma_tag;
+
+       /* Complete outstanding DMA, then unwind load -> alloc -> tag */
+       bus_dmamap_sync(tag, dma->dma_map,
+           BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+       bus_dmamap_unload(tag, dma->dma_map);
+       bus_dmamem_free(tag, dma->dma_vaddr, dma->dma_map);
+       bus_dma_tag_destroy(tag);
+}
+
+
+/*********************************************************************
+ *
+ *  Allocate memory for the transmit and receive rings, and then
+ *  the descriptors associated with each, called only once at attach.
+ *
+ **********************************************************************/
+static int
+ixv_allocate_queues(struct adapter *adapter)
+{
+       device_t        dev = adapter->dev;
+       struct ix_queue *que;
+       struct tx_ring  *txr;
+       struct rx_ring  *rxr;
+       int rsize, tsize, error = 0;
+       int txconf = 0, rxconf = 0;
+
+        /* First allocate the top level queue structs */
+        if (!(adapter->queues =
+            (struct ix_queue *) malloc(sizeof(struct ix_queue) *
+            adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
+                device_printf(dev, "Unable to allocate queue memory\n");
+                error = ENOMEM;
+                goto fail;
+        }
+
+       /* First allocate the TX ring struct memory */
+       if (!(adapter->tx_rings =
+           (struct tx_ring *) malloc(sizeof(struct tx_ring) *
+           adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
+               device_printf(dev, "Unable to allocate TX ring memory\n");
+               error = ENOMEM;
+               goto tx_fail;
+       }
+
+       /* Next allocate the RX */
+       if (!(adapter->rx_rings =
+           (struct rx_ring *) malloc(sizeof(struct rx_ring) *
+           adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
+               device_printf(dev, "Unable to allocate RX ring memory\n");
+               error = ENOMEM;
+               goto rx_fail;
+       }
+
+       /* For the ring itself */
+       tsize = roundup2(adapter->num_tx_desc *
+           sizeof(union ixgbe_adv_tx_desc), DBA_ALIGN);
+
+       /*
+        * Now set up the TX queues, txconf is needed to handle the
+        * possibility that things fail midcourse and we need to
+        * undo memory gracefully
+        */ 
+       for (int i = 0; i < adapter->num_queues; i++, txconf++) {
+               /* Set up some basics */
+               txr = &adapter->tx_rings[i];
+               txr->adapter = adapter;
+               txr->me = i;
+
+               /* Initialize the TX side lock */
+               snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
+                   device_get_nameunit(dev), txr->me);
+               mtx_init(&txr->tx_mtx, txr->mtx_name, NULL, MTX_DEF);
+
+               if (ixv_dma_malloc(adapter, tsize,
+                       &txr->txdma, BUS_DMA_NOWAIT)) {
+                       device_printf(dev,
+                           "Unable to allocate TX Descriptor memory\n");
+                       error = ENOMEM;
+                       goto err_tx_desc;
+               }
+               txr->tx_base = (union ixgbe_adv_tx_desc *)txr->txdma.dma_vaddr;
+               bzero((void *)txr->tx_base, tsize);
+
+               /* Now allocate transmit buffers for the ring */
+               if (ixv_allocate_transmit_buffers(txr)) {
+                       device_printf(dev,
+                           "Critical Failure setting up transmit buffers\n");
+                       error = ENOMEM;
+                       goto err_tx_desc;
+               }
+#if __FreeBSD_version >= 800000
+               /* Allocate a buf ring */
+               /* NOTE(review): with M_WAITOK the NULL check below is likely
+                * dead code -- confirm buf_ring_alloc's contract */
+               txr->br = buf_ring_alloc(IXV_BR_SIZE, M_DEVBUF,
+                   M_WAITOK, &txr->tx_mtx);
+               if (txr->br == NULL) {
+                       device_printf(dev,
+                           "Critical Failure setting up buf ring\n");
+                       error = ENOMEM;
+                       goto err_tx_desc;
+               }
+#endif
+       }
+
+       /*
+        * Next the RX queues...
+        */ 
+       rsize = roundup2(adapter->num_rx_desc *
+           sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
+       for (int i = 0; i < adapter->num_queues; i++, rxconf++) {
+               rxr = &adapter->rx_rings[i];
+               /* Set up some basics */
+               rxr->adapter = adapter;
+               rxr->me = i;
+
+               /* Initialize the RX side lock */
+               snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
+                   device_get_nameunit(dev), rxr->me);
+               mtx_init(&rxr->rx_mtx, rxr->mtx_name, NULL, MTX_DEF);
+
+               if (ixv_dma_malloc(adapter, rsize,
+                       &rxr->rxdma, BUS_DMA_NOWAIT)) {
+                       device_printf(dev,
+                           "Unable to allocate RxDescriptor memory\n");
+                       error = ENOMEM;
+                       goto err_rx_desc;
+               }
+               rxr->rx_base = (union ixgbe_adv_rx_desc *)rxr->rxdma.dma_vaddr;
+               bzero((void *)rxr->rx_base, rsize);
+
+               /* Allocate receive buffers for the ring*/
+               if (ixv_allocate_receive_buffers(rxr)) {
+                       device_printf(dev,
+                           "Critical Failure setting up receive buffers\n");
+                       error = ENOMEM;
+                       goto err_rx_desc;
+               }
+       }
+
+       /*
+       ** Finally set up the queue holding structs
+       */
+       for (int i = 0; i < adapter->num_queues; i++) {
+               que = &adapter->queues[i];
+               que->adapter = adapter;
+               que->txr = &adapter->tx_rings[i];
+               que->rxr = &adapter->rx_rings[i];
+       }
+
+       return (0);
+
+/* Unwind: txconf/rxconf count fully-initialized rings, so these loops
+ * free only descriptor memory that was successfully allocated above */
+err_rx_desc:
+       for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--)
+               ixv_dma_free(adapter, &rxr->rxdma);
+err_tx_desc:
+       for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--)
+               ixv_dma_free(adapter, &txr->txdma);
+       free(adapter->rx_rings, M_DEVBUF);
+rx_fail:
+       free(adapter->tx_rings, M_DEVBUF);
+tx_fail:
+       free(adapter->queues, M_DEVBUF);
+fail:
+       return (error);
+}
+
+
+/*********************************************************************
+ *
+ *  Allocate memory for tx_buffer structures. The tx_buffer stores all
+ *  the information needed to transmit a packet on the wire. This is
+ *  called only once at attach, setup is done every reset.
+ *
+ **********************************************************************/
+static int
+ixv_allocate_transmit_buffers(struct tx_ring *txr)
+{
+       struct adapter *adapter = txr->adapter;
+       device_t dev = adapter->dev;
+       struct ixv_tx_buf *txbuf;
+       int error, i;
+
+       /*
+        * Setup DMA descriptor areas.
+        * Derive the tag from the device's parent tag (as ixv_dma_malloc
+        * does) rather than passing NULL, so any platform DMA restrictions
+        * are inherited consistently.
+        */
+       if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
+                              1, 0,            /* alignment, bounds */
+                              BUS_SPACE_MAXADDR,       /* lowaddr */
+                              BUS_SPACE_MAXADDR,       /* highaddr */
+                              NULL, NULL,              /* filter, filterarg */
+                              IXV_TSO_SIZE,            /* maxsize */
+                              32,                      /* nsegments */
+                              PAGE_SIZE,               /* maxsegsize */
+                              0,                       /* flags */
+                              NULL,                    /* lockfunc */
+                              NULL,                    /* lockfuncarg */
+                              &txr->txtag))) {
+               device_printf(dev,"Unable to allocate TX DMA tag\n");
+               goto fail;
+       }
+
+       if (!(txr->tx_buffers =
+           (struct ixv_tx_buf *) malloc(sizeof(struct ixv_tx_buf) *
+           adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) {
+               device_printf(dev, "Unable to allocate tx_buffer memory\n");
+               error = ENOMEM;
+               goto fail;
+       }
+
+        /* Create the descriptor buffer dma maps */
+       txbuf = txr->tx_buffers;
+       for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
+               error = bus_dmamap_create(txr->txtag, 0, &txbuf->map);
+               if (error != 0) {
+                       device_printf(dev, "Unable to create TX DMA map\n");
+                       goto fail;
+               }
+       }
+
+       return 0;
+fail:
+       /* We free all, it handles case where we are in the middle */
+       ixv_free_transmit_structures(adapter);
+       return (error);
+}
+
+/*********************************************************************
+ *
+ *  Initialize a transmit ring.
+ *
+ **********************************************************************/
+static void
+ixv_setup_transmit_ring(struct tx_ring *txr)
+{
+       struct adapter *adapter = txr->adapter;
+       struct ixv_tx_buf *txbuf;
+       int i;
+
+       /*
+        * The whole re-initialization runs under the TX lock so a
+        * concurrent transmit/clean cannot observe a half-reset ring.
+        */
+       /* Clear the old ring contents */
+       IXV_TX_LOCK(txr);
+       bzero((void *)txr->tx_base,
+             (sizeof(union ixgbe_adv_tx_desc)) * adapter->num_tx_desc);
+       /* Reset indices */
+       txr->next_avail_desc = 0;
+       txr->next_to_clean = 0;
+
+       /* Free any existing tx buffers. */
+        txbuf = txr->tx_buffers;
+       for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) {
+               if (txbuf->m_head != NULL) {
+                       bus_dmamap_sync(txr->txtag, txbuf->map,
+                           BUS_DMASYNC_POSTWRITE);
+                       bus_dmamap_unload(txr->txtag, txbuf->map);
+                       m_freem(txbuf->m_head);
+                       txbuf->m_head = NULL;
+               }
+               /* Clear the EOP index */
+               /* -1 marks "no packet ends here"; ixv_txeof() keys off it */
+               txbuf->eop_index = -1;
+        }
+
+       /* Set number of descriptors available */
+       txr->tx_avail = adapter->num_tx_desc;
+
+       bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
+           BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+       IXV_TX_UNLOCK(txr);
+}
+
+/*********************************************************************
+ *
+ *  Initialize all transmit rings.
+ *
+ **********************************************************************/
+static int
+ixv_setup_transmit_structures(struct adapter *adapter)
+{
+       /* Walk every TX ring and (re)initialize it for use. */
+       for (int i = 0; i < adapter->num_queues; i++) {
+               struct tx_ring *txr = &adapter->tx_rings[i];
+
+               ixv_setup_transmit_ring(txr);
+       }
+
+       return (0);
+}
+
+/*********************************************************************
+ *
+ *  Enable transmit unit.
+ *
+ **********************************************************************/
+static void
+ixv_initialize_transmit_units(struct adapter *adapter)
+{
+       struct tx_ring  *txr = adapter->tx_rings;
+       struct ixgbe_hw *hw = &adapter->hw;
+
+       /* Program every TX queue: enable it, then set base/len/indices. */
+       for (int i = 0; i < adapter->num_queues; i++, txr++) {
+               u64     tdba = txr->txdma.dma_paddr;
+               u32     txctrl, txdctl;
+
+               /* Set WTHRESH to 8, burst writeback */
+               txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
+               txdctl |= (8 << 16);
+               IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
+               /* Now enable */
+               txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
+               txdctl |= IXGBE_TXDCTL_ENABLE;
+               IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
+
+               /* Set the HW Tx Head and Tail indices */
+               IXGBE_WRITE_REG(hw, IXGBE_VFTDH(i), 0);
+               IXGBE_WRITE_REG(hw, IXGBE_VFTDT(i), 0);
+
+               /* Setup Transmit Descriptor Cmd Settings */
+               txr->txd_cmd = IXGBE_TXD_CMD_IFCS;
+               txr->watchdog_check = FALSE;
+
+               /* Set Ring parameters */
+               IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
+                      (tdba & 0x00000000ffffffffULL));
+               IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
+               IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
+                   adapter->num_tx_desc *
+                   sizeof(struct ixgbe_legacy_tx_desc));
+               /* Keep descriptor write-back relaxed-ordering disabled */
+               txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
+               txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
+               IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);
+               /*
+                * BUG FIX: an unconditional "break;" here used to cut the
+                * loop short after queue 0, leaving any additional TX
+                * queues unprogrammed.  Harmless in the single-queue VF
+                * configuration, wrong for num_queues > 1 — removed.
+                */
+       }
+
+       return;
+}
+
+/*********************************************************************
+ *
+ *  Free all transmit rings.
+ *
+ **********************************************************************/
+static void
+ixv_free_transmit_structures(struct adapter *adapter)
+{
+       struct tx_ring *txr = adapter->tx_rings;
+
+       /*
+        * Per ring: release mbufs/maps/tag, free the descriptor DMA
+        * area, then destroy the ring lock; finally free the ring array.
+        * Also reached from allocation-failure paths, so the callees
+        * must tolerate partially constructed rings.
+        */
+       for (int i = 0; i < adapter->num_queues; i++, txr++) {
+               IXV_TX_LOCK(txr);
+               ixv_free_transmit_buffers(txr);
+               ixv_dma_free(adapter, &txr->txdma);
+               IXV_TX_UNLOCK(txr);
+               IXV_TX_LOCK_DESTROY(txr);
+       }
+       free(adapter->tx_rings, M_DEVBUF);
+}
+
+/*********************************************************************
+ *
+ *  Free transmit ring related data structures.
+ *
+ **********************************************************************/
+static void
+ixv_free_transmit_buffers(struct tx_ring *txr)
+{
+       struct adapter *adapter = txr->adapter;
+       struct ixv_tx_buf *tx_buffer;
+       int             i;
+
+       INIT_DEBUGOUT("free_transmit_ring: begin");
+
+       /* Nothing was ever allocated (or already torn down) */
+       if (txr->tx_buffers == NULL)
+               return;
+
+       tx_buffer = txr->tx_buffers;
+       for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
+               /* Buffer still holds an in-flight mbuf: sync, unload, free */
+               if (tx_buffer->m_head != NULL) {
+                       bus_dmamap_sync(txr->txtag, tx_buffer->map,
+                           BUS_DMASYNC_POSTWRITE);
+                       bus_dmamap_unload(txr->txtag,
+                           tx_buffer->map);
+                       m_freem(tx_buffer->m_head);
+                       tx_buffer->m_head = NULL;
+                       if (tx_buffer->map != NULL) {
+                               bus_dmamap_destroy(txr->txtag,
+                                   tx_buffer->map);
+                               tx_buffer->map = NULL;
+                       }
+               /* Map exists but no mbuf attached (created, maybe loaded) */
+               } else if (tx_buffer->map != NULL) {
+                       bus_dmamap_unload(txr->txtag,
+                           tx_buffer->map);
+                       bus_dmamap_destroy(txr->txtag,
+                           tx_buffer->map);
+                       tx_buffer->map = NULL;
+               }
+       }
+#if __FreeBSD_version >= 800000
+       if (txr->br != NULL)
+               buf_ring_free(txr->br, M_DEVBUF);
+#endif
+       if (txr->tx_buffers != NULL) {
+               free(txr->tx_buffers, M_DEVBUF);
+               txr->tx_buffers = NULL;
+       }
+       if (txr->txtag != NULL) {
+               bus_dma_tag_destroy(txr->txtag);
+               txr->txtag = NULL;
+       }
+       return;
+}
+
+/*********************************************************************
+ *
+ *  Advanced Context Descriptor setup for VLAN or CSUM
+ *
+ **********************************************************************/
+
+static boolean_t
+ixv_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
+{
+       struct adapter *adapter = txr->adapter;
+       struct ixgbe_adv_tx_context_desc *TXD;
+       struct ixv_tx_buf        *tx_buffer;
+       u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
+       struct ether_vlan_header *eh;
+       struct ip *ip;
+       struct ip6_hdr *ip6;
+       int  ehdrlen, ip_hlen = 0;
+       u16     etype;
+       u8      ipproto = 0;
+       bool    offload = TRUE;
+       int ctxd = txr->next_avail_desc;
+       u16 vtag = 0;
+
+
+       /* No checksum offload requested?  Then only a VLAN tag can
+          justify writing a context descriptor at all. */
+       if ((mp->m_pkthdr.csum_flags & CSUM_OFFLOAD) == 0)
+               offload = FALSE;
+
+
+       tx_buffer = &txr->tx_buffers[ctxd];
+       TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
+
+       /*
+       ** In advanced descriptors the vlan tag must 
+       ** be placed into the descriptor itself.
+       */
+       /*
+       ** NOTE(review): when a VLAN tag is present but no checksum
+       ** offload is requested, a context descriptor is still consumed
+       ** yet FALSE is returned at the end — presumably the caller uses
+       ** the return value only to decide checksum flags; confirm.
+       */
+       if (mp->m_flags & M_VLANTAG) {
+               vtag = htole16(mp->m_pkthdr.ether_vtag);
+               vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
+       } else if (offload == FALSE)
+               return FALSE;
+
+       /*
+        * Determine where frame payload starts.
+        * Jump over vlan headers if already present,
+        * helpful for QinQ too.
+        */
+       eh = mtod(mp, struct ether_vlan_header *);
+       if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
+               etype = ntohs(eh->evl_proto);
+               ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
+       } else {
+               etype = ntohs(eh->evl_encap_proto);
+               ehdrlen = ETHER_HDR_LEN;
+       }
+
+       /* Set the ether header length */
+       vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
+
+       /* L3: record header length and IPv4/IPv6 type for the HW */
+       switch (etype) {
+               case ETHERTYPE_IP:
+                       ip = (struct ip *)(mp->m_data + ehdrlen);
+                       ip_hlen = ip->ip_hl << 2;
+                       if (mp->m_len < ehdrlen + ip_hlen)
+                               return (FALSE);
+                       ipproto = ip->ip_p;
+                       type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
+                       break;
+               case ETHERTYPE_IPV6:
+                       ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
+                       ip_hlen = sizeof(struct ip6_hdr);
+                       if (mp->m_len < ehdrlen + ip_hlen)
+                               return (FALSE);
+                       ipproto = ip6->ip6_nxt;
+                       type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV6;
+                       break;
+               default:
+                       offload = FALSE;
+                       break;
+       }
+
+       vlan_macip_lens |= ip_hlen;
+       type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
+
+       /* L4: select the checksum type the HW should compute */
+       switch (ipproto) {
+               case IPPROTO_TCP:
+                       if (mp->m_pkthdr.csum_flags & CSUM_TCP)
+                               type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
+                       break;
+
+               case IPPROTO_UDP:
+                       if (mp->m_pkthdr.csum_flags & CSUM_UDP)
+                               type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP;
+                       break;
+
+#if __FreeBSD_version >= 800000
+               case IPPROTO_SCTP:
+                       if (mp->m_pkthdr.csum_flags & CSUM_SCTP)
+                               type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
+                       break;
+#endif
+               default:
+                       offload = FALSE;
+                       break;
+       }
+
+       /* Now copy bits into descriptor */
+       TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
+       TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
+       TXD->seqnum_seed = htole32(0);
+       TXD->mss_l4len_idx = htole32(0);
+
+       tx_buffer->m_head = NULL;
+       tx_buffer->eop_index = -1;
+
+       /* We've consumed the first desc, adjust counters */
+       if (++ctxd == adapter->num_tx_desc)
+               ctxd = 0;
+       txr->next_avail_desc = ctxd;
+       --txr->tx_avail;
+
+        return (offload);
+}
+
+/**********************************************************************
+ *
+ *  Setup work for hardware segmentation offload (TSO) on
+ *  adapters using advanced tx descriptors
+ *
+ **********************************************************************/
+static boolean_t
+ixv_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *paylen)
+{
+       struct adapter *adapter = txr->adapter;
+       struct ixgbe_adv_tx_context_desc *TXD;
+       struct ixv_tx_buf        *tx_buffer;
+       u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
+       u32 mss_l4len_idx = 0;
+       u16 vtag = 0;
+       int ctxd, ehdrlen,  hdrlen, ip_hlen, tcp_hlen;
+       struct ether_vlan_header *eh;
+       struct ip *ip;
+       struct tcphdr *th;
+
+
+       /*
+        * Determine where frame payload starts.
+        * Jump over vlan headers if already present
+        */
+       eh = mtod(mp, struct ether_vlan_header *);
+       if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) 
+               ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
+       else
+               ehdrlen = ETHER_HDR_LEN;
+
+        /* Ensure we have at least the IP+TCP header in the first mbuf. */
+        if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr))
+               return FALSE;
+
+       ctxd = txr->next_avail_desc;
+       tx_buffer = &txr->tx_buffers[ctxd];
+       TXD = (struct ixgbe_adv_tx_context_desc *) &txr->tx_base[ctxd];
+
+       /* TSO here supports IPv4/TCP only; bail out for anything else */
+       ip = (struct ip *)(mp->m_data + ehdrlen);
+       if (ip->ip_p != IPPROTO_TCP)
+               return FALSE;   /* 0 */
+       ip->ip_sum = 0;
+       ip_hlen = ip->ip_hl << 2;
+       th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
+       /* Seed the TCP checksum with the pseudo-header (no length);
+          the hardware completes it per generated segment */
+       th->th_sum = in_pseudo(ip->ip_src.s_addr,
+           ip->ip_dst.s_addr, htons(IPPROTO_TCP));
+       tcp_hlen = th->th_off << 2;
+       hdrlen = ehdrlen + ip_hlen + tcp_hlen;
+
+       /* This is used in the transmit desc in encap */
+       *paylen = mp->m_pkthdr.len - hdrlen;
+
+       /* VLAN MACLEN IPLEN */
+       if (mp->m_flags & M_VLANTAG) {
+               vtag = htole16(mp->m_pkthdr.ether_vtag);
+                vlan_macip_lens |= (vtag << IXGBE_ADVTXD_VLAN_SHIFT);
+       }
+
+       vlan_macip_lens |= ehdrlen << IXGBE_ADVTXD_MACLEN_SHIFT;
+       vlan_macip_lens |= ip_hlen;
+       TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
+
+       /* ADV DTYPE TUCMD */
+       type_tucmd_mlhl |= IXGBE_ADVTXD_DCMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
+       type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
+       type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
+       TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
+
+
+       /* MSS L4LEN IDX */
+       mss_l4len_idx |= (mp->m_pkthdr.tso_segsz << IXGBE_ADVTXD_MSS_SHIFT);
+       mss_l4len_idx |= (tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
+       TXD->mss_l4len_idx = htole32(mss_l4len_idx);
+
+       TXD->seqnum_seed = htole32(0);
+       tx_buffer->m_head = NULL;
+       tx_buffer->eop_index = -1;
+
+       /* The context descriptor consumed one ring slot */
+       if (++ctxd == adapter->num_tx_desc)
+               ctxd = 0;
+
+       txr->tx_avail--;
+       txr->next_avail_desc = ctxd;
+       return TRUE;
+}
+
+
+/**********************************************************************
+ *
+ *  Examine each tx_buffer in the used queue. If the hardware is done
+ *  processing the packet then free associated resources. The
+ *  tx_buffer is put back on the free queue.
+ *
+ **********************************************************************/
+static boolean_t
+ixv_txeof(struct tx_ring *txr)
+{
+       struct adapter  *adapter = txr->adapter;
+       struct ifnet    *ifp = adapter->ifp;
+       u32     first, last, done;
+       struct ixv_tx_buf *tx_buffer;
+       struct ixgbe_legacy_tx_desc *tx_desc, *eop_desc;
+
+       mtx_assert(&txr->tx_mtx, MA_OWNED);
+
+       /* Ring completely clean — nothing to do */
+       if (txr->tx_avail == adapter->num_tx_desc)
+               return FALSE;
+
+       first = txr->next_to_clean;
+       tx_buffer = &txr->tx_buffers[first];
+       /* For cleanup we just use legacy struct */
+       tx_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
+       /* eop_index of -1 means no packet starts at 'first' yet
+          (u32 vs -1 compare relies on the usual arithmetic conversion) */
+       last = tx_buffer->eop_index;
+       if (last == -1)
+               return FALSE;
+       eop_desc = (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
+
+       /*
+       ** Get the index of the first descriptor
+       ** BEYOND the EOP and call that 'done'.
+       ** I do this so the comparison in the
+       ** inner while loop below can be simple
+       */
+       if (++last == adapter->num_tx_desc) last = 0;
+       done = last;
+
+        bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
+            BUS_DMASYNC_POSTREAD);
+       /*
+       ** Only the EOP descriptor of a packet now has the DD
+       ** bit set, this is what we look for...
+       */
+       while (eop_desc->upper.fields.status & IXGBE_TXD_STAT_DD) {
+               /* We clean the range of the packet */
+               while (first != done) {
+                       tx_desc->upper.data = 0;
+                       tx_desc->lower.data = 0;
+                       tx_desc->buffer_addr = 0;
+                       ++txr->tx_avail;
+
+                       if (tx_buffer->m_head) {
+                               bus_dmamap_sync(txr->txtag,
+                                   tx_buffer->map,
+                                   BUS_DMASYNC_POSTWRITE);
+                               bus_dmamap_unload(txr->txtag,
+                                   tx_buffer->map);
+                               m_freem(tx_buffer->m_head);
+                               tx_buffer->m_head = NULL;
+                               /*
+                                * NOTE(review): the map handle is dropped
+                                * here after unload without being
+                                * destroyed; the xmit path appears to
+                                * swap map handles between buffers, but
+                                * confirm no bus_dmamap is orphaned at
+                                * detach time.
+                                */
+                               tx_buffer->map = NULL;
+                       }
+                       tx_buffer->eop_index = -1;
+                       /* Progress was made — reset the watchdog clock */
+                       txr->watchdog_time = ticks;
+
+                       if (++first == adapter->num_tx_desc)
+                               first = 0;
+
+                       tx_buffer = &txr->tx_buffers[first];
+                       tx_desc =
+                           (struct ixgbe_legacy_tx_desc *)&txr->tx_base[first];
+               }
+               ++ifp->if_opackets;
+               /* See if there is more work now */
+               last = tx_buffer->eop_index;
+               if (last != -1) {
+                       eop_desc =
+                           (struct ixgbe_legacy_tx_desc *)&txr->tx_base[last];
+                       /* Get next done point */
+                       if (++last == adapter->num_tx_desc) last = 0;
+                       done = last;
+               } else
+                       break;
+       }
+       bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
+           BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+
+       txr->next_to_clean = first;
+
+       /*
+        * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack that
+        * it is OK to send packets. If there are no pending descriptors,
+        * clear the timeout. Otherwise, if some descriptors have been freed,
+        * restart the timeout.
+        */
+       if (txr->tx_avail > IXV_TX_CLEANUP_THRESHOLD) {
+               ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+               if (txr->tx_avail == adapter->num_tx_desc) {
+                       txr->watchdog_check = FALSE;
+                       return FALSE;
+               }
+       }
+
+       /* TRUE means there may still be work pending */
+       return TRUE;
+}
+
+/*********************************************************************
+ *
+ *  Refresh mbuf buffers for RX descriptor rings
+ *   - now keeps its own state so discards due to resource
+ *     exhaustion are unnecessary, if an mbuf cannot be obtained
+ *     it just returns, keeping its placeholder, thus it can simply
+ *     be recalled to try again.
+ *
+ **********************************************************************/
+static void
+ixv_refresh_mbufs(struct rx_ring *rxr, int limit)
+{
+       struct adapter          *adapter = rxr->adapter;
+       bus_dma_segment_t       hseg[1];
+       bus_dma_segment_t       pseg[1];
+       struct ixv_rx_buf       *rxbuf;
+       struct mbuf             *mh, *mp;
+       int                     i, j, nsegs, error;
+       bool                    refreshed = FALSE;
+
+       /*
+        * 'i' is the slot being refreshed; 'j' runs one ahead as the
+        * loop control.  next_to_refresh only advances after slot 'i'
+        * is fully re-armed, so a failed allocation can simply be
+        * retried on the next call.
+        */
+       i = j = rxr->next_to_refresh;
+        /* Get the control variable, one beyond refresh point */
+       if (++j == adapter->num_rx_desc)
+               j = 0;
+       while (j != limit) {
+               rxbuf = &rxr->rx_buffers[i];
+               if ((rxbuf->m_head == NULL) && (rxr->hdr_split)) {
+                       mh = m_gethdr(M_DONTWAIT, MT_DATA);
+                       if (mh == NULL)
+                               goto update;
+                       /* m_gethdr() presumably sets M_PKTHDR already;
+                          the explicit re-assignments below look
+                          redundant but are harmless */
+                       mh->m_pkthdr.len = mh->m_len = MHLEN;
+                       mh->m_len = MHLEN;
+                       mh->m_flags |= M_PKTHDR;
+                       m_adj(mh, ETHER_ALIGN);
+                       /* Get the memory mapping */
+                       error = bus_dmamap_load_mbuf_sg(rxr->htag,
+                           rxbuf->hmap, mh, hseg, &nsegs, BUS_DMA_NOWAIT);
+                       if (error != 0) {
+                               printf("GET BUF: dmamap load"
+                                   " failure - %d\n", error);
+                               m_free(mh);
+                               goto update;
+                       }
+                       rxbuf->m_head = mh;
+                       bus_dmamap_sync(rxr->htag, rxbuf->hmap,
+                           BUS_DMASYNC_PREREAD);
+                       rxr->rx_base[i].read.hdr_addr =
+                           htole64(hseg[0].ds_addr);
+               }
+
+               /* Payload cluster: allocate new or re-arm the existing one */
+               if (rxbuf->m_pack == NULL) {
+                       mp = m_getjcl(M_DONTWAIT, MT_DATA,
+                           M_PKTHDR, adapter->rx_mbuf_sz);
+                       if (mp == NULL)
+                               goto update;
+               } else
+                       mp = rxbuf->m_pack;
+
+               mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
+               /* Get the memory mapping */
+               error = bus_dmamap_load_mbuf_sg(rxr->ptag,
+                   rxbuf->pmap, mp, pseg, &nsegs, BUS_DMA_NOWAIT);
+               if (error != 0) {
+                       printf("GET BUF: dmamap load"
+                           " failure - %d\n", error);
+                       m_free(mp);
+                       rxbuf->m_pack = NULL;
+                       goto update;
+               }
+               rxbuf->m_pack = mp;
+               bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
+                   BUS_DMASYNC_PREREAD);
+               rxr->rx_base[i].read.pkt_addr =
+                   htole64(pseg[0].ds_addr);
+
+               refreshed = TRUE;
+               rxr->next_to_refresh = i = j;
+               /* Calculate next index */
+               if (++j == adapter->num_rx_desc)
+                       j = 0;
+       }
+update:
+       if (refreshed) /* update tail index */
+               IXGBE_WRITE_REG(&adapter->hw,
+                   IXGBE_VFRDT(rxr->me), rxr->next_to_refresh);
+       return;
+}
+
+/*********************************************************************
+ *
+ *  Allocate memory for rx_buffer structures. Since we use one
+ *  rx_buffer per received packet, the maximum number of rx_buffer's
+ *  that we'll need is equal to the number of receive descriptors
+ *  that we've allocated.
+ *
+ **********************************************************************/
+static int
+ixv_allocate_receive_buffers(struct rx_ring *rxr)
+{
+       struct  adapter         *adapter = rxr->adapter;
+       device_t                dev = adapter->dev;
+       struct ixv_rx_buf       *rxbuf;
+       int                     i, bsize, error;
+
+       /* One ixv_rx_buf bookkeeping entry per RX descriptor, zeroed. */
+       bsize = sizeof(struct ixv_rx_buf) * adapter->num_rx_desc;
+       if (!(rxr->rx_buffers =
+           (struct ixv_rx_buf *) malloc(bsize,
+           M_DEVBUF, M_NOWAIT | M_ZERO))) {
+               device_printf(dev, "Unable to allocate rx_buffer memory\n");
+               error = ENOMEM;
+               goto fail;
+       }
+
+       /* Tag for the small header buffers used with header split. */
+       if ((error = bus_dma_tag_create(bus_get_dma_tag(dev),   /* parent */
+                                  1, 0,        /* alignment, bounds */
+                                  BUS_SPACE_MAXADDR,   /* lowaddr */
+                                  BUS_SPACE_MAXADDR,   /* highaddr */
+                                  NULL, NULL,          /* filter, filterarg */
+                                  MSIZE,               /* maxsize */
+                                  1,                   /* nsegments */
+                                  MSIZE,               /* maxsegsize */
+                                  0,                   /* flags */
+                                  NULL,                /* lockfunc */
+                                  NULL,                /* lockfuncarg */
+                                  &rxr->htag))) {
+               device_printf(dev, "Unable to create RX DMA tag\n");
+               goto fail;
+       }
+
+       /* Tag for the payload (packet) cluster buffers. */
+       if ((error = bus_dma_tag_create(bus_get_dma_tag(dev),   /* parent */
+                                  1, 0,        /* alignment, bounds */
+                                  BUS_SPACE_MAXADDR,   /* lowaddr */
+                                  BUS_SPACE_MAXADDR,   /* highaddr */
+                                  NULL, NULL,          /* filter, filterarg */
+                                  MJUMPAGESIZE,        /* maxsize */
+                                  1,                   /* nsegments */
+                                  MJUMPAGESIZE,        /* maxsegsize */
+                                  0,                   /* flags */
+                                  NULL,                /* lockfunc */
+                                  NULL,                /* lockfuncarg */
+                                  &rxr->ptag))) {
+               device_printf(dev, "Unable to create RX DMA tag\n");
+               goto fail;
+       }
+
+       /*
+        * Create one header map and one payload map per descriptor.
+        * (A stray "rxbuf++" in this loop header was dead code — rxbuf
+        * is re-derived from 'i' every iteration — and has been removed.)
+        */
+       for (i = 0; i < adapter->num_rx_desc; i++) {
+               rxbuf = &rxr->rx_buffers[i];
+               error = bus_dmamap_create(rxr->htag,
+                   BUS_DMA_NOWAIT, &rxbuf->hmap);
+               if (error) {
+                       device_printf(dev, "Unable to create RX head map\n");
+                       goto fail;
+               }
+               error = bus_dmamap_create(rxr->ptag,
+                   BUS_DMA_NOWAIT, &rxbuf->pmap);
+               if (error) {
+                       device_printf(dev, "Unable to create RX pkt map\n");
+                       goto fail;
+               }
+       }
+
+       return (0);
+
+fail:
+       /* Frees all, but can handle partial completion */
+       ixv_free_receive_structures(adapter);
+       return (error);
+}
+
+static void     
+ixv_free_receive_ring(struct rx_ring *rxr)
+{ 
+       struct  adapter         *adapter;
+       struct ixv_rx_buf       *rxbuf;
+       int i;
+
+       /*
+        * Release every mbuf (header and payload) attached to the ring,
+        * unloading its DMA map first.  Maps and tags are kept for reuse;
+        * presumably they are destroyed in the receive-structures
+        * teardown path (not visible in this chunk) — confirm.
+        */
+       adapter = rxr->adapter;
+       for (i = 0; i < adapter->num_rx_desc; i++) {
+               rxbuf = &rxr->rx_buffers[i];
+               if (rxbuf->m_head != NULL) {
+                       bus_dmamap_sync(rxr->htag, rxbuf->hmap,
+                           BUS_DMASYNC_POSTREAD);
+                       bus_dmamap_unload(rxr->htag, rxbuf->hmap);
+                       /* M_PKTHDR is forced so m_freem() treats the
+                          chain head as a packet — TODO confirm intent */
+                       rxbuf->m_head->m_flags |= M_PKTHDR;
+                       m_freem(rxbuf->m_head);
+               }
+               if (rxbuf->m_pack != NULL) {
+                       bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
+                           BUS_DMASYNC_POSTREAD);
+                       bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
+                       rxbuf->m_pack->m_flags |= M_PKTHDR;
+                       m_freem(rxbuf->m_pack);
+               }
+               rxbuf->m_head = NULL;
+               rxbuf->m_pack = NULL;
+       }
+}
+
+
+/*********************************************************************
+ *
+ *  Initialize a receive ring and its buffers.
+ *
+ **********************************************************************/
+static int
+ixv_setup_receive_ring(struct rx_ring *rxr)
+{
+       struct  adapter         *adapter;
+       struct ifnet            *ifp;
+       device_t                dev;
+       struct ixv_rx_buf       *rxbuf;
+       bus_dma_segment_t       pseg[1], hseg[1];
+       struct lro_ctrl         *lro = &rxr->lro;
+       int                     rsize, nsegs, error = 0;
+
+       adapter = rxr->adapter;
+       ifp = adapter->ifp;
+       dev = adapter->dev;
+
+       /*
+        * The whole (re)initialization runs under the RX lock; on any
+        * failure the ring is emptied again before returning.
+        */
+       /* Clear the ring contents */
+       IXV_RX_LOCK(rxr);
+       rsize = roundup2(adapter->num_rx_desc *
+           sizeof(union ixgbe_adv_rx_desc), DBA_ALIGN);
+       bzero((void *)rxr->rx_base, rsize);
+
+       /* Free current RX buffer structs and their mbufs */
+       ixv_free_receive_ring(rxr);
+
+       /* Configure header split? */
+       if (ixv_header_split)
+               rxr->hdr_split = TRUE;
+
+       /* Now replenish the mbufs */
+       for (int j = 0; j != adapter->num_rx_desc; ++j) {
+               struct mbuf     *mh, *mp;
+
+               rxbuf = &rxr->rx_buffers[j];
+               /*
+               ** Dont allocate mbufs if not
+               ** doing header split, its wasteful
+               */ 
+               if (rxr->hdr_split == FALSE)
+                       goto skip_head;
+
+               /* First the header */
+               rxbuf->m_head = m_gethdr(M_NOWAIT, MT_DATA);
+               if (rxbuf->m_head == NULL) {
+                       error = ENOBUFS;
+                       goto fail;
+               }
+               m_adj(rxbuf->m_head, ETHER_ALIGN);
+               mh = rxbuf->m_head;
+               mh->m_len = mh->m_pkthdr.len = MHLEN;
+               mh->m_flags |= M_PKTHDR;
+               /* Get the memory mapping */
+               error = bus_dmamap_load_mbuf_sg(rxr->htag,
+                   rxbuf->hmap, rxbuf->m_head, hseg,
+                   &nsegs, BUS_DMA_NOWAIT);
+               if (error != 0) /* Nothing elegant to do here */
+                       goto fail;
+               bus_dmamap_sync(rxr->htag,
+                   rxbuf->hmap, BUS_DMASYNC_PREREAD);
+               /* Update descriptor */
+               rxr->rx_base[j].read.hdr_addr = htole64(hseg[0].ds_addr);
+
+skip_head:
+               /* Now the payload cluster */
+               rxbuf->m_pack = m_getjcl(M_NOWAIT, MT_DATA,
+                   M_PKTHDR, adapter->rx_mbuf_sz);
+               if (rxbuf->m_pack == NULL) {
+                       error = ENOBUFS;
+                        goto fail;
+               }
+               mp = rxbuf->m_pack;
+               mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
+               /* Get the memory mapping */
+               error = bus_dmamap_load_mbuf_sg(rxr->ptag,
+                   rxbuf->pmap, mp, pseg,
+                   &nsegs, BUS_DMA_NOWAIT);
+               if (error != 0)
+                        goto fail;
+               bus_dmamap_sync(rxr->ptag,
+                   rxbuf->pmap, BUS_DMASYNC_PREREAD);
+               /* Update descriptor */
+               rxr->rx_base[j].read.pkt_addr = htole64(pseg[0].ds_addr);
+       }
+
+
+       /* Setup our descriptor indices */
+       rxr->next_to_check = 0;
+       rxr->next_to_refresh = 0;
+       rxr->lro_enabled = FALSE;
+       rxr->rx_split_packets = 0;
+       rxr->rx_bytes = 0;
+       rxr->discard = FALSE;
+
+       bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
+           BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+
+       /*
+       ** Now set up the LRO interface:
+       */
+       if (ifp->if_capenable & IFCAP_LRO) {
+               int err = tcp_lro_init(lro);
+               if (err) {
+                       device_printf(dev, "LRO Initialization failed!\n");
+                       goto fail;
+               }
+               INIT_DEBUGOUT("RX Soft LRO Initialized\n");
+               rxr->lro_enabled = TRUE;
+               lro->ifp = adapter->ifp;
+       }
+
+       IXV_RX_UNLOCK(rxr);
+       return (0);
+
+fail:
+       /* Unwind: drop any mbufs armed so far, then unlock */
+       ixv_free_receive_ring(rxr);
+       IXV_RX_UNLOCK(rxr);
+       return (error);
+}
+
+/*********************************************************************
+ *
+ *  Initialize all receive rings.
+ *
+ **********************************************************************/
+static int
+ixv_setup_receive_structures(struct adapter *adapter)
+{
+       struct rx_ring *rxr;
+       int j;
+
+       /* Bring up each RX ring in turn. */
+       for (j = 0; j < adapter->num_queues; j++) {
+               rxr = &adapter->rx_rings[j];
+               if (ixv_setup_receive_ring(rxr) != 0) {
+                       /*
+                        * Ring 'j' cleaned up after itself; release the
+                        * buffers of the rings that were fully set up
+                        * before it.
+                        */
+                       while (--j >= 0)
+                               ixv_free_receive_ring(&adapter->rx_rings[j]);
+                       return (ENOBUFS);
+               }
+       }
+
+       return (0);
+}
+
+/*********************************************************************
+ *
+ *  Setup receive registers and features.
+ *
+ **********************************************************************/
+#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
+
+static void
+ixv_initialize_receive_units(struct adapter *adapter)
+{
+       struct  rx_ring *rxr = adapter->rx_rings;
+       struct ixgbe_hw *hw = &adapter->hw;
+       struct ifnet   *ifp = adapter->ifp;
+       u32             bufsz, fctrl, rxcsum, hlreg;
+
+
+       /*
+        * Enable broadcasts.
+        * NOTE(review): FCTRL and HLREG0 below are PF-scope registers;
+        * whether a VF write takes effect depends on the hardware/PF
+        * configuration — confirm against the datasheet.
+        */
+       fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+       fctrl |= IXGBE_FCTRL_BAM;
+       fctrl |= IXGBE_FCTRL_DPF;
+       fctrl |= IXGBE_FCTRL_PMCF;
+       IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
+
+       /* Set for Jumbo Frames?  Pick the RX buffer size to match. */
+       hlreg = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+       if (ifp->if_mtu > ETHERMTU) {
+               hlreg |= IXGBE_HLREG0_JUMBOEN;
+               bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+       } else {
+               hlreg &= ~IXGBE_HLREG0_JUMBOEN;
+               bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+       }
+       IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg);
+
+       /* Program per-queue descriptor ring registers. */
+       for (int i = 0; i < adapter->num_queues; i++, rxr++) {
+               u64 rdba = rxr->rxdma.dma_paddr;
+               u32 reg, rxdctl;
+
+               /* Do the queue enabling first */
+               rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
+               rxdctl |= IXGBE_RXDCTL_ENABLE;
+               IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
+               /* Poll up to ~10ms for the enable bit to stick. */
+               for (int k = 0; k < 10; k++) {
+                       if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
+                           IXGBE_RXDCTL_ENABLE)
+                               break;
+                       else
+                               msec_delay(1);
+               }
+               wmb();
+
+               /* Setup the Base and Length of the Rx Descriptor Ring */
+               IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
+                   (rdba & 0x00000000ffffffffULL));
+               IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
+                   (rdba >> 32));
+               IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
+                   adapter->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
+
+               /* Set up the SRRCTL register (buffer sizes, descriptor type) */
+               reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
+               reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
+               reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
+               reg |= bufsz;
+               if (rxr->hdr_split) {
+                       /* Use a standard mbuf for the header */
+                       reg |= ((IXV_RX_HDR <<
+                           IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT)
+                           & IXGBE_SRRCTL_BSIZEHDR_MASK);
+                       reg |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
+               } else
+                       reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
+               IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);
+
+               /* Setup the HW Rx Head and Tail Descriptor Pointers */
+               IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
+               IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
+                   adapter->num_rx_desc - 1);
+       }
+
+       /* RX checksum offload configuration. */
+       rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
+
+       if (ifp->if_capenable & IFCAP_RXCSUM)
+               rxcsum |= IXGBE_RXCSUM_PCSD;
+
+       /* Without packet-split checksum, fall back to IP payload checksum. */
+       if (!(rxcsum & IXGBE_RXCSUM_PCSD))
+               rxcsum |= IXGBE_RXCSUM_IPPCSE;
+
+       IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
+
+       return;
+}
+
+/*********************************************************************
+ *
+ *  Free all receive rings.
+ *
+ **********************************************************************/
+static void
+ixv_free_receive_structures(struct adapter *adapter)
+{
+       struct rx_ring *rxr = adapter->rx_rings;
+
+       /* Tear down each ring: buffers, LRO state, descriptor memory. */
+       for (int i = 0; i < adapter->num_queues; i++, rxr++) {
+               struct lro_ctrl         *lro = &rxr->lro;
+               ixv_free_receive_buffers(rxr);
+               /* Free LRO memory */
+               tcp_lro_free(lro);
+               /* Free the ring memory as well */
+               ixv_dma_free(adapter, &rxr->rxdma);
+       }
+
+       /* Finally release the ring array itself. */
+       free(adapter->rx_rings, M_DEVBUF);
+}
+
+
+/*********************************************************************
+ *
+ *  Free receive ring data structures
+ *
+ **********************************************************************/
+static void
+ixv_free_receive_buffers(struct rx_ring *rxr)
+{
+       struct adapter          *adapter = rxr->adapter;
+       struct ixv_rx_buf       *rxbuf;
+
+       INIT_DEBUGOUT("free_receive_structures: begin");
+
+       /* Cleanup any existing buffers: unmap and free header/payload mbufs. */
+       if (rxr->rx_buffers != NULL) {
+               for (int i = 0; i < adapter->num_rx_desc; i++) {
+                       rxbuf = &rxr->rx_buffers[i];
+                       if (rxbuf->m_head != NULL) {
+                               bus_dmamap_sync(rxr->htag, rxbuf->hmap,
+                                   BUS_DMASYNC_POSTREAD);
+                               bus_dmamap_unload(rxr->htag, rxbuf->hmap);
+                               /* m_freem() requires a pkthdr mbuf at chain head */
+                               rxbuf->m_head->m_flags |= M_PKTHDR;
+                               m_freem(rxbuf->m_head);
+                       }
+                       if (rxbuf->m_pack != NULL) {
+                               bus_dmamap_sync(rxr->ptag, rxbuf->pmap,
+                                   BUS_DMASYNC_POSTREAD);
+                               bus_dmamap_unload(rxr->ptag, rxbuf->pmap);
+                               rxbuf->m_pack->m_flags |= M_PKTHDR;
+                               m_freem(rxbuf->m_pack);
+                       }
+                       rxbuf->m_head = NULL;
+                       rxbuf->m_pack = NULL;
+                       if (rxbuf->hmap != NULL) {
+                               bus_dmamap_destroy(rxr->htag, rxbuf->hmap);
+                               rxbuf->hmap = NULL;
+                       }
+                       if (rxbuf->pmap != NULL) {
+                               bus_dmamap_destroy(rxr->ptag, rxbuf->pmap);
+                               rxbuf->pmap = NULL;
+                       }
+               }
+               /* NOTE(review): this inner NULL check is redundant — we are
+                  already inside the outer rx_buffers != NULL guard. */
+               if (rxr->rx_buffers != NULL) {
+                       free(rxr->rx_buffers, M_DEVBUF);
+                       rxr->rx_buffers = NULL;
+               }
+       }
+
+       /* Destroy the DMA tags last, after all maps are gone. */
+       if (rxr->htag != NULL) {
+               bus_dma_tag_destroy(rxr->htag);
+               rxr->htag = NULL;
+       }
+       if (rxr->ptag != NULL) {
+               bus_dma_tag_destroy(rxr->ptag);
+               rxr->ptag = NULL;
+       }
+
+       return;
+}
+
+static __inline void
+ixv_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u32 ptype)
+{
+                 
+        /*
+         * ATM LRO is only for IPv4/TCP packets and TCP checksum of the packet
+         * should be computed by hardware. Also it should not have VLAN tag in
+         * ethernet header.
+         *
+         * NOTE(review): the condition below tests the interface capability
+         * IFCAP_VLAN_HWTAGGING rather than the mbuf's own VLAN state —
+         * presumably because HW tagging strips the tag from the frame;
+         * confirm against the comment above.
+         */
+        if (rxr->lro_enabled &&
+            (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
+            (ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
+            (ptype & (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP)) ==
+            (IXGBE_RXDADV_PKTTYPE_IPV4 | IXGBE_RXDADV_PKTTYPE_TCP) &&
+            (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
+            (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
+                /*
+                 * Send to the stack if:
+                 **  - LRO not enabled, or
+                 **  - no LRO resources, or
+                 **  - lro enqueue fails
+                 */
+                if (rxr->lro.lro_cnt != 0)
+                        if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
+                                return; /* consumed by LRO */
+        }
+       /* Drop the RX lock while the stack processes the packet —
+          if_input may take a long time and re-enter the driver. */
+       IXV_RX_UNLOCK(rxr);
+        (*ifp->if_input)(ifp, m);
+       IXV_RX_LOCK(rxr);
+}
+
+static __inline void
+ixv_rx_discard(struct rx_ring *rxr, int i)
+{
+       struct ixv_rx_buf       *rbuf;
+
+       rbuf = &rxr->rx_buffers[i];
+
+       /* Free any partially assembled chain headed at this slot. */
+       if (rbuf->fmp != NULL) {/* Partial chain ? */
+               rbuf->fmp->m_flags |= M_PKTHDR;
+               m_freem(rbuf->fmp);
+               rbuf->fmp = NULL;
+       }
+
+       /*
+       ** With advanced descriptors the writeback
+       ** clobbers the buffer addrs, so its easier
+       ** to just free the existing mbufs and take
+       ** the normal refresh path to get new buffers
+       ** and mapping.
+       */
+       if (rbuf->m_head) {
+               m_free(rbuf->m_head);
+               rbuf->m_head = NULL;
+       }
+
+       if (rbuf->m_pack) {
+               m_free(rbuf->m_pack);
+               rbuf->m_pack = NULL;
+       }
+
+       return;
+}
+
+
+/*********************************************************************
+ *
+ *  This routine executes in interrupt context. It replenishes
+ *  the mbufs in the descriptor and sends data which has been
+ *  dma'ed into host memory to upper layer.
+ *
+ *  We loop at most count times if count is > 0, or until done if
+ *  count < 0.
+ *
+ *  Return TRUE for more work, FALSE for all clean.
+ *********************************************************************/
+static bool
+ixv_rxeof(struct ix_queue *que, int count)
+{
+       struct adapter          *adapter = que->adapter;
+       struct rx_ring          *rxr = que->rxr;
+       struct ifnet            *ifp = adapter->ifp;
+       struct lro_ctrl         *lro = &rxr->lro;
+       struct lro_entry        *queued;
+       int                     i, nextp, processed = 0;
+       u32                     staterr = 0;
+       union ixgbe_adv_rx_desc *cur;
+       struct ixv_rx_buf       *rbuf, *nbuf;
+
+       IXV_RX_LOCK(rxr);
+
+       /* Walk descriptors from next_to_check until count runs out
+          or a not-yet-done descriptor is hit. */
+       for (i = rxr->next_to_check; count != 0;) {
+               struct mbuf     *sendmp, *mh, *mp;
+               u32             rsc, ptype;
+               u16             hlen, plen, hdr, vtag;
+               bool            eop;
+               /* Sync the ring. */
+               bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
+                   BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+
+               cur = &rxr->rx_base[i];
+               staterr = le32toh(cur->wb.upper.status_error);
+
+               /* Descriptor not written back yet — nothing more to do. */
+               if ((staterr & IXGBE_RXD_STAT_DD) == 0)
+                       break;
+               if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
+                       break;
+
+               count--;
+               sendmp = NULL;
+               nbuf = NULL;
+               rsc = 0;
+               cur->wb.upper.status_error = 0;
+               rbuf = &rxr->rx_buffers[i];
+               mh = rbuf->m_head;
+               mp = rbuf->m_pack;
+
+               /* Pull the writeback fields out of the descriptor. */
+               plen = le16toh(cur->wb.upper.length);
+               ptype = le32toh(cur->wb.lower.lo_dword.data) &
+                   IXGBE_RXDADV_PKTTYPE_MASK;
+               hdr = le16toh(cur->wb.lower.lo_dword.hs_rss.hdr_info);
+               vtag = le16toh(cur->wb.upper.vlan);
+               eop = ((staterr & IXGBE_RXD_STAT_EOP) != 0);
+
+               /* Make sure all parts of a bad packet are discarded */
+               if (((staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) ||
+                   (rxr->discard)) {
+                       ifp->if_ierrors++;
+                       rxr->rx_discarded++;
+                       /* Keep discarding until EOP of the bad frame. */
+                       if (!eop)
+                               rxr->discard = TRUE;
+                       else
+                               rxr->discard = FALSE;
+                       ixv_rx_discard(rxr, i);
+                       goto next_desc;
+               }
+
+               /* Prefetch the next buffer struct when more fragments follow. */
+               if (!eop) {
+                       nextp = i + 1;
+                       if (nextp == adapter->num_rx_desc)
+                               nextp = 0;
+                       nbuf = &rxr->rx_buffers[nextp];
+                       prefetch(nbuf);
+               }
+               /*
+               ** The header mbuf is ONLY used when header 
+               ** split is enabled, otherwise we get normal 
+               ** behavior, ie, both header and payload
+               ** are DMA'd into the payload buffer.
+               **
+               ** Rather than using the fmp/lmp global pointers
+               ** we now keep the head of a packet chain in the
+               ** buffer struct and pass this along from one
+               ** descriptor to the next, until we get EOP.
+               */
+               if (rxr->hdr_split && (rbuf->fmp == NULL)) {
+                       /* This must be an initial descriptor */
+                       hlen = (hdr & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
+                           IXGBE_RXDADV_HDRBUFLEN_SHIFT;
+                       if (hlen > IXV_RX_HDR)
+                               hlen = IXV_RX_HDR;
+                       mh->m_len = hlen;
+                       mh->m_flags |= M_PKTHDR;
+                       mh->m_next = NULL;
+                       mh->m_pkthdr.len = mh->m_len;
+                       /* Null buf pointer so it is refreshed */
+                       rbuf->m_head = NULL;
+                       /*
+                       ** Check the payload length, this
+                       ** could be zero if its a small
+                       ** packet.
+                       */
+                       if (plen > 0) {
+                               mp->m_len = plen;
+                               mp->m_next = NULL;
+                               mp->m_flags &= ~M_PKTHDR;
+                               mh->m_next = mp;
+                               mh->m_pkthdr.len += mp->m_len;
+                               /* Null buf pointer so it is refreshed */
+                               rbuf->m_pack = NULL;
+                               rxr->rx_split_packets++;
+                       }
+                       /*
+                       ** Now create the forward
+                       ** chain so when complete 
+                       ** we wont have to.
+                       */
+                        if (eop == 0) {
+                               /* stash the chain head */
+                                nbuf->fmp = mh;
+                               /* Make forward chain */
+                                if (plen)
+                                        mp->m_next = nbuf->m_pack;
+                                else
+                                        mh->m_next = nbuf->m_pack;
+                        } else {
+                               /* Singlet, prepare to send */
+                                sendmp = mh;
+                               if ((adapter->num_vlans) &&
+                                   (staterr & IXGBE_RXD_STAT_VP)) {
+                                        sendmp->m_pkthdr.ether_vtag = vtag;
+                                        sendmp->m_flags |= M_VLANTAG;
+                                }
+                        }
+               } else {
+                       /*
+                       ** Either no header split, or a
+                       ** secondary piece of a fragmented
+                       ** split packet.
+                       */
+                       mp->m_len = plen;
+                       /*
+                       ** See if there is a stored head
+                       ** that determines what we are
+                       */
+                       sendmp = rbuf->fmp;
+                       rbuf->m_pack = rbuf->fmp = NULL;
+
+                       if (sendmp != NULL) /* secondary frag */
+                               sendmp->m_pkthdr.len += mp->m_len;
+                       else {
+                               /* first desc of a non-ps chain */
+                               sendmp = mp;
+                               sendmp->m_flags |= M_PKTHDR;
+                               sendmp->m_pkthdr.len = mp->m_len;
+                               if (staterr & IXGBE_RXD_STAT_VP) {
+                                       sendmp->m_pkthdr.ether_vtag = vtag;
+                                       sendmp->m_flags |= M_VLANTAG;
+                               }
+                        }
+                       /* Pass the head pointer on */
+                       if (eop == 0) {
+                               nbuf->fmp = sendmp;
+                               sendmp = NULL;
+                               mp->m_next = nbuf->m_pack;
+                       }
+               }
+               ++processed;
+               /* Sending this frame? */
+               if (eop) {
+                       sendmp->m_pkthdr.rcvif = ifp;
+                       ifp->if_ipackets++;
+                       rxr->rx_packets++;
+                       /* capture data for AIM */
+                       rxr->bytes += sendmp->m_pkthdr.len;
+                       rxr->rx_bytes += sendmp->m_pkthdr.len;
+                       if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
+                               ixv_rx_checksum(staterr, sendmp, ptype);
+#if __FreeBSD_version >= 800000
+                       sendmp->m_pkthdr.flowid = que->msix;
+                       sendmp->m_flags |= M_FLOWID;
+#endif
+               }
+next_desc:
+               bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
+                   BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+
+               /* Advance our pointers to the next descriptor. */
+               if (++i == adapter->num_rx_desc)
+                       i = 0;
+
+               /* Now send to the stack or do LRO */
+               if (sendmp != NULL)
+                       ixv_rx_input(rxr, ifp, sendmp, ptype);
+
+               /* Every 8 descriptors we go to refresh mbufs */
+               if (processed == 8) {
+                       ixv_refresh_mbufs(rxr, i);
+                       processed = 0;
+               }
+       }
+
+       /* Refresh any remaining buf structs */
+       if (ixv_rx_unrefreshed(rxr))
+               ixv_refresh_mbufs(rxr, i);
+
+       rxr->next_to_check = i;
+
+       /*
+        * Flush any outstanding LRO work
+        */
+       while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
+               SLIST_REMOVE_HEAD(&lro->lro_active, next);
+               tcp_lro_flush(lro, queued);
+       }
+
+       IXV_RX_UNLOCK(rxr);
+
+       /*
+       ** We still have cleaning to do?
+       ** Schedule another interrupt if so.
+       **
+       ** NOTE(review): (u64)(1 << que->msix) does the shift in int
+       ** width before widening — assumes msix < 31; confirm queue
+       ** vector numbering.
+       */
+       if ((staterr & IXGBE_RXD_STAT_DD) != 0) {
+               ixv_rearm_queues(adapter, (u64)(1 << que->msix));
+               return (TRUE);
+       }
+
+       return (FALSE);
+}
+
+
+/*********************************************************************
+ *
+ *  Verify that the hardware indicated that the checksum is valid.
+ *  Inform the stack about the status of checksum so that stack
+ *  doesn't spend time verifying the checksum.
+ *
+ *********************************************************************/
+static void
+ixv_rx_checksum(u32 staterr, struct mbuf * mp, u32 ptype)
+{
+       u16     status = (u16) staterr;         /* low 16 bits: status */
+       u8      errors = (u8) (staterr >> 24);  /* bits 24-31: error flags */
+       bool    sctp = FALSE;
+
+       /* SCTP packets get a distinct csum flag (no pseudo-header). */
+       if ((ptype & IXGBE_RXDADV_PKTTYPE_ETQF) == 0 &&
+           (ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)
+               sctp = TRUE;
+
+       if (status & IXGBE_RXD_STAT_IPCS) {
+               if (!(errors & IXGBE_RXD_ERR_IPE)) {
+                       /* IP Checksum Good */
+                       mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
+                       mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
+
+               } else
+                       mp->m_pkthdr.csum_flags = 0;
+       }
+       if (status & IXGBE_RXD_STAT_L4CS) {
+               u16 type = (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
+#if __FreeBSD_version >= 800000
+               if (sctp)
+                       type = CSUM_SCTP_VALID;
+#endif
+               if (!(errors & IXGBE_RXD_ERR_TCPE)) {
+                       mp->m_pkthdr.csum_flags |= type;
+                       /* 0xffff tells the stack the full csum was verified */
+                       if (!sctp)
+                               mp->m_pkthdr.csum_data = htons(0xffff);
+               } 
+       }
+       return;
+}
+
+static void
+ixv_setup_vlan_support(struct adapter *adapter)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       u32             ctrl, vid, vfta, retry;
+
+
+       /*
+       ** We get here thru init_locked, meaning
+       ** a soft reset, this has already cleared
+       ** the VFTA and other state, so if there
+       ** have been no vlan's registered do nothing.
+       */
+       if (adapter->num_vlans == 0)
+               return;
+
+       /* Enable VLAN stripping (VME) on every RX queue. */
+       for (int i = 0; i < adapter->num_queues; i++) {
+               ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
+               ctrl |= IXGBE_RXDCTL_VME;
+               IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
+       }
+
+       /*
+       ** A soft reset zero's out the VFTA, so
+       ** we need to repopulate it now.
+       */
+       for (int i = 0; i < VFTA_SIZE; i++) {
+               if (ixv_shadow_vfta[i] == 0)
+                       continue;
+               vfta = ixv_shadow_vfta[i];
+               /*
+               ** Reconstruct the vlan id's
+               ** based on the bits set in each
+               ** of the array ints.
+               */
+               for ( int j = 0; j < 32; j++) {
+                       retry = 0;
+                       if ((vfta & (1 << j)) == 0)
+                               continue;
+                       vid = (i * 32) + j;
+                       /* Call the shared code mailbox routine;
+                          retry a few times since the PF may be busy. */
+                       while (ixgbe_set_vfta(hw, vid, 0, TRUE)) {
+                               if (++retry > 5)
+                                       break;
+                       }
+               }
+       }
+}
+
+/*
+** This routine is run via an vlan config EVENT,
+** it enables us to use the HW Filter table since
+** we can get the vlan id. This just creates the
+** entry in the soft version of the VFTA, init will
+** repopulate the real table.
+*/
+static void
+ixv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
+{
+       struct adapter  *adapter = ifp->if_softc;
+       u16             index, bit;
+
+       if (ifp->if_softc !=  arg)   /* Not our event */
+               return;
+
+       if ((vtag == 0) || (vtag > 4095))       /* Invalid */
+               return;
+
+       IXV_CORE_LOCK(adapter);
+       /* Set the vlan's bit in the soft VFTA (32 ids per word). */
+       index = (vtag >> 5) & 0x7F;
+       bit = vtag & 0x1F;
+       ixv_shadow_vfta[index] |= (1 << bit);
+       ++adapter->num_vlans;
+       /* Re-init to load the changes */
+       ixv_init_locked(adapter);
+       IXV_CORE_UNLOCK(adapter);
+}
+
+/*
+** This routine is run via an vlan
+** unconfig EVENT, remove our entry
+** in the soft vfta.
+*/
+static void
+ixv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
+{
+       struct adapter  *adapter = ifp->if_softc;
+       u16             index, bit;
+
+       if (ifp->if_softc !=  arg)   /* Not our event */
+               return;
+
+       if ((vtag == 0) || (vtag > 4095))       /* Invalid */
+               return;
+
+       IXV_CORE_LOCK(adapter);
+       /* Clear the vlan's bit in the soft VFTA (mirror of register_vlan). */
+       index = (vtag >> 5) & 0x7F;
+       bit = vtag & 0x1F;
+       ixv_shadow_vfta[index] &= ~(1 << bit);
+       --adapter->num_vlans;
+       /* Re-init to load the changes */
+       ixv_init_locked(adapter);
+       IXV_CORE_UNLOCK(adapter);
+}
+
+static void
+ixv_enable_intr(struct adapter *adapter)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       struct ix_queue *que = adapter->queues;
+       u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
+
+
+       /* Unmask everything except the RX/TX queue bits first. */
+       IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
+
+       /* Auto-clear: queue interrupts only, not mailbox/link. */
+       mask = IXGBE_EIMS_ENABLE_MASK;
+       mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
+       IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
+
+        for (int i = 0; i < adapter->num_queues; i++, que++)
+               ixv_enable_queue(adapter, que->msix);
+
+       IXGBE_WRITE_FLUSH(hw);
+
+       return;
+}
+
+static void
+ixv_disable_intr(struct adapter *adapter)
+{
+       /* Clear auto-clear, mask every interrupt cause, then flush. */
+       IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIAC, 0);
+       IXGBE_WRITE_REG(&adapter->hw, IXGBE_VTEIMC, ~0);
+       IXGBE_WRITE_FLUSH(&adapter->hw);
+       return;
+}
+
+/*
+** Setup the correct IVAR register for a particular MSIX interrupt
+**  - entry is the register array entry
+**  - vector is the MSIX vector for this queue
+**  - type is RX/TX/MISC
+*/
+static void
+ixv_set_ivar(struct adapter *adapter, u8 entry, u8 vector, s8 type)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       u32 ivar, index;
+
+       vector |= IXGBE_IVAR_ALLOC_VAL;
+
+       if (type == -1) { /* MISC IVAR */
+               ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
+               ivar &= ~0xFF;
+               ivar |= vector;
+               IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
+       } else {        /* RX/TX IVARS */
+               /* Each IVAR register packs 4 byte-wide entries:
+                  2 queue entries x (RX=0 / TX=1). */
+               index = (16 * (entry & 1)) + (8 * type);
+               ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
+               ivar &= ~(0xFF << index);
+               ivar |= (vector << index);
+               IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
+       }
+}
+
+static void
+ixv_configure_ivars(struct adapter *adapter)
+{
+       struct  ix_queue *que = adapter->queues;
+
+        for (int i = 0; i < adapter->num_queues; i++, que++) {
+               /* First the RX queue entry */
+                ixv_set_ivar(adapter, i, que->msix, 0);
+               /* ... and the TX */
+               ixv_set_ivar(adapter, i, que->msix, 1);
+               /* Set an initial value in EITR */
+                IXGBE_WRITE_REG(&adapter->hw,
+                    IXGBE_VTEITR(que->msix), IXV_EITR_DEFAULT);
+       }
+
+       /* For the Link interrupt (type -1 selects the MISC IVAR). */
+        ixv_set_ivar(adapter, 1, adapter->mbxvec, -1);
+}
+
+
+/*
+** Tasklet handler for MSIX MBX interrupts
+**  - do outside interrupt since it might sleep
+*/
+static void
+ixv_handle_mbx(void *context, int pending)
+{
+       struct adapter  *adapter = context;
+
+       /* Query link state via the PF mailbox and propagate it. */
+       ixgbe_check_link(&adapter->hw,
+           &adapter->link_speed, &adapter->link_up, 0);
+       ixv_update_link_status(adapter);
+}
+
+/*
+** The VF stats registers never have a truely virgin
+** starting point, so this routine tries to make an
+** artificial one, marking ground zero on attach as
+** it were.
+*/
+static void
+ixv_save_stats(struct adapter *adapter)
+{
+       /* Only roll the deltas forward if counters have ever moved. */
+       if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
+               adapter->stats.saved_reset_vfgprc +=
+                   adapter->stats.vfgprc - adapter->stats.base_vfgprc;
+               adapter->stats.saved_reset_vfgptc +=
+                   adapter->stats.vfgptc - adapter->stats.base_vfgptc;
+               adapter->stats.saved_reset_vfgorc +=
+                   adapter->stats.vfgorc - adapter->stats.base_vfgorc;
+               adapter->stats.saved_reset_vfgotc +=
+                   adapter->stats.vfgotc - adapter->stats.base_vfgotc;
+               adapter->stats.saved_reset_vfmprc +=
+                   adapter->stats.vfmprc - adapter->stats.base_vfmprc;
+       }
+}
+/*
+** Snapshot the current hardware counter values as the
+** "last seen" and "base" marks for later delta updates.
+*/
+static void
+ixv_init_stats(struct adapter *adapter)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
+       /* Octet counters are split across LSB/MSB registers. */
+       adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
+       adapter->stats.last_vfgorc |=
+           (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
+
+       adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
+       adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
+       adapter->stats.last_vfgotc |=
+           (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
+
+       adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
+
+       adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
+       adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
+       adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
+       adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
+       adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
+}
+
+/*
+** Fold a 32-bit rolling hardware counter into a 64-bit software
+** accumulator, detecting wrap-around by comparing against the last
+** observed value.  'hw' must be in scope at the expansion site.
+*/
+#define UPDATE_STAT_32(reg, last, count)               \
+{                                                      \
+       u32 current = IXGBE_READ_REG(hw, reg);          \
+       if (current < last)                             \
+               count += 0x100000000LL;                 \
+       last = current;                                 \
+       count &= 0xFFFFFFFF00000000LL;                  \
+       count |= current;                               \
+}
+
+/*
+** Same idea for the 36-bit counters split across LSB/MSB registers
+** (4 MSB bits + 32 LSB bits).
+*/
+#define UPDATE_STAT_36(lsb, msb, last, count)          \
+{                                                      \
+       u64 cur_lsb = IXGBE_READ_REG(hw, lsb);          \
+       u64 cur_msb = IXGBE_READ_REG(hw, msb);          \
+       u64 current = ((cur_msb << 32) | cur_lsb);      \
+       if (current < last)                             \
+               count += 0x1000000000LL;                \
+       last = current;                                 \
+       count &= 0xFFFFFFF000000000LL;                  \
+       count |= current;                               \
+}
+
+/*
+** ixv_update_stats - Update the board statistics counters.
+*/
+void
+ixv_update_stats(struct adapter *adapter)
+{
+        struct ixgbe_hw *hw = &adapter->hw;
+
+        /* Packet counters are 32-bit, octet counters 36-bit. */
+        UPDATE_STAT_32(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
+           adapter->stats.vfgprc);
+        UPDATE_STAT_32(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
+           adapter->stats.vfgptc);
+        UPDATE_STAT_36(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
+           adapter->stats.last_vfgorc, adapter->stats.vfgorc);
+        UPDATE_STAT_36(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
+           adapter->stats.last_vfgotc, adapter->stats.vfgotc);
+        UPDATE_STAT_32(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
+           adapter->stats.vfmprc);
+}
+
+/**********************************************************************
+ *
+ *  This routine is called only when ixgbe_display_debug_stats is enabled.
+ *  This routine provides a way to take a look at important statistics
+ *  maintained by the driver and hardware.
+ *
+ **********************************************************************/
+static void
+ixv_print_hw_stats(struct adapter * adapter)
+{
+        device_t dev = adapter->dev;
+
+        /* Dump driver-level counters, then the HW-derived ones. */
+        device_printf(dev,"Std Mbuf Failed = %lu\n",
+               adapter->mbuf_defrag_failed);
+        device_printf(dev,"Driver dropped packets = %lu\n",
+               adapter->dropped_pkts);
+        device_printf(dev, "watchdog timeouts = %ld\n",
+               adapter->watchdog_events);
+
+        device_printf(dev,"Good Packets Rcvd = %llu\n",
+               (long long)adapter->stats.vfgprc);
+        device_printf(dev,"Good Packets Xmtd = %llu\n",
+               (long long)adapter->stats.vfgptc);
+        device_printf(dev,"TSO Transmissions = %lu\n",
+               adapter->tso_tx);
+
+}
+
+/**********************************************************************
+ *
+ *  This routine is called only when em_display_debug_stats is enabled.
+ *  This routine provides a way to take a look at important statistics
+ *  maintained by the driver and hardware.
+ *
+ **********************************************************************/
+static void
+ixv_print_debug_info(struct adapter *adapter)
+{
+        device_t dev = adapter->dev;
+        struct ixgbe_hw         *hw = &adapter->hw;
+        struct ix_queue         *que = adapter->queues;
+        struct rx_ring          *rxr;
+        struct tx_ring          *txr;
+        struct lro_ctrl         *lro;
+
+        device_printf(dev,"Error Byte Count = %u \n",
+            IXGBE_READ_REG(hw, IXGBE_ERRBC));
+
+        /* Per-queue IRQ, RX, LRO, and TX counters. */
+        for (int i = 0; i < adapter->num_queues; i++, que++) {
+                txr = que->txr;
+                rxr = que->rxr;
+                lro = &rxr->lro;
+                device_printf(dev,"QUE(%d) IRQs Handled: %lu\n",
+                    que->msix, (long)que->irqs);
+                device_printf(dev,"RX(%d) Packets Received: %lld\n",
+                    rxr->me, (long long)rxr->rx_packets);
+                device_printf(dev,"RX(%d) Split RX Packets: %lld\n",
+                    rxr->me, (long long)rxr->rx_split_packets);
+                device_printf(dev,"RX(%d) Bytes Received: %lu\n",
+                    rxr->me, (long)rxr->rx_bytes);
+                device_printf(dev,"RX(%d) LRO Queued= %d\n",
+                    rxr->me, lro->lro_queued);
+                device_printf(dev,"RX(%d) LRO Flushed= %d\n",
+                    rxr->me, lro->lro_flushed);
+                device_printf(dev,"TX(%d) Packets Sent: %lu\n",
+                    txr->me, (long)txr->total_packets);
+                device_printf(dev,"TX(%d) NO Desc Avail: %lu\n",
+                    txr->me, (long)txr->no_desc_avail);
+        }
+
+        device_printf(dev,"MBX IRQ Handled: %lu\n",
+            (long)adapter->mbx_irq);
+        return;
+}
+
+/*
+** Sysctl handler: writing 1 dumps the HW statistics to the console.
+*/
+static int
+ixv_sysctl_stats(SYSCTL_HANDLER_ARGS)
+{
+       int             error;
+       int             result;
+       struct adapter *adapter;
+
+       result = -1;
+       error = sysctl_handle_int(oidp, &result, 0, req);
+
+       /* Read-only access (no new value) just returns. */
+       if (error || !req->newptr)
+               return (error);
+
+       if (result == 1) {
+               adapter = (struct adapter *) arg1;
+               ixv_print_hw_stats(adapter);
+       }
+       return error;
+}
+
+/*
+** Sysctl handler: writing 1 dumps per-queue debug info to the console.
+*/
+static int
+ixv_sysctl_debug(SYSCTL_HANDLER_ARGS)
+{
+       int error, result;
+       struct adapter *adapter;
+
+       result = -1;
+       error = sysctl_handle_int(oidp, &result, 0, req);
+
+       /* Read-only access (no new value) just returns. */
+       if (error || !req->newptr)
+               return (error);
+
+       if (result == 1) {
+               adapter = (struct adapter *) arg1;
+               ixv_print_debug_info(adapter);
+       }
+       return error;
+}
+
+/*
+** Set flow control using sysctl:
+** Flow control values:
+**     0 - off
+**     1 - rx pause
+**     2 - tx pause
+**     3 - full
+*/
+static int
+ixv_set_flowcntl(SYSCTL_HANDLER_ARGS)
+{
+       int error;
+       struct adapter *adapter;
+
+       /* NOTE(review): ixv_flow_control is a driver-global, so this
+          sysctl value is shared across all adapter instances. */
+       error = sysctl_handle_int(oidp, &ixv_flow_control, 0, req);
+
+       if (error)
+               return (error);
+
+       adapter = (struct adapter *) arg1;
+       /* Map the sysctl value onto a requested FC mode;
+          anything unrecognized falls back to 'none'. */
+       switch (ixv_flow_control) {
+               case ixgbe_fc_rx_pause:
+               case ixgbe_fc_tx_pause:
+               case ixgbe_fc_full:
+                       adapter->hw.fc.requested_mode = ixv_flow_control;
+                       break;
+               case ixgbe_fc_none:
+               default:
+                       adapter->hw.fc.requested_mode = ixgbe_fc_none;
+       }
+
+       ixgbe_fc_enable(&adapter->hw, 0);
+       return error;
+}
+
+/*
+** Register a read/write integer sysctl for the RX process limit
+** and seed *limit with its initial value.
+*/
+static void
+ixv_add_rx_process_limit(struct adapter *adapter, const char *name,
+        const char *description, int *limit, int value)
+{
+        *limit = value;
+        SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev),
+            SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
+            OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description);
+}
+
diff --git a/lib/librte_pmd_ixgbe/ixgbe/ixv.h b/lib/librte_pmd_ixgbe/ixgbe/ixv.h
new file mode 100644 (file)
index 0000000..fcd0e1d
--- /dev/null
@@ -0,0 +1,430 @@
+/******************************************************************************
+
+  Copyright (c) 2001-2010, Intel Corporation 
+  All rights reserved.
+  
+  Redistribution and use in source and binary forms, with or without 
+  modification, are permitted provided that the following conditions are met:
+  
+   1. Redistributions of source code must retain the above copyright notice, 
+      this list of conditions and the following disclaimer.
+  
+   2. Redistributions in binary form must reproduce the above copyright 
+      notice, this list of conditions and the following disclaimer in the 
+      documentation and/or other materials provided with the distribution.
+  
+   3. Neither the name of the Intel Corporation nor the names of its 
+      contributors may be used to endorse or promote products derived from 
+      this software without specific prior written permission.
+  
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
+  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
+  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 
+  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 
+  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 
+  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 
+  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 
+  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 
+  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+  POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+
+#ifndef _IXV_H_
+#define _IXV_H_
+
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/mbuf.h>
+#include <sys/protosw.h>
+#include <sys/socket.h>
+#include <sys/malloc.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/sockio.h>
+
+#include <net/if.h>
+#include <net/if_arp.h>
+#include <net/bpf.h>
+#include <net/ethernet.h>
+#include <net/if_dl.h>
+#include <net/if_media.h>
+
+#include <net/bpf.h>
+#include <net/if_types.h>
+#include <net/if_vlan_var.h>
+
+#include <netinet/in_systm.h>
+#include <netinet/in.h>
+#include <netinet/if_ether.h>
+#include <netinet/ip.h>
+#include <netinet/ip6.h>
+#include <netinet/tcp.h>
+#include <netinet/tcp_lro.h>
+#include <netinet/udp.h>
+
+#include <machine/in_cksum.h>
+
+#include <sys/bus.h>
+#include <machine/bus.h>
+#include <sys/rman.h>
+#include <machine/resource.h>
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#include <machine/clock.h>
+#include <dev/pci/pcivar.h>
+#include <dev/pci/pcireg.h>
+#include <sys/proc.h>
+#include <sys/sysctl.h>
+#include <sys/endian.h>
+#include <sys/taskqueue.h>
+#include <sys/pcpu.h>
+#include <sys/smp.h>
+#include <machine/smp.h>
+
+#include "ixgbe_api.h"
+#include "ixgbe_vf.h"
+
+/* Tunables */
+
+/*
+ * TxDescriptors Valid Range: 64-4096 Default Value: 256 This value is the
+ * number of transmit descriptors allocated by the driver. Increasing this
+ * value allows the driver to queue more transmits. Each descriptor is 16
+ * bytes. Performance tests have shown the 2K value to be optimal for top
+ * performance.
+ */
+#define DEFAULT_TXD    1024
+#define PERFORM_TXD    2048
+#define MAX_TXD                4096
+#define MIN_TXD                64
+
+/*
+ * RxDescriptors Valid Range: 64-4096 Default Value: 256 This value is the
+ * number of receive descriptors allocated for each RX queue. Increasing this
+ * value allows the driver to buffer more incoming packets. Each descriptor
+ * is 16 bytes.  A receive buffer is also allocated for each descriptor. 
+ * 
+ * Note: with 8 rings and a dual port card, it is possible to bump up 
+ *     against the system mbuf pool limit, you can tune nmbclusters
+ *     to adjust for this.
+ */
+#define DEFAULT_RXD    1024
+#define PERFORM_RXD    2048
+#define MAX_RXD                4096
+#define MIN_RXD                64
+
+/* Alignment for rings */
+#define DBA_ALIGN      128
+
+/*
+ * This parameter controls the maximum no of times the driver will loop in
+ * the isr. Minimum Value = 1
+ */
+#define MAX_LOOP       10
+
+/*
+ * This is the max watchdog interval, ie. the time that can
+ * pass between any two TX clean operations, such only happening
+ * when the TX hardware is functioning.
+ */
+#define IXV_WATCHDOG                   (10 * hz)
+
+/*
+ * These parameters control when the driver calls the routine to reclaim
+ * transmit descriptors.
+ */
+#define IXV_TX_CLEANUP_THRESHOLD       (adapter->num_tx_desc / 8)
+#define IXV_TX_OP_THRESHOLD            (adapter->num_tx_desc / 32)
+
+#define IXV_MAX_FRAME_SIZE     0x3F00
+
+/* Flow control constants */
+#define IXV_FC_PAUSE           0xFFFF
+#define IXV_FC_HI              0x20000
+#define IXV_FC_LO              0x10000
+
+/* Defines for printing debug information */
+#define DEBUG_INIT  0
+#define DEBUG_IOCTL 0
+#define DEBUG_HW    0
+
+#define INIT_DEBUGOUT(S)            if (DEBUG_INIT)  printf(S "\n")
+#define INIT_DEBUGOUT1(S, A)        if (DEBUG_INIT)  printf(S "\n", A)
+#define INIT_DEBUGOUT2(S, A, B)     if (DEBUG_INIT)  printf(S "\n", A, B)
+#define IOCTL_DEBUGOUT(S)           if (DEBUG_IOCTL) printf(S "\n")
+#define IOCTL_DEBUGOUT1(S, A)       if (DEBUG_IOCTL) printf(S "\n", A)
+#define IOCTL_DEBUGOUT2(S, A, B)    if (DEBUG_IOCTL) printf(S "\n", A, B)
+#define HW_DEBUGOUT(S)              if (DEBUG_HW) printf(S "\n")
+#define HW_DEBUGOUT1(S, A)          if (DEBUG_HW) printf(S "\n", A)
+#define HW_DEBUGOUT2(S, A, B)       if (DEBUG_HW) printf(S "\n", A, B)
+
+#define MAX_NUM_MULTICAST_ADDRESSES     128
+#define IXV_EITR_DEFAULT               128
+#define IXV_SCATTER                    32
+#define IXV_RX_HDR                     128
+#define MSIX_BAR                       3
+#define IXV_TSO_SIZE                   65535
+#define IXV_BR_SIZE                    4096
+#define IXV_LINK_ITR                   2000
+#define TX_BUFFER_SIZE         ((u32) 1514)
+#define VFTA_SIZE                      128
+
+/* Offload bits in mbuf flag */
+#define CSUM_OFFLOAD           (CSUM_IP|CSUM_TCP|CSUM_UDP|CSUM_SCTP)
+
+/*
+ *****************************************************************************
+ * vendor_info_array
+ * 
+ * This array contains the list of Subvendor/Subdevice IDs on which the driver
+ * should load.
+ * 
+ *****************************************************************************
+ */
+typedef struct _ixv_vendor_info_t {
+       unsigned int    vendor_id;
+       unsigned int    device_id;
+       unsigned int    subvendor_id;
+       unsigned int    subdevice_id;
+       unsigned int    index;
+} ixv_vendor_info_t;
+
+
+/* Per-descriptor TX software state: the mbuf chain and its DMA map. */
+struct ixv_tx_buf {
+       u32             eop_index;      /* EOP (End Of Packet) descriptor index */
+       struct mbuf     *m_head;
+       bus_dmamap_t    map;
+};
+
+/* Per-descriptor RX software state (separate header/packet buffers). */
+struct ixv_rx_buf {
+       struct mbuf     *m_head;        /* header buffer — see hdr_split in rx_ring */
+       struct mbuf     *m_pack;        /* packet buffer */
+       struct mbuf     *fmp;           /* first mbuf of an in-progress chain */
+       bus_dmamap_t    hmap;           /* DMA map for m_head */
+       bus_dmamap_t    pmap;           /* DMA map for m_pack */
+};
+
+/*
+ * Bus dma allocation structure used by ixv_dma_malloc and ixv_dma_free.
+ */
+struct ixv_dma_alloc {
+       bus_addr_t              dma_paddr;
+       caddr_t                 dma_vaddr;
+       bus_dma_tag_t           dma_tag;
+       bus_dmamap_t            dma_map;
+       bus_dma_segment_t       dma_seg;
+       bus_size_t              dma_size;
+       int                     dma_nseg;
+};
+
+/*
+** Driver queue struct: this is the interrupt container
+**  for the associated tx and rx ring.
+*/
+struct ix_queue {
+       struct adapter          *adapter;
+       u32                     msix;           /* This queue's MSIX vector */
+       u32                     eims;           /* This queue's EIMS bit */
+       u32                     eitr_setting;
+       u32                     eitr;           /* cached reg */
+       struct resource         *res;
+       void                    *tag;
+       struct tx_ring          *txr;
+       struct rx_ring          *rxr;
+       struct task             que_task;
+       struct taskqueue        *tq;
+       u64                     irqs;
+};
+
+/*
+ * The transmit ring, one per queue
+ */
+struct tx_ring {
+        struct adapter         *adapter;
+       struct mtx              tx_mtx;
+       u32                     me;
+       bool                    watchdog_check;
+       int                     watchdog_time;
+       union ixgbe_adv_tx_desc *tx_base;
+       struct ixv_dma_alloc    txdma;
+       u32                     next_avail_desc;
+       u32                     next_to_clean;
+       struct ixv_tx_buf       *tx_buffers;
+       volatile u16            tx_avail;
+       u32                     txd_cmd;
+       bus_dma_tag_t           txtag;
+       char                    mtx_name[16];
+       struct buf_ring         *br;
+       /* Soft Stats */
+       u32                     bytes;
+       u32                     packets;
+       u64                     no_desc_avail;
+       u64                     total_packets;
+};
+
+
+/*
+ * The Receive ring, one per rx queue
+ */
+struct rx_ring {
+        struct adapter         *adapter;
+       struct mtx              rx_mtx;
+       u32                     me;
+       union ixgbe_adv_rx_desc *rx_base;
+       struct ixv_dma_alloc    rxdma;
+       struct lro_ctrl         lro;
+       bool                    lro_enabled;
+       bool                    hdr_split;
+       bool                    discard;
+        u32                    next_to_refresh;
+        u32                    next_to_check;
+       char                    mtx_name[16];
+       struct ixv_rx_buf       *rx_buffers;
+       bus_dma_tag_t           htag;
+       bus_dma_tag_t           ptag;
+
+       u32                     bytes; /* Used for AIM calc */
+       u32                     packets;
+
+       /* Soft stats */
+       u64                     rx_irq;
+       u64                     rx_split_packets;
+       u64                     rx_packets;
+       u64                     rx_bytes;
+       u64                     rx_discarded;
+};
+
+/* Our adapter structure */
+struct adapter {
+       struct ifnet            *ifp;
+       struct ixgbe_hw         hw;
+
+       struct ixgbe_osdep      osdep;
+       struct device           *dev;
+
+       struct resource         *pci_mem;
+       struct resource         *msix_mem;
+
+       /*
+        * Interrupt resources: this set is
+        * either used for legacy, or for Link
+        * when doing MSIX
+        */
+       void                    *tag;
+       struct resource         *res;
+
+       struct ifmedia          media;
+       struct callout          timer;
+       int                     msix;
+       int                     if_flags;
+
+       struct mtx              core_mtx;
+
+       eventhandler_tag        vlan_attach;
+       eventhandler_tag        vlan_detach;
+
+       u16                     num_vlans;
+       u16                     num_queues;
+
+       /* Info about the board itself */
+       bool                    link_active;
+       u16                     max_frame_size;
+       u32                     link_speed;
+       bool                    link_up;
+       u32                     mbxvec;
+
+       /* Mbuf cluster size */
+       u32                     rx_mbuf_sz;
+
+       /* Support for pluggable optics */
+       struct task             mbx_task;  /* Mailbox tasklet */
+       struct taskqueue        *tq;
+
+       /*
+       ** Queues: 
+       **   This is the irq holder, it has
+       **   and RX/TX pair or rings associated
+       **   with it.
+       */
+       struct ix_queue         *queues;
+
+       /*
+        * Transmit rings:
+        *      Allocated at run time, an array of rings.
+        */
+       struct tx_ring          *tx_rings;
+       int                     num_tx_desc;
+
+       /*
+        * Receive rings:
+        *      Allocated at run time, an array of rings.
+        */
+       struct rx_ring          *rx_rings;
+       int                     num_rx_desc;
+       u64                     que_mask;
+       u32                     rx_process_limit;
+
+       /* Misc stats maintained by the driver */
+       unsigned long           dropped_pkts;
+       unsigned long           mbuf_defrag_failed;
+       unsigned long           mbuf_header_failed;
+       unsigned long           mbuf_packet_failed;
+       unsigned long           no_tx_map_avail;
+       unsigned long           no_tx_dma_setup;
+       unsigned long           watchdog_events;
+       unsigned long           tso_tx;
+       unsigned long           mbx_irq;
+
+       struct ixgbevf_hw_stats stats;
+};
+
+
+#define IXV_CORE_LOCK_INIT(_sc, _name) \
+        mtx_init(&(_sc)->core_mtx, _name, "IXV Core Lock", MTX_DEF)
+#define IXV_CORE_LOCK_DESTROY(_sc)      mtx_destroy(&(_sc)->core_mtx)
+#define IXV_TX_LOCK_DESTROY(_sc)        mtx_destroy(&(_sc)->tx_mtx)
+#define IXV_RX_LOCK_DESTROY(_sc)        mtx_destroy(&(_sc)->rx_mtx)
+#define IXV_CORE_LOCK(_sc)              mtx_lock(&(_sc)->core_mtx)
+#define IXV_TX_LOCK(_sc)                mtx_lock(&(_sc)->tx_mtx)
+#define IXV_TX_TRYLOCK(_sc)             mtx_trylock(&(_sc)->tx_mtx)
+#define IXV_RX_LOCK(_sc)                mtx_lock(&(_sc)->rx_mtx)
+#define IXV_CORE_UNLOCK(_sc)            mtx_unlock(&(_sc)->core_mtx)
+#define IXV_TX_UNLOCK(_sc)              mtx_unlock(&(_sc)->tx_mtx)
+#define IXV_RX_UNLOCK(_sc)              mtx_unlock(&(_sc)->rx_mtx)
+#define IXV_CORE_LOCK_ASSERT(_sc)       mtx_assert(&(_sc)->core_mtx, MA_OWNED)
+#define IXV_TX_LOCK_ASSERT(_sc)         mtx_assert(&(_sc)->tx_mtx, MA_OWNED)
+
+/* Workaround to make 8.0 buildable */
+#if __FreeBSD_version < 800504
+/*
+** Compat shim for FreeBSD versions lacking drbr_needs_enqueue():
+** returns non-zero when the buf_ring already holds packets (or ALTQ
+** is enabled on the interface), i.e. a new packet must be enqueued
+** rather than transmitted directly.
+*/
+static __inline int
+drbr_needs_enqueue(struct ifnet *ifp, struct buf_ring *br)
+{
+#ifdef ALTQ
+        if (ALTQ_IS_ENABLED(&ifp->if_snd))
+                return (1);
+#endif
+        return (!buf_ring_empty(br));
+}
+#endif
+
+/*
+** Find the number of unrefreshed RX descriptors
+*/
+static inline u16
+ixv_rx_unrefreshed(struct rx_ring *rxr)
+{       
+       struct adapter  *adapter = rxr->adapter;
+        
+       /*
+       ** The "- 1" in both branches reserves one slot — presumably so
+       ** next_to_refresh can never catch up to next_to_check (standard
+       ** ring-buffer full/empty disambiguation) — TODO confirm.
+       */
+       if (rxr->next_to_check > rxr->next_to_refresh)
+               return (rxr->next_to_check - rxr->next_to_refresh - 1);
+       else
+               /* Indices have wrapped: add the ring size before subtracting */
+               return ((adapter->num_rx_desc + rxr->next_to_check) -
+                   rxr->next_to_refresh - 1);
+}       
+
+#endif /* _IXV_H_ */
diff --git a/lib/librte_pmd_ixgbe/ixgbe_ethdev.c b/lib/librte_pmd_ixgbe/ixgbe_ethdev.c
new file mode 100644 (file)
index 0000000..f3b3cda
--- /dev/null
@@ -0,0 +1,1609 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <inttypes.h>
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+
+#include <rte_interrupts.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_alarm.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_atomic.h>
+#include <rte_malloc.h>
+
+#include "ixgbe_logs.h"
+#include "ixgbe/ixgbe_api.h"
+#include "ixgbe/ixgbe_vf.h"
+#include "ixgbe/ixgbe_common.h"
+#include "ixgbe_ethdev.h"
+
+/*
+ * High threshold controlling when to start sending XOFF frames. Must be at
+ * least 8 bytes less than receive packet buffer size. This value is in units
+ * of 1024 bytes.
+ */
+#define IXGBE_FC_HI    0x80
+
+/*
+ * Low threshold controlling when to start sending XON frames. This value is
+ * in units of 1024 bytes.
+ */
+#define IXGBE_FC_LO    0x40
+
+/* Timer value included in XOFF frames. */
+#define IXGBE_FC_PAUSE 0x680
+
+#define IXGBE_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */
+#define IXGBE_LINK_UP_CHECK_TIMEOUT   1000 /* ms */
+
+static int eth_ixgbe_dev_init(struct eth_driver *eth_drv,
+               struct rte_eth_dev *eth_dev);
+static int  ixgbe_dev_configure(struct rte_eth_dev *dev, uint16_t nb_rx_q,
+                               uint16_t nb_tx_q);
+static int  ixgbe_dev_start(struct rte_eth_dev *dev);
+static void ixgbe_dev_stop(struct rte_eth_dev *dev);
+static void ixgbe_dev_close(struct rte_eth_dev *dev);
+static void ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
+static void ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
+static void ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
+static void ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
+static int ixgbe_dev_link_update(struct rte_eth_dev *dev,
+                               int wait_to_complete);
+static void ixgbe_dev_stats_get(struct rte_eth_dev *dev,
+                               struct rte_eth_stats *stats);
+static void ixgbe_dev_stats_reset(struct rte_eth_dev *dev);
+static void ixgbe_dev_info_get(struct rte_eth_dev *dev,
+                               struct rte_eth_dev_info *dev_info);
+static void ixgbe_vlan_filter_set(struct rte_eth_dev *dev,
+                                 uint16_t vlan_id,
+                                 int on);
+static int ixgbe_dev_led_on(struct rte_eth_dev *dev);
+static int ixgbe_dev_led_off(struct rte_eth_dev *dev);
+static int  ixgbe_flow_ctrl_set(struct rte_eth_dev *dev,
+                               struct rte_eth_fc_conf *fc_conf);
+static void ixgbe_dev_link_status_print(struct rte_eth_dev *dev);
+static int ixgbe_dev_interrupt_setup(struct rte_eth_dev *dev);
+static int ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev);
+static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev);
+static void ixgbe_dev_interrupt_handler(struct rte_intr_handle *handle,
+                                                       void *param);
+static void ixgbe_dev_interrupt_delayed_handler(void *param);
+static void ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
+                               uint32_t index, uint32_t pool);
+static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index);
+
+/* For Virtual Function support */
+static int eth_ixgbevf_dev_init(struct eth_driver *eth_drv,
+               struct rte_eth_dev *eth_dev);
+static int  ixgbevf_dev_configure(struct rte_eth_dev *dev, uint16_t nb_rx_q,
+               uint16_t nb_tx_q);
+static int  ixgbevf_dev_start(struct rte_eth_dev *dev);
+static void ixgbevf_dev_stop(struct rte_eth_dev *dev);
+static void ixgbevf_intr_disable(struct ixgbe_hw *hw);
+static void ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
+static void ixgbevf_dev_stats_reset(struct rte_eth_dev *dev);
+
+/*
+ * Define VF Stats MACRO for Non "cleared on read" register
+ */
+#define UPDATE_VF_STAT(reg, last, cur)                         \
+{                                                               \
+       u32 latest = IXGBE_READ_REG(hw, reg);                   \
+       cur += latest - last;                                   \
+       last = latest;                                          \
+}
+
+#define UPDATE_VF_STAT_36BIT(lsb, msb, last, cur)                \
+{                                                                \
+       u64 new_lsb = IXGBE_READ_REG(hw, lsb);                   \
+       u64 new_msb = IXGBE_READ_REG(hw, msb);                   \
+       u64 latest = ((new_msb << 32) | new_lsb);                \
+       cur += (0x1000000000LL + latest - last) & 0xFFFFFFFFFLL; \
+       last = latest;                                           \
+}
+
+/*
+ * The set of PCI devices this driver supports
+ */
+static struct rte_pci_id pci_id_ixgbe_map[] = {
+
+#undef RTE_LIBRTE_IGB_PMD
+#define RTE_PCI_DEV_ID_DECL(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
+#include "rte_pci_dev_ids.h"
+
+{ .vendor_id = 0, /* sentinel */ },
+};
+
+
+/*
+ * The set of PCI devices this driver supports (for 82599 VF)
+ */
+static struct rte_pci_id pci_id_ixgbevf_map[] = {
+{
+       .vendor_id = PCI_VENDOR_ID_INTEL,
+       .device_id = IXGBE_DEV_ID_82599_VF,
+       .subsystem_vendor_id = PCI_ANY_ID,
+       .subsystem_device_id = PCI_ANY_ID,
+},
+{ .vendor_id = 0, /* sentinel */ },
+};
+
+static struct eth_dev_ops ixgbe_eth_dev_ops = {
+       .dev_configure        = ixgbe_dev_configure,
+       .dev_start            = ixgbe_dev_start,
+       .dev_stop             = ixgbe_dev_stop,
+       .dev_close            = ixgbe_dev_close,
+       .promiscuous_enable   = ixgbe_dev_promiscuous_enable,
+       .promiscuous_disable  = ixgbe_dev_promiscuous_disable,
+       .allmulticast_enable  = ixgbe_dev_allmulticast_enable,
+       .allmulticast_disable = ixgbe_dev_allmulticast_disable,
+       .link_update          = ixgbe_dev_link_update,
+       .stats_get            = ixgbe_dev_stats_get,
+       .stats_reset          = ixgbe_dev_stats_reset,
+       .dev_infos_get        = ixgbe_dev_info_get,
+       .vlan_filter_set      = ixgbe_vlan_filter_set,
+       .rx_queue_setup       = ixgbe_dev_rx_queue_setup,
+       .tx_queue_setup       = ixgbe_dev_tx_queue_setup,
+       .dev_led_on           = ixgbe_dev_led_on,
+       .dev_led_off          = ixgbe_dev_led_off,
+       .flow_ctrl_set        = ixgbe_flow_ctrl_set,
+       .mac_addr_add         = ixgbe_add_rar,
+       .mac_addr_remove      = ixgbe_remove_rar,
+       .fdir_add_signature_filter    = ixgbe_fdir_add_signature_filter,
+       .fdir_update_signature_filter = ixgbe_fdir_update_signature_filter,
+       .fdir_remove_signature_filter = ixgbe_fdir_remove_signature_filter,
+       .fdir_infos_get               = ixgbe_fdir_info_get,
+       .fdir_add_perfect_filter      = ixgbe_fdir_add_perfect_filter,
+       .fdir_update_perfect_filter   = ixgbe_fdir_update_perfect_filter,
+       .fdir_remove_perfect_filter   = ixgbe_fdir_remove_perfect_filter,
+       .fdir_set_masks               = ixgbe_fdir_set_masks,
+};
+
+/*
+ * dev_ops for virtual function, bare necessities for basic vf
+ * operation have been implemented
+ */
+static struct eth_dev_ops ixgbevf_eth_dev_ops = {
+
+       .dev_configure        = ixgbevf_dev_configure,
+       .dev_start            = ixgbevf_dev_start,
+       .dev_stop             = ixgbevf_dev_stop,
+       .link_update          = ixgbe_dev_link_update,
+       .stats_get            = ixgbevf_dev_stats_get,
+       .stats_reset          = ixgbevf_dev_stats_reset,
+       .dev_close            = ixgbevf_dev_stop,
+
+       .dev_infos_get        = ixgbe_dev_info_get,
+       .rx_queue_setup       = ixgbe_dev_rx_queue_setup,
+       .tx_queue_setup       = ixgbe_dev_tx_queue_setup,
+};
+
+/**
+ * Atomically reads the link status information from global
+ * structure rte_eth_dev.
+ *
+ * @param dev
+ *   - Pointer to the structure rte_eth_dev to read from.
+ *   - Pointer to the buffer to be saved with the link status.
+ *
+ * @return
+ *   - On success, zero.
+ *   - On failure, negative value.
+ */
+static inline int
+rte_ixgbe_dev_atomic_read_link_status(struct rte_eth_dev *dev,
+                               struct rte_eth_link *link)
+{
+       struct rte_eth_link *dst = link;
+       struct rte_eth_link *src = &(dev->data->dev_link);
+
+       /*
+        * Snapshot the 64-bit link-status word atomically:
+        * cmpset(dst, *dst, *src) stores *src into *dst only if *dst is
+        * still its expected value; it returns 0 when another thread
+        * raced us, in which case report failure rather than retry.
+        */
+       if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
+                                       *(uint64_t *)src) == 0)
+               return -1;
+
+       return 0;
+}
+
+/**
+ * Atomically writes the link status information into global
+ * structure rte_eth_dev.
+ *
+ * @param dev
+ *   - Pointer to the structure rte_eth_dev to read from.
+ *   - Pointer to the buffer to be saved with the link status.
+ *
+ * @return
+ *   - On success, zero.
+ *   - On failure, negative value.
+ */
+static inline int
+rte_ixgbe_dev_atomic_write_link_status(struct rte_eth_dev *dev,
+                               struct rte_eth_link *link)
+{
+       struct rte_eth_link *dst = &(dev->data->dev_link);
+       struct rte_eth_link *src = link;
+
+       /*
+        * Mirror of the atomic read: publish the caller's link status
+        * into dev_link as a single 64-bit compare-and-set; a return of
+        * 0 from cmpset means a concurrent update won the race.
+        */
+       if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
+                                       *(uint64_t *)src) == 0)
+               return -1;
+
+       return 0;
+}
+
+/*
+ * This function is the same as ixgbe_is_sfp() in ixgbe/ixgbe.h.
+ */
+static inline int
+ixgbe_is_sfp(struct ixgbe_hw *hw)
+{
+       /* Return 1 iff the detected PHY type is one of the SFP variants */
+       switch (hw->phy.type) {
+       case ixgbe_phy_sfp_avago:
+       case ixgbe_phy_sfp_ftl:
+       case ixgbe_phy_sfp_intel:
+       case ixgbe_phy_sfp_unknown:
+       case ixgbe_phy_sfp_passive_tyco:
+       case ixgbe_phy_sfp_passive_unknown:
+               return 1;
+       default:
+               return 0;
+       }
+}
+
+/*
+ * This function is based on ixgbe_disable_intr() in ixgbe/ixgbe.h.
+ */
+static void
+ixgbe_disable_intr(struct ixgbe_hw *hw)
+{
+       PMD_INIT_FUNC_TRACE();
+
+       if (hw->mac.type == ixgbe_mac_82598EB) {
+               /* 82598: one EIMC register masks all interrupt causes */
+               IXGBE_WRITE_REG(hw, IXGBE_EIMC, ~0);
+       } else {
+               /* Later MACs: general causes plus two extended EIMC registers */
+               IXGBE_WRITE_REG(hw, IXGBE_EIMC, 0xFFFF0000);
+               IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), ~0);
+               IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), ~0);
+       }
+       /* Flush posted writes so the mask takes effect before returning */
+       IXGBE_WRITE_FLUSH(hw);
+}
+
+/*
+ * This function resets queue statistics mapping registers.
+ * From Niantic datasheet, Initialization of Statistics section:
+ * "...if software requires the queue counters, the RQSMR and TQSM registers
+ * must be re-programmed following a device reset."
+ */
+static void
+ixgbe_reset_qstat_mappings(struct ixgbe_hw *hw)
+{
+       uint32_t i;
+       /* Zero all 16 RX (RQSMR) and TX (TQSM) stats mapping registers */
+       for(i = 0; i != 16; i++) {
+               IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), 0);
+               IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), 0);
+       }
+}
+
+/*
+ * This function is based on code in ixgbe_attach() in ixgbe/ixgbe.c.
+ * It returns 0 on success.
+ */
+static int
+eth_ixgbe_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
+                    struct rte_eth_dev *eth_dev)
+{
+       struct rte_pci_device *pci_dev;
+       struct ixgbe_hw *hw =
+               IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+       struct ixgbe_vfta * shadow_vfta =
+               IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
+       uint32_t ctrl_ext;
+       uint16_t csum;
+       int diag, i;
+
+       PMD_INIT_FUNC_TRACE();
+
+       eth_dev->dev_ops = &ixgbe_eth_dev_ops;
+       eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
+       eth_dev->tx_pkt_burst = &ixgbe_xmit_pkts;
+
+       /* for secondary processes, we don't initialise any further as primary
+        * has already done this work. Only check we don't need a different
+        * RX function */
+       if (rte_eal_process_type() != RTE_PROC_PRIMARY){
+               if (eth_dev->data->scattered_rx)
+                       eth_dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
+               return 0;
+       }
+       pci_dev = eth_dev->pci_dev;
+
+       /* Vendor and Device ID need to be set before init of shared code */
+       hw->device_id = pci_dev->id.device_id;
+       hw->vendor_id = pci_dev->id.vendor_id;
+       hw->hw_addr = (void *)pci_dev->mem_resource.addr;
+
+       /* Initialize the shared code */
+       diag = ixgbe_init_shared_code(hw);
+       if (diag != IXGBE_SUCCESS) {
+               PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
+               return -EIO;
+       }
+
+       /* Get Hardware Flow Control setting */
+       hw->fc.requested_mode = ixgbe_fc_full;
+       hw->fc.current_mode = ixgbe_fc_full;
+       hw->fc.pause_time = IXGBE_FC_PAUSE;
+       hw->fc.low_water = IXGBE_FC_LO;
+       for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
+               hw->fc.high_water[i] = IXGBE_FC_HI;
+       hw->fc.send_xon = 1;
+
+       ixgbe_disable_intr(hw);
+
+       /* Make sure we have a good EEPROM before we read from it */
+       diag = ixgbe_validate_eeprom_checksum(hw, &csum);
+       if (diag != IXGBE_SUCCESS) {
+               PMD_INIT_LOG(ERR, "The EEPROM checksum is not valid: %d", diag);
+               return -EIO;
+       }
+
+       diag = ixgbe_init_hw(hw);
+
+       /*
+        * Devices with copper phys will fail to initialise if ixgbe_init_hw()
+        * is called too soon after the kernel driver unbinding/binding occurs.
+        * The failure occurs in ixgbe_identify_phy_generic() for all devices,
+        * but for non-copper devices, ixgbe_identify_sfp_module_generic() is
+        * also called. See ixgbe_identify_phy_82599(). The reason for the
+        * failure is not known, and only occurs when virtualisation features
+        * are disabled in the bios. A delay of 100ms was found to be enough by
+        * trial-and-error, and is doubled to be safe.
+        */
+       if (diag && (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)) {
+               rte_delay_ms(200);
+               diag = ixgbe_init_hw(hw);
+       }
+
+       if (diag == IXGBE_ERR_EEPROM_VERSION) {
+               PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
+                   "LOM.  Please be aware there may be issues associated "
+                   "with your hardware.\n If you are experiencing problems "
+                   "please contact your Intel or hardware representative "
+                   "who provided you with this hardware.\n");
+       } else if (diag == IXGBE_ERR_SFP_NOT_SUPPORTED)
+               PMD_INIT_LOG(ERR, "Unsupported SFP+ Module\n");
+       if (diag) {
+               PMD_INIT_LOG(ERR, "Hardware Initialization Failure: %d", diag);
+               return -EIO;
+       }
+
+       /* pick up the PCI bus settings for reporting later */
+       ixgbe_get_bus_info(hw);
+
+       /* reset mappings for queue statistics hw counters*/
+       ixgbe_reset_qstat_mappings(hw);
+
+       /* Allocate memory for storing MAC addresses */
+       eth_dev->data->mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
+                       hw->mac.num_rar_entries, 0);
+       if (eth_dev->data->mac_addrs == NULL) {
+               PMD_INIT_LOG(ERR,
+                       "Failed to allocate %d bytes needed to store MAC addresses",
+                       ETHER_ADDR_LEN * hw->mac.num_rar_entries);
+               return -ENOMEM;
+       }
+       /* Copy the permanent MAC address */
+       ether_addr_copy((struct ether_addr *) hw->mac.perm_addr,
+                       &eth_dev->data->mac_addrs[0]);
+
+       /* initialize the vfta */
+       memset(shadow_vfta, 0, sizeof(*shadow_vfta));
+
+       /* let hardware know driver is loaded */
+       ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
+       ctrl_ext |= IXGBE_CTRL_EXT_DRV_LOAD;
+       IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
+
+       /* Fixed: format string previously ended "%d<n" — typo for "%d\n" */
+       if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
+               PMD_INIT_LOG(DEBUG,
+                            "MAC: %d, PHY: %d, SFP+: %d\n",
+                            (int) hw->mac.type, (int) hw->phy.type,
+                            (int) hw->phy.sfp_type);
+       else
+               PMD_INIT_LOG(DEBUG, "MAC: %d, PHY: %d\n",
+                            (int) hw->mac.type, (int) hw->phy.type);
+
+       PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
+                       eth_dev->data->port_id, pci_dev->id.vendor_id,
+                       pci_dev->id.device_id);
+
+       rte_intr_callback_register(&(pci_dev->intr_handle),
+               ixgbe_dev_interrupt_handler, (void *)eth_dev);
+
+       return 0;
+}
+
+/*
+ * Virtual Function device init.
+ *
+ * Sets up the VF-specific dev_ops, initializes the ixgbe shared code and
+ * mailbox, resets the VF MAC, copies the permanent MAC address and starts
+ * the hardware.
+ *
+ * Returns 0 on success, -EIO if shared-code init fails, -ENOMEM if the
+ * MAC-address table cannot be allocated, or the raw ixgbe diag code if
+ * start_hw() fails.
+ */
+static int
+eth_ixgbevf_dev_init(__attribute__((unused)) struct eth_driver *eth_drv,
+                    struct rte_eth_dev *eth_dev)
+{
+       struct rte_pci_device *pci_dev;
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
+       int diag;
+
+       PMD_INIT_LOG(DEBUG, "eth_ixgbevf_dev_init");
+
+       eth_dev->dev_ops = &ixgbevf_eth_dev_ops;
+       pci_dev = eth_dev->pci_dev;
+
+       /* mirror the PCI identity and BAR mapping into the shared-code handle */
+       hw->device_id = pci_dev->id.device_id;
+       hw->vendor_id = pci_dev->id.vendor_id;
+       hw->hw_addr = (void *)pci_dev->mem_resource.addr;
+
+       /* Initialize the shared code */
+       diag = ixgbe_init_shared_code(hw);
+       if (diag != IXGBE_SUCCESS) {
+               PMD_INIT_LOG(ERR, "Shared code init failed for ixgbevf: %d", diag);
+               return -EIO;
+       }
+
+       /* init_mailbox_params */
+       hw->mbx.ops.init_params(hw);
+
+       /* Disable the interrupts for VF */
+       ixgbevf_intr_disable(hw);
+
+       hw->mac.num_rar_entries = hw->mac.max_rx_queues;
+       /* NOTE(review): the reset_hw() status is never checked -- 'diag' is
+        * overwritten by start_hw() below; confirm a failed reset is benign. */
+       diag = hw->mac.ops.reset_hw(hw);
+
+       /* Allocate memory for storing MAC addresses */
+       eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", ETHER_ADDR_LEN *
+                       hw->mac.num_rar_entries, 0);
+       if (eth_dev->data->mac_addrs == NULL) {
+               PMD_INIT_LOG(ERR,
+                       "Failed to allocate %d bytes needed to store MAC addresses",
+                       ETHER_ADDR_LEN * hw->mac.num_rar_entries);
+               return -ENOMEM;
+       }
+       /* Copy the permanent MAC address */
+       ether_addr_copy((struct ether_addr *) hw->mac.perm_addr,
+                       &eth_dev->data->mac_addrs[0]);
+
+       /* reset the hardware with the new settings */
+       diag = hw->mac.ops.start_hw(hw);
+       switch (diag) {
+               case  0:
+                       break;
+
+               default:
+                       PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
+                       return (diag);
+       }
+
+       PMD_INIT_LOG(DEBUG, "\nport %d vendorID=0x%x deviceID=0x%x mac.type=%s\n",
+                        eth_dev->data->port_id, pci_dev->id.vendor_id, pci_dev->id.device_id,
+                        "ixgbe_mac_82599_vf");
+
+       return 0;
+}
+
+/*
+ * Physical Function (PF) poll-mode driver descriptor, registered with
+ * the ethdev layer by rte_ixgbe_pmd_init().  Requires the igb_uio
+ * kernel module for device access.
+ */
+static struct eth_driver rte_ixgbe_pmd = {
+       {
+               .name = "rte_ixgbe_pmd",
+               .id_table = pci_id_ixgbe_map,
+               .drv_flags = RTE_PCI_DRV_NEED_IGB_UIO,
+       },
+       .eth_dev_init = eth_ixgbe_dev_init,
+       .dev_private_size = sizeof(struct ixgbe_adapter),
+};
+
+/*
+ * Virtual Function (VF) poll-mode driver descriptor.  Same shape as
+ * rte_ixgbe_pmd but bound to the VF device-ID table and VF init routine.
+ */
+static struct eth_driver rte_ixgbevf_pmd = {
+       {
+               .name = "rte_ixgbevf_pmd",
+               .id_table = pci_id_ixgbevf_map,
+               .drv_flags = RTE_PCI_DRV_NEED_IGB_UIO,
+       },
+       .eth_dev_init = eth_ixgbevf_dev_init,
+       /* NOTE(review): the VF reuses the full PF private area size */
+       .dev_private_size = sizeof(struct ixgbe_adapter),
+};
+
+/*
+ * Driver initialization routine.
+ * Invoked once at EAL init time.
+ * Register itself as the [Poll Mode] Driver of PCI IXGBE devices.
+ */
+int
+rte_ixgbe_pmd_init(void)
+{
+       PMD_INIT_FUNC_TRACE();
+
+       /* hand the PF driver descriptor to the ethdev layer */
+       rte_eth_driver_register(&rte_ixgbe_pmd);
+       return 0;
+}
+
+/*
+ * VF Driver initialization routine.
+ * Invoked once at EAL init time.
+ * Register itself as the [Virtual Poll Mode] Driver of PCI niantic devices.
+ */
+int
+rte_ixgbevf_pmd_init(void)
+{
+       /* Use the same entry-trace macro and return style as
+        * rte_ixgbe_pmd_init() for consistency (was DEBUGFUNC, the
+        * shared-code tracer). */
+       PMD_INIT_FUNC_TRACE();
+
+       /* hand the VF driver descriptor to the ethdev layer */
+       rte_eth_driver_register(&rte_ixgbevf_pmd);
+       return 0;
+}
+
+/*
+ * Set or clear the VLAN filter-table bit for @vlan_id in hardware, and
+ * mirror the change into the software shadow copy of the VFTA.
+ */
+static void
+ixgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
+{
+       struct ixgbe_hw *hw =
+               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_vfta * shadow_vfta =
+               IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
+       uint32_t reg_idx;
+       uint32_t bit_mask;
+       uint32_t reg_val;
+
+       /* Each VFTA register covers 32 VLAN IDs: bits 5..11 of the VLAN ID
+        * select the register, bits 0..4 select the bit within it. */
+       reg_idx = (uint32_t) ((vlan_id >> 5) & 0x7F);
+       bit_mask = (uint32_t) (1 << (vlan_id & 0x1F));
+
+       reg_val = IXGBE_READ_REG(hw, IXGBE_VFTA(reg_idx));
+       if (on)
+               reg_val |= bit_mask;
+       else
+               reg_val &= ~bit_mask;
+       IXGBE_WRITE_REG(hw, IXGBE_VFTA(reg_idx), reg_val);
+
+       /* keep the local VFTA copy in sync with hardware */
+       shadow_vfta->vfta[reg_idx] = reg_val;
+}
+
+/*
+ * Turn off hardware VLAN filtering and VLAN tag stripping.
+ */
+static void
+ixgbe_vlan_hw_support_disable(struct rte_eth_dev *dev)
+{
+       struct ixgbe_hw *hw =
+               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint32_t ctrl;
+       uint32_t queue_ctrl;
+       uint16_t q;
+
+       PMD_INIT_FUNC_TRACE();
+
+       /* clear the global filter-enable bit */
+       ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+       ctrl &= ~IXGBE_VLNCTRL_VFE;
+
+       if (hw->mac.type == ixgbe_mac_82598EB) {
+               /* 82598: VLAN stripping is a global VLNCTRL bit */
+               ctrl &= ~IXGBE_VLNCTRL_VME;
+       } else {
+               /* 82599: VLAN stripping is controlled per queue in RXDCTL */
+               for (q = 0; q < dev->data->nb_rx_queues; q++) {
+                       queue_ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(q));
+                       queue_ctrl &= ~IXGBE_RXDCTL_VME;
+                       IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(q), queue_ctrl);
+               }
+       }
+       IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
+}
+
+/*
+ * Turn on hardware VLAN filtering and VLAN tag stripping, then restore
+ * the filter table from the software shadow copy.
+ */
+static void
+ixgbe_vlan_hw_support_enable(struct rte_eth_dev *dev)
+{
+       struct ixgbe_hw *hw =
+               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_vfta * shadow_vfta =
+               IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
+       uint32_t vlnctrl;
+       uint32_t rxdctl;
+       uint16_t i;
+
+       PMD_INIT_FUNC_TRACE();
+
+       /* Filter Table Enable (and do not drop on CFI mismatch) */
+       vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+       vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
+       vlnctrl |= IXGBE_VLNCTRL_VFE;
+
+       if (hw->mac.type == ixgbe_mac_82598EB)
+               vlnctrl |= IXGBE_VLNCTRL_VME;
+       else {
+               /* On 82599 the VLAN enable is per/queue in RXDCTL */
+               for (i = 0; i < dev->data->nb_rx_queues; i++) {
+                       rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
+                       rxdctl |= IXGBE_RXDCTL_VME;
+                       IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), rxdctl);
+               }
+       }
+       IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
+
+       /* write whatever is in local vfta copy */
+       for (i = 0; i < IXGBE_VFTA_SIZE; i++)
+               IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), shadow_vfta->vfta[i]);
+}
+
+/*
+ * Configure the device: allocate the arrays of pointers to the RX and
+ * TX queue structures and flag that the link status must be refreshed.
+ *
+ * Returns 0 on success or the queue-array allocation error code.
+ */
+static int
+ixgbe_dev_configure(struct rte_eth_dev *dev, uint16_t nb_rx_q, uint16_t nb_tx_q)
+{
+       struct ixgbe_interrupt *intr =
+               IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+       int diag;
+
+       PMD_INIT_FUNC_TRACE();
+
+       /* Allocate the array of pointers to RX queue structures.
+        * (Fixed: the split log literal was missing a space and printed
+        * "of %dpointers".) */
+       diag = ixgbe_dev_rx_queue_alloc(dev, nb_rx_q);
+       if (diag != 0) {
+               PMD_INIT_LOG(ERR, "ethdev port_id=%d allocation of array of %d "
+                            "pointers to RX queues failed", dev->data->port_id,
+                            nb_rx_q);
+               return diag;
+       }
+
+       /* Allocate the array of pointers to TX queue structures */
+       diag = ixgbe_dev_tx_queue_alloc(dev, nb_tx_q);
+       if (diag != 0) {
+               PMD_INIT_LOG(ERR, "ethdev port_id=%d allocation of array of %d "
+                            "pointers to TX queues failed", dev->data->port_id,
+                            nb_tx_q);
+               return diag;
+       }
+
+       /* set flag to update link status after init */
+       intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
+
+       return 0;
+}
+
+/*
+ * Configure device link speed and setup link.
+ * It returns 0 on success.
+ */
+static int
+ixgbe_dev_start(struct rte_eth_dev *dev)
+{
+       struct ixgbe_hw *hw =
+               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       int err, link_up = 0, negotiate = 0;
+       uint32_t speed = 0;
+
+       PMD_INIT_FUNC_TRACE();
+
+       /* IXGBE devices don't support half duplex */
+       if ((dev->data->dev_conf.link_duplex != ETH_LINK_AUTONEG_DUPLEX) &&
+                       (dev->data->dev_conf.link_duplex != ETH_LINK_FULL_DUPLEX)) {
+               PMD_INIT_LOG(ERR, "Invalid link_duplex (%u) for port %u\n",
+                               dev->data->dev_conf.link_duplex,
+                               dev->data->port_id);
+               return -EINVAL;
+       }
+
+       /* stop adapter */
+       /* NOTE(review): adapter_stopped is set FALSE (not TRUE) before the
+        * stop call -- presumably so ixgbe_stop_adapter() runs its full
+        * sequence; confirm against the shared-code implementation. */
+       hw->adapter_stopped = FALSE;
+       ixgbe_stop_adapter(hw);
+
+       /* reinitialize adapter
+        * this calls reset and start */
+       ixgbe_init_hw(hw);
+
+       /* initialize transmission unit */
+       ixgbe_dev_tx_init(dev);
+
+       /* This can fail when allocating mbufs for descriptor rings */
+       err = ixgbe_dev_rx_init(dev);
+       if (err) {
+               PMD_INIT_LOG(ERR, "Unable to initialize RX hardware\n");
+               return err;
+       }
+
+       ixgbe_dev_rxtx_start(dev);
+
+       /* multispeed-fiber SFP modules need module-specific setup */
+       if (ixgbe_is_sfp(hw) && hw->phy.multispeed_fiber) {
+               err = hw->mac.ops.setup_sfp(hw);
+               if (err)
+                       goto error;
+       }
+
+       /* Turn on the laser */
+       if (hw->phy.multispeed_fiber)
+               ixgbe_enable_tx_laser(hw);
+
+       err = ixgbe_check_link(hw, &speed, &link_up, 0);
+       if (err)
+               goto error;
+       err = ixgbe_get_link_capabilities(hw, &speed, &negotiate);
+       if (err)
+               goto error;
+
+       /* translate the ethdev speed setting into an ixgbe link-speed mask */
+       switch(dev->data->dev_conf.link_speed) {
+       case ETH_LINK_SPEED_AUTONEG:
+               speed = (hw->mac.type != ixgbe_mac_82598EB) ?
+                               IXGBE_LINK_SPEED_82599_AUTONEG :
+                               IXGBE_LINK_SPEED_82598_AUTONEG;
+               break;
+       case ETH_LINK_SPEED_100:
+               /*
+                * Invalid for 82598 but error will be detected by
+                * ixgbe_setup_link()
+                */
+               speed = IXGBE_LINK_SPEED_100_FULL;
+               break;
+       case ETH_LINK_SPEED_1000:
+               speed = IXGBE_LINK_SPEED_1GB_FULL;
+               break;
+       case ETH_LINK_SPEED_10000:
+               speed = IXGBE_LINK_SPEED_10GB_FULL;
+               break;
+       default:
+               PMD_INIT_LOG(ERR, "Invalid link_speed (%u) for port %u\n",
+                               dev->data->dev_conf.link_speed, dev->data->port_id);
+               return -EINVAL;
+       }
+
+       err = ixgbe_setup_link(hw, speed, negotiate, link_up);
+       if (err)
+               goto error;
+
+       /* check if lsc interrupt is enabled */
+       if (dev->data->dev_conf.intr_conf.lsc != 0) {
+               err = ixgbe_dev_interrupt_setup(dev);
+               if (err)
+                       goto error;
+       }
+
+       /*
+        * If VLAN filtering is enabled, set up VLAN tag offload and filtering
+        * and restore VFTA.
+        */
+       if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+               ixgbe_vlan_hw_support_enable(dev);
+       else
+               ixgbe_vlan_hw_support_disable(dev);
+
+       if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) {
+               err = ixgbe_fdir_configure(dev);
+               if (err)
+                       goto error;
+       }
+
+       return (0);
+
+error:
+       /* all late failures are collapsed into -EIO; 'err' is only logged */
+       PMD_INIT_LOG(ERR, "failure in ixgbe_dev_start(): %d", err);
+       return -EIO;
+}
+
+/*
+ * Stop device: disable rx and tx functions to allow for reconfiguring.
+ */
+static void
+ixgbe_dev_stop(struct rte_eth_dev *dev)
+{
+       struct rte_eth_link link;
+       struct ixgbe_hw *hw =
+               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       PMD_INIT_FUNC_TRACE();
+
+       /* disable interrupts */
+       ixgbe_disable_intr(hw);
+
+       /* reset the NIC */
+       ixgbe_reset_hw(hw);
+       /* NOTE(review): set FALSE (not TRUE) so that the following
+        * ixgbe_stop_adapter() runs its stop sequence -- confirm the
+        * shared code marks the adapter stopped itself. */
+       hw->adapter_stopped = FALSE;
+
+       /* stop adapter */
+       ixgbe_stop_adapter(hw);
+
+       /* Turn off the laser */
+       if (hw->phy.multispeed_fiber)
+               ixgbe_disable_tx_laser(hw);
+
+       /* release the RX/TX queues' hardware/software state */
+       ixgbe_dev_clear_queues(dev);
+
+       /* Clear recorded link status */
+       memset(&link, 0, sizeof(link));
+       rte_ixgbe_dev_atomic_write_link_status(dev, &link);
+}
+
+/*
+ * Reset and stop the device.
+ */
+static void
+ixgbe_dev_close(struct rte_eth_dev *dev)
+{
+       struct ixgbe_hw *hw =
+               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       PMD_INIT_FUNC_TRACE();
+
+       /* NOTE(review): the NIC is reset here AND again inside
+        * ixgbe_dev_stop() below -- confirm the double reset is intended. */
+       ixgbe_reset_hw(hw);
+
+
+       ixgbe_dev_stop(dev);
+       hw->adapter_stopped = 1;
+
+       /* block further PCIe master accesses from the device */
+       ixgbe_disable_pcie_master(hw);
+
+       /* reprogram the RAR[0] in case user changed it. */
+       ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
+}
+
+/*
+ * This function is based on ixgbe_update_stats_counters() in ixgbe/ixgbe.c
+ *
+ * Accumulates the hardware counters (which are clear-on-read) into the
+ * per-port software totals, then, if @stats is non-NULL, fills the generic
+ * rte_eth_stats structure from those totals.  Calling with @stats == NULL
+ * only drains/accumulates the registers (used by ixgbe_dev_stats_reset()).
+ */
+static void
+ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+       struct ixgbe_hw *hw =
+                       IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_hw_stats *hw_stats =
+                       IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+       uint32_t bprc, lxon, lxoff, total;
+       uint64_t total_missed_rx, total_qbrc, total_qprc;
+       unsigned i;
+
+       total_missed_rx = 0;
+       total_qbrc = 0;
+       total_qprc = 0;
+
+       /* error counters */
+       hw_stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
+       hw_stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
+       hw_stats->errbc += IXGBE_READ_REG(hw, IXGBE_ERRBC);
+       hw_stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
+
+       /* per-TC (8) counters: missed packets and XON/XOFF pause frames */
+       for (i = 0; i < 8; i++) {
+               uint32_t mp;
+               mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
+               /* global total per queue */
+               hw_stats->mpc[i] += mp;
+               /* Running comprehensive total for stats display */
+               total_missed_rx += hw_stats->mpc[i];
+               if (hw->mac.type == ixgbe_mac_82598EB)
+                       hw_stats->rnbc[i] +=
+                           IXGBE_READ_REG(hw, IXGBE_RNBC(i));
+               hw_stats->pxontxc[i] +=
+                   IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
+               hw_stats->pxonrxc[i] +=
+                   IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
+               hw_stats->pxofftxc[i] +=
+                   IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
+               hw_stats->pxoffrxc[i] +=
+                   IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
+               hw_stats->pxon2offc[i] +=
+                   IXGBE_READ_REG(hw, IXGBE_PXON2OFFCNT(i));
+       }
+       /* per-queue (16) packet/byte counters; 64-bit byte counters are
+        * assembled from their low/high register halves */
+       for (i = 0; i < 16; i++) {
+               hw_stats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
+               hw_stats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
+               hw_stats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
+               hw_stats->qbrc[i] +=
+                   ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBRC_H(i)) << 32);
+               hw_stats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
+               hw_stats->qbtc[i] +=
+                   ((uint64_t)IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)) << 32);
+               hw_stats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
+
+               total_qprc += hw_stats->qprc[i];
+               total_qbrc += hw_stats->qbrc[i];
+       }
+       hw_stats->mlfc += IXGBE_READ_REG(hw, IXGBE_MLFC);
+       hw_stats->mrfc += IXGBE_READ_REG(hw, IXGBE_MRFC);
+       hw_stats->rlec += IXGBE_READ_REG(hw, IXGBE_RLEC);
+
+       /* Note that gprc counts missed packets */
+       hw_stats->gprc += IXGBE_READ_REG(hw, IXGBE_GPRC);
+
+       /* octet counters differ by MAC: 82599 has full L/H pairs, the
+        * 82598 only has a counter in the high register */
+       if (hw->mac.type != ixgbe_mac_82598EB) {
+               hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL) +
+                   ((u64)IXGBE_READ_REG(hw, IXGBE_GORCH) << 32);
+               hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL) +
+                   ((u64)IXGBE_READ_REG(hw, IXGBE_GOTCH) << 32);
+               hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORL) +
+                   ((u64)IXGBE_READ_REG(hw, IXGBE_TORH) << 32);
+               hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
+               hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
+       } else {
+               hw_stats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
+               hw_stats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
+               /* 82598 only has a counter in the high register */
+               hw_stats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
+               hw_stats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
+               hw_stats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
+       }
+
+       /*
+        * Workaround: mprc hardware is incorrectly counting
+        * broadcasts, so for now we subtract those.
+        */
+       bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
+       hw_stats->bprc += bprc;
+       hw_stats->mprc += IXGBE_READ_REG(hw, IXGBE_MPRC);
+       if (hw->mac.type == ixgbe_mac_82598EB)
+               hw_stats->mprc -= bprc;
+
+       /* RX size-bucket counters */
+       hw_stats->prc64 += IXGBE_READ_REG(hw, IXGBE_PRC64);
+       hw_stats->prc127 += IXGBE_READ_REG(hw, IXGBE_PRC127);
+       hw_stats->prc255 += IXGBE_READ_REG(hw, IXGBE_PRC255);
+       hw_stats->prc511 += IXGBE_READ_REG(hw, IXGBE_PRC511);
+       hw_stats->prc1023 += IXGBE_READ_REG(hw, IXGBE_PRC1023);
+       hw_stats->prc1522 += IXGBE_READ_REG(hw, IXGBE_PRC1522);
+
+       lxon = IXGBE_READ_REG(hw, IXGBE_LXONTXC);
+       hw_stats->lxontxc += lxon;
+       lxoff = IXGBE_READ_REG(hw, IXGBE_LXOFFTXC);
+       hw_stats->lxofftxc += lxoff;
+       total = lxon + lxoff;
+
+       /* transmitted pause frames are counted as packets by the MAC;
+        * subtract them from the TX packet/octet totals */
+       hw_stats->gptc += IXGBE_READ_REG(hw, IXGBE_GPTC);
+       hw_stats->mptc += IXGBE_READ_REG(hw, IXGBE_MPTC);
+       hw_stats->ptc64 += IXGBE_READ_REG(hw, IXGBE_PTC64);
+       hw_stats->gptc -= total;
+       hw_stats->mptc -= total;
+       hw_stats->ptc64 -= total;
+       hw_stats->gotc -= total * ETHER_MIN_LEN;
+
+       hw_stats->ruc += IXGBE_READ_REG(hw, IXGBE_RUC);
+       hw_stats->rfc += IXGBE_READ_REG(hw, IXGBE_RFC);
+       hw_stats->roc += IXGBE_READ_REG(hw, IXGBE_ROC);
+       hw_stats->rjc += IXGBE_READ_REG(hw, IXGBE_RJC);
+       hw_stats->mngprc += IXGBE_READ_REG(hw, IXGBE_MNGPRC);
+       hw_stats->mngpdc += IXGBE_READ_REG(hw, IXGBE_MNGPDC);
+       hw_stats->mngptc += IXGBE_READ_REG(hw, IXGBE_MNGPTC);
+       hw_stats->tpr += IXGBE_READ_REG(hw, IXGBE_TPR);
+       hw_stats->tpt += IXGBE_READ_REG(hw, IXGBE_TPT);
+       hw_stats->ptc127 += IXGBE_READ_REG(hw, IXGBE_PTC127);
+       hw_stats->ptc255 += IXGBE_READ_REG(hw, IXGBE_PTC255);
+       hw_stats->ptc511 += IXGBE_READ_REG(hw, IXGBE_PTC511);
+       hw_stats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023);
+       hw_stats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522);
+       hw_stats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
+       hw_stats->xec += IXGBE_READ_REG(hw, IXGBE_XEC);
+       hw_stats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC);
+       hw_stats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST);
+       /* Only read FCOE on 82599 */
+       if (hw->mac.type != ixgbe_mac_82598EB) {
+               hw_stats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC);
+               hw_stats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC);
+               hw_stats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC);
+               hw_stats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
+               hw_stats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
+       }
+
+       /* NULL output: caller only wanted the registers drained */
+       if (stats == NULL)
+               return;
+
+       /* Fill out the rte_eth_stats statistics structure */
+       stats->ipackets = total_qprc;
+       stats->ibytes = total_qbrc;
+       stats->opackets = hw_stats->gptc;
+       stats->obytes = hw_stats->gotc;
+       stats->imcasts = hw_stats->mprc;
+
+       /* Rx Errors */
+       stats->ierrors = total_missed_rx + hw_stats->crcerrs +
+               hw_stats->rlec;
+
+       stats->oerrors  = 0;
+
+       /* Flow Director Stats registers */
+       hw_stats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
+       hw_stats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
+       stats->fdirmatch = hw_stats->fdirmatch;
+       stats->fdirmiss = hw_stats->fdirmiss;
+}
+
+/*
+ * Reset port statistics: drain the clear-on-read hardware counters,
+ * then zero the accumulated software totals.
+ */
+static void
+ixgbe_dev_stats_reset(struct rte_eth_dev *dev)
+{
+       struct ixgbe_hw_stats *hw_stats =
+                       IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+
+       /* reading the registers clears them; NULL means no output wanted */
+       ixgbe_dev_stats_get(dev, NULL);
+
+       /* forget everything accumulated so far */
+       memset(hw_stats, 0, sizeof(*hw_stats));
+}
+
+/*
+ * Read the VF statistics registers via the UPDATE_VF_STAT helpers (which
+ * accumulate register deltas into the running vfg* totals) and, if
+ * @stats is non-NULL, report the totals through rte_eth_stats.
+ */
+static void
+ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats*)
+                         IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+
+       /* Good Rx packet, include VF loopback */
+       UPDATE_VF_STAT(IXGBE_VFGPRC,
+           hw_stats->last_vfgprc, hw_stats->vfgprc);
+
+       /* Good Rx octets, include VF loopback */
+       UPDATE_VF_STAT_36BIT(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
+           hw_stats->last_vfgorc, hw_stats->vfgorc);
+
+       /* Good Tx packet, include VF loopback */
+       UPDATE_VF_STAT(IXGBE_VFGPTC,
+           hw_stats->last_vfgptc, hw_stats->vfgptc);
+
+       /* Good Tx octets, include VF loopback */
+       UPDATE_VF_STAT_36BIT(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
+           hw_stats->last_vfgotc, hw_stats->vfgotc);
+
+       /* Rx Multicast Packet */
+       UPDATE_VF_STAT(IXGBE_VFMPRC,
+           hw_stats->last_vfmprc, hw_stats->vfmprc);
+
+       /* NULL output: caller only wanted the snapshots refreshed */
+       if (stats == NULL)
+               return;
+
+       memset(stats, 0, sizeof(*stats));
+       stats->ipackets = hw_stats->vfgprc;
+       stats->ibytes = hw_stats->vfgorc;
+       stats->opackets = hw_stats->vfgptc;
+       stats->obytes = hw_stats->vfgotc;
+       stats->imcasts = hw_stats->vfmprc;
+}
+
+/*
+ * Reset VF statistics: refresh the last_* register snapshots, then zero
+ * only the running totals.  The snapshots are kept so the next delta
+ * computation in ixgbevf_dev_stats_get() stays correct.
+ */
+static void
+ixgbevf_dev_stats_reset(struct rte_eth_dev *dev)
+{
+       struct ixgbevf_hw_stats *stats = (struct ixgbevf_hw_stats*)
+                       IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
+
+       /* bring the last_* snapshots up to date with hardware */
+       ixgbevf_dev_stats_get(dev, NULL);
+
+       /* clear the accumulated totals only (not the snapshots) */
+       stats->vfgprc = 0;
+       stats->vfgorc = 0;
+       stats->vfgptc = 0;
+       stats->vfgotc = 0;
+       stats->vfmprc = 0;
+}
+
+/*
+ * Report the port's static capabilities to the application.
+ */
+static void
+ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       dev_info->max_rx_queues = hw->mac.max_rx_queues;
+       dev_info->max_tx_queues = hw->mac.max_tx_queues;
+       /* smallest RX buffer: cf BSIZEPACKET in SRRCTL register */
+       dev_info->min_rx_bufsize = 1024;
+       /* largest RX frame, includes CRC: cf MAXFRS register */
+       dev_info->max_rx_pktlen = 15872;
+       dev_info->max_mac_addrs = hw->mac.num_rar_entries;
+}
+
+/* return 0 means link status changed, -1 means not changed */
+static int
+ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct rte_eth_link link, old;
+       ixgbe_link_speed link_speed;
+       int link_up;
+       int diag;
+
+       /* start from an all-down candidate; snapshot the recorded status */
+       link.link_status = 0;
+       link.link_speed = 0;
+       link.link_duplex = 0;
+       memset(&old, 0, sizeof(old));
+       rte_ixgbe_dev_atomic_read_link_status(dev, &old);
+
+       /* check if it needs to wait to complete, if lsc interrupt is enabled */
+       if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
+               diag = ixgbe_check_link(hw, &link_speed, &link_up, 0);
+       else
+               diag = ixgbe_check_link(hw, &link_speed, &link_up, 1);
+       /* on read failure, record a placeholder 100M/half-duplex link */
+       if (diag != 0) {
+               link.link_speed = ETH_LINK_SPEED_100;
+               link.link_duplex = ETH_LINK_HALF_DUPLEX;
+               rte_ixgbe_dev_atomic_write_link_status(dev, &link);
+               if (link.link_status == old.link_status)
+                       return -1;
+               return 0;
+       }
+
+       /* link is down: record it and report whether that is a change */
+       if (link_up == 0) {
+               rte_ixgbe_dev_atomic_write_link_status(dev, &link);
+               if (link.link_status == old.link_status)
+                       return -1;
+               return 0;
+       }
+       link.link_status = 1;
+       link.link_duplex = ETH_LINK_FULL_DUPLEX;
+
+       /* translate the hardware speed code into ethdev units */
+       switch (link_speed) {
+       default:
+       case IXGBE_LINK_SPEED_UNKNOWN:
+               link.link_duplex = ETH_LINK_HALF_DUPLEX;
+               link.link_speed = ETH_LINK_SPEED_100;
+               break;
+
+       case IXGBE_LINK_SPEED_100_FULL:
+               link.link_speed = ETH_LINK_SPEED_100;
+               break;
+
+       case IXGBE_LINK_SPEED_1GB_FULL:
+               link.link_speed = ETH_LINK_SPEED_1000;
+               break;
+
+       case IXGBE_LINK_SPEED_10GB_FULL:
+               link.link_speed = ETH_LINK_SPEED_10000;
+               break;
+       }
+       rte_ixgbe_dev_atomic_write_link_status(dev, &link);
+
+       if (link.link_status == old.link_status)
+               return -1;
+
+       return 0;
+}
+
+/*
+ * Enable promiscuous mode: accept every unicast and multicast frame.
+ */
+static void
+ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint32_t filter_ctrl;
+
+       filter_ctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+       filter_ctrl |= IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE;
+       IXGBE_WRITE_REG(hw, IXGBE_FCTRL, filter_ctrl);
+}
+
+/*
+ * Disable promiscuous mode.  The multicast-promiscuous bit is kept set
+ * if the application still has all-multicast mode enabled.
+ */
+static void
+ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint32_t filter_ctrl;
+
+       filter_ctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+       filter_ctrl &= ~IXGBE_FCTRL_UPE;
+       if (dev->data->all_multicast == 1)
+               filter_ctrl |= IXGBE_FCTRL_MPE;
+       else
+               filter_ctrl &= ~IXGBE_FCTRL_MPE;
+       IXGBE_WRITE_REG(hw, IXGBE_FCTRL, filter_ctrl);
+}
+
+/*
+ * Enable reception of all multicast frames.
+ */
+static void
+ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint32_t filter_ctrl;
+
+       filter_ctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+       filter_ctrl |= IXGBE_FCTRL_MPE;
+       IXGBE_WRITE_REG(hw, IXGBE_FCTRL, filter_ctrl);
+}
+
+/*
+ * Disable all-multicast mode, unless promiscuous mode is active (in
+ * which case the MPE bit must stay set).
+ */
+static void
+ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint32_t filter_ctrl;
+
+       if (dev->data->promiscuous == 1)
+               return; /* must remain in all_multicast mode */
+
+       filter_ctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+       filter_ctrl &= ~IXGBE_FCTRL_MPE;
+       IXGBE_WRITE_REG(hw, IXGBE_FCTRL, filter_ctrl);
+}
+
+/**
+ * It clears the interrupt causes and enables the interrupt.
+ * It is called only once, during NIC initialization.
+ *
+ * @param dev
+ *  Pointer to struct rte_eth_dev.
+ *
+ * @return
+ *  - On success, zero.
+ *  - On failure, a negative value.
+ */
+static int
+ixgbe_dev_interrupt_setup(struct rte_eth_dev *dev)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       /* log the current link state, then unmask the link-status-change
+        * (LSC) cause and enable delivery through the EAL interrupt line */
+       ixgbe_dev_link_status_print(dev);
+       IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_LSC);
+       IXGBE_WRITE_FLUSH(hw);
+       rte_intr_enable(&(dev->pci_dev->intr_handle));
+
+       return 0;
+}
+
+/*
+ * It reads ICR and sets flag (IXGBE_EICR_LSC) for the link_update.
+ *
+ * @param dev
+ *  Pointer to struct rte_eth_dev.
+ *
+ * @return
+ *  - On success, zero.
+ *  - On failure, a negative value.
+ */
+static int
+ixgbe_dev_interrupt_get_status(struct rte_eth_dev *dev)
+{
+       uint32_t eicr;
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_interrupt *intr =
+               IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+
+       /* mask the LSC cause while it is being processed */
+       IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_LSC);
+       IXGBE_WRITE_FLUSH(hw);
+
+       /* read-on-clear nic registers here */
+       eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
+       PMD_INIT_LOG(INFO, "eicr %x", eicr);
+       if (eicr & IXGBE_EICR_LSC) {
+               /* set flag for async link update */
+               intr->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
+       }
+
+       return 0;
+}
+
+/**
+ * It gets and then prints the link status.
+ *
+ * @param dev
+ *  Pointer to struct rte_eth_dev.
+ *
+ * @return
+ *  void
+ */
+static void
+ixgbe_dev_link_status_print(struct rte_eth_dev *dev)
+{
+       struct rte_eth_link cur_link;
+
+       /* read the atomically-recorded link state */
+       memset(&cur_link, 0, sizeof(cur_link));
+       rte_ixgbe_dev_atomic_read_link_status(dev, &cur_link);
+
+       if (!cur_link.link_status) {
+               PMD_INIT_LOG(INFO, " Port %d: Link Down",
+                               (int)(dev->data->port_id));
+       } else {
+               const char *duplex =
+                       (cur_link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
+                               "full-duplex" : "half-duplex";
+
+               PMD_INIT_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
+                               (int)(dev->data->port_id),
+                               (unsigned)cur_link.link_speed, duplex);
+       }
+       PMD_INIT_LOG(INFO, "PCI Address: %04d:%02d:%02d:%d",
+                               dev->pci_dev->addr.domain,
+                               dev->pci_dev->addr.bus,
+                               dev->pci_dev->addr.devid,
+                               dev->pci_dev->addr.function);
+}
+
+/*
+ * It executes link_update after knowing an interrupt occurred.
+ *
+ * @param dev
+ *  Pointer to struct rte_eth_dev.
+ *
+ * @return
+ *  - On success, zero.
+ *  - On failure, a negative value.
+ */
+static int
+ixgbe_dev_interrupt_action(struct rte_eth_dev *dev)
+{
+       struct ixgbe_interrupt *intr =
+               IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+
+       /* nothing pending: no link update was requested */
+       if ((intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) == 0)
+               return -1;
+
+       /* refresh the recorded link status (non-blocking check) */
+       ixgbe_dev_link_update(dev, 0);
+       return 0;
+}
+
+/**
+ * Interrupt handler which shall be registered for alarm callback for delayed
+ * handling specific interrupt to wait for the stable nic state. As the
+ * NIC interrupt state is not stable for ixgbe after link is just down,
+ * it needs to wait 4 seconds to get the stable status.
+ *
+ * @param handle
+ *  Pointer to interrupt handle.
+ * @param param
+ *  The address of parameter (struct rte_eth_dev *) registered before.
+ *
+ * @return
+ *  void
+ */
+static void
+ixgbe_dev_interrupt_delayed_handler(void *param)
+{
+       struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+       struct ixgbe_interrupt *intr =
+               IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+       struct ixgbe_hw *hw =
+               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       /* drain EICR (read-on-clear), then perform the deferred action */
+       IXGBE_READ_REG(hw, IXGBE_EICR);
+       ixgbe_dev_interrupt_action(dev);
+       if (intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
+               /* done: clear the pending flag, re-enable the interrupt
+                * line, unmask the LSC cause and notify registered
+                * LSC callbacks of the new link state */
+               intr->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
+               rte_intr_enable(&(dev->pci_dev->intr_handle));
+               IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_LSC);
+               IXGBE_WRITE_FLUSH(hw);
+               ixgbe_dev_link_status_print(dev);
+               _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
+       }
+}
+
+/**
+ * Interrupt handler triggered by NIC for handling
+ * specific interrupt.
+ *
+ * @param handle
+ *  Pointer to interrupt handle (unused; the device comes via param).
+ * @param param
+ *  The address of parameter (struct rte_eth_dev *) registered before.
+ *
+ * @return
+ *  void
+ */
+static void
+ixgbe_dev_interrupt_handler(struct rte_intr_handle *handle, void *param)
+{
+       int64_t timeout;
+       struct rte_eth_link link;
+       struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+       struct ixgbe_interrupt *intr =
+               IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+
+       /* get the link status before link update, for predicting later */
+       memset(&link, 0, sizeof(link));
+       rte_ixgbe_dev_atomic_read_link_status(dev, &link);
+       ixgbe_dev_interrupt_get_status(dev);
+       ixgbe_dev_interrupt_action(dev);
+
+       /* Nothing more to do unless a link-status change was flagged. */
+       if (!(intr->flags & IXGBE_FLAG_NEED_LINK_UPDATE))
+               return;
+
+       /* likely to up */
+       if (!link.link_status)
+               /* handle it 1 sec later, wait it being stable */
+               timeout = IXGBE_LINK_UP_CHECK_TIMEOUT;
+       /* likely to down */
+       else
+               /* handle it 4 sec later, wait it being stable */
+               timeout = IXGBE_LINK_DOWN_CHECK_TIMEOUT;
+
+       ixgbe_dev_link_status_print(dev);
+       /* Defer final handling to the delayed handler; timeout * 1000
+        * presumably converts ms to the us rte_eal_alarm_set expects —
+        * confirm against the EAL alarm API. */
+       if (rte_eal_alarm_set(timeout * 1000,
+               ixgbe_dev_interrupt_delayed_handler, param) < 0)
+               PMD_INIT_LOG(ERR, "Error setting alarm");
+}
+
+/*
+ * Turn on LED 0 of the port; returns 0 on success, -ENOTSUP if the
+ * shared-code helper reports anything but IXGBE_SUCCESS.
+ */
+static int
+ixgbe_dev_led_on(struct rte_eth_dev *dev)
+{
+       struct ixgbe_hw *hw;
+
+       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       return (ixgbe_led_on(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP);
+}
+
+/*
+ * Turn off LED 0 of the port; returns 0 on success, -ENOTSUP if the
+ * shared-code helper reports anything but IXGBE_SUCCESS.
+ */
+static int
+ixgbe_dev_led_off(struct rte_eth_dev *dev)
+{
+       struct ixgbe_hw *hw;
+
+       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       return (ixgbe_led_off(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP);
+}
+
+/*
+ * Configure Ethernet flow control from the generic rte_eth_fc_conf:
+ * validates the high/low watermarks against the RX packet buffer size,
+ * programs the hw->fc fields and enables flow control.
+ *
+ * Returns 0 on success (or when negotiation did not complete, which is
+ * not treated as an error), -EINVAL on bad watermarks, -EIO otherwise.
+ */
+static int
+ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+       struct ixgbe_hw *hw;
+       int err;
+       uint32_t rx_buf_size;
+       uint32_t max_high_water;
+       /* Maps rte_eth_fc_conf.mode (used as index) to the ixgbe enum.
+        * NOTE(review): fc_conf->mode is not range-checked before use as
+        * an index — relies on callers passing a valid enum value. */
+       enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = {
+               ixgbe_fc_none,
+               ixgbe_fc_rx_pause,
+               ixgbe_fc_tx_pause,
+               ixgbe_fc_full
+       };
+
+       PMD_INIT_FUNC_TRACE();
+
+       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       rx_buf_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0));
+       PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x \n", rx_buf_size);
+
+       /*
+        * At least reserve one Ethernet frame for watermark
+        * high_water/low_water in kilo bytes for ixgbe
+        */
+       max_high_water = (rx_buf_size - ETHER_MAX_LEN) >> IXGBE_RXPBSIZE_SHIFT;
+       if ((fc_conf->high_water > max_high_water) ||
+               (fc_conf->high_water < fc_conf->low_water)) {
+               PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB\n");
+               PMD_INIT_LOG(ERR, "High_water must <=  0x%x\n", max_high_water);
+               return (-EINVAL);
+       }
+
+       hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[fc_conf->mode];
+       hw->fc.pause_time     = fc_conf->pause_time;
+       hw->fc.high_water[0]  = fc_conf->high_water;
+       hw->fc.low_water      = fc_conf->low_water;
+       hw->fc.send_xon       = fc_conf->send_xon;
+
+       err = ixgbe_fc_enable(hw, 0);
+       /* Not negotiated is not an error case */
+       if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED)) {
+               return 0;
+       }
+
+       PMD_INIT_LOG(ERR, "ixgbe_fc_enable = 0x%x \n", err);
+       return -EIO;
+}
+
+/*
+ * Add a MAC address to Receive Address Register (RAR) slot 'index',
+ * associated with VMDq 'pool'. enable_addr=1 marks the entry valid
+ * (presumably maps to the RAH.AV bit — confirm in shared code).
+ */
+static void
+ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
+                               uint32_t index, uint32_t pool)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint32_t enable_addr = 1;
+
+       ixgbe_set_rar(hw, index, mac_addr->addr_bytes, pool, enable_addr);
+}
+
+/* Clear the Receive Address Register (RAR) entry at 'index'. */
+static void
+ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       ixgbe_clear_rar(hw, index);
+}
+
+/*
+ * Virtual Function operations
+ */
+/* Mask all VF interrupts by writing the VTEIMC clear mask. */
+static void
+ixgbevf_intr_disable(struct ixgbe_hw *hw)
+{
+       PMD_INIT_LOG(DEBUG, "ixgbevf_intr_disable");
+
+       /* Clear interrupt mask to stop from interrupts being generated */
+       IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);
+
+       IXGBE_WRITE_FLUSH(hw);
+}
+
+/*
+ * VF device configuration: allocate the RX/TX queue pointer arrays and
+ * force CRC stripping on (the VF cannot control it, so it must follow
+ * the PF behavior).
+ *
+ * Returns 0 on success, or the (negative) allocation error code.
+ */
+static int
+ixgbevf_dev_configure(struct rte_eth_dev *dev, uint16_t nb_rx_q, uint16_t nb_tx_q)
+{
+       int diag;
+       struct rte_eth_conf* conf = &dev->data->dev_conf;
+
+       PMD_INIT_FUNC_TRACE();
+
+       /* Allocate the array of pointers to RX queue structures */
+       diag = ixgbe_dev_rx_queue_alloc(dev, nb_rx_q);
+       if (diag != 0) {
+               PMD_INIT_LOG(ERR, "ethdev port_id=%d allocation of array of %d "
+                            "pointers to RX queues failed", dev->data->port_id,
+                            nb_rx_q);
+               return diag;
+       }
+
+       /* Allocate the array of pointers to TX queue structures */
+       diag = ixgbe_dev_tx_queue_alloc(dev, nb_tx_q);
+       if (diag != 0) {
+               PMD_INIT_LOG(ERR, "ethdev port_id=%d allocation of array of %d "
+                            "pointers to TX queues failed", dev->data->port_id,
+                            nb_tx_q);
+               return diag;
+       }
+
+       if (!conf->rxmode.hw_strip_crc) {
+               /*
+                * VF has no ability to enable/disable HW CRC
+                * Keep the persistent behavior the same as Host PF
+                */
+               PMD_INIT_LOG(INFO, "VF can't disable HW CRC Strip\n");
+               conf->rxmode.hw_strip_crc = 1;
+       }
+
+       return 0;
+}
+
+/*
+ * Start the VF device: initialize TX then RX hardware, and start the
+ * RX/TX units. On RX init failure all queues are cleared and the error
+ * is propagated to the caller.
+ */
+static int
+ixgbevf_dev_start(struct rte_eth_dev *dev)
+{
+       int err = 0;
+       PMD_INIT_LOG(DEBUG, "ixgbevf_dev_start");
+
+       ixgbevf_dev_tx_init(dev);
+       err = ixgbevf_dev_rx_init(dev);
+       if(err){
+               ixgbe_dev_clear_queues(dev);
+               PMD_INIT_LOG(ERR,"Unable to initialize RX hardware\n");
+               return err;
+       }
+       ixgbevf_dev_rxtx_start(dev);
+
+       return 0;
+}
+
+/*
+ * Stop the VF device: reset the hardware, run the adapter stop sequence
+ * and restore RAR[0] in case the user changed the MAC address.
+ */
+static void
+ixgbevf_dev_stop(struct rte_eth_dev *dev)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       PMD_INIT_LOG(DEBUG, "ixgbevf_dev_stop");
+
+       ixgbe_reset_hw(hw);
+       /* NOTE(review): adapter_stopped is cleared BEFORE stop_adapter —
+        * presumably so ixgbe_stop_adapter() runs its full sequence rather
+        * than early-exiting; confirm against the shared base driver. */
+       hw->adapter_stopped = 0;
+       ixgbe_stop_adapter(hw);
+       /* reprogram the RAR[0] in case user changed it. */
+       ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
+}
diff --git a/lib/librte_pmd_ixgbe/ixgbe_ethdev.h b/lib/librte_pmd_ixgbe/ixgbe_ethdev.h
new file mode 100644 (file)
index 0000000..1df3a88
--- /dev/null
@@ -0,0 +1,176 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#ifndef _IXGBE_ETHDEV_H_
+#define _IXGBE_ETHDEV_H_
+
+/* need update link, bit flag */
+#define IXGBE_FLAG_NEED_LINK_UPDATE (uint32_t)(1 << 0)
+
+/*
+ * Defines that were not part of ixgbe_type.h as they are not used by the
+ * FreeBSD driver.
+ */
+#define IXGBE_ADVTXD_MAC_1588       0x00080000 /* IEEE1588 Timestamp packet */
+#define IXGBE_RXD_STAT_TMST         0x10000    /* Timestamped Packet indication */
+#define IXGBE_ADVTXD_TUCMD_L4T_RSV  0x00001800 /* L4 Packet TYPE, resvd  */
+#define IXGBE_RXDADV_ERR_CKSUM_BIT  30
+#define IXGBE_RXDADV_ERR_CKSUM_MSK  3
+#define IXGBE_ADVTXD_MACLEN_SHIFT   9          /* Bit shift for l2_len */
+
+#define IXGBE_VFTA_SIZE 128
+
+/*
+ * Information about the fdir (flow director) mode: capacity figures and
+ * running counters for filter add/remove operations.
+ */
+struct ixgbe_hw_fdir_info {
+       uint16_t    collision;  /* NOTE(review): hash collisions — confirm */
+       uint16_t    free;       /* free filter entries remaining */
+       uint16_t    maxhash;
+       uint8_t     maxlen;
+       uint64_t    add;        /* filters added */
+       uint64_t    remove;     /* filters removed */
+       uint64_t    f_add;      /* NOTE(review): failed adds — confirm */
+       uint64_t    f_remove;   /* NOTE(review): failed removes — confirm */
+};
+
+/* structure for interrupt relative data */
+struct ixgbe_interrupt {
+       uint32_t flags;         /* IXGBE_FLAG_NEED_LINK_UPDATE et al. */
+};
+
+/* local (shadow) copy of the 128-entry VLAN Filter Table Array */
+struct ixgbe_vfta {
+       uint32_t vfta[IXGBE_VFTA_SIZE];
+};
+
+/*
+ * Structure to store private data for each driver instance (for each port).
+ * Sub-structures are reached via the IXGBE_DEV_PRIVATE_TO_* macros below.
+ */
+struct ixgbe_adapter {
+       struct ixgbe_hw             hw;      /* shared-code hardware state */
+       struct ixgbe_hw_stats       stats;   /* hardware statistics */
+       struct ixgbe_hw_fdir_info   fdir;    /* flow director info */
+       struct ixgbe_interrupt      intr;    /* interrupt flags */
+       struct ixgbe_vfta           shadow_vfta; /* VLAN table shadow copy */
+};
+
+#define IXGBE_DEV_PRIVATE_TO_HW(adapter)\
+       (&((struct ixgbe_adapter *)adapter)->hw)
+
+#define IXGBE_DEV_PRIVATE_TO_STATS(adapter) \
+       (&((struct ixgbe_adapter *)adapter)->stats)
+
+#define IXGBE_DEV_PRIVATE_TO_INTR(adapter) \
+       (&((struct ixgbe_adapter *)adapter)->intr)
+
+#define IXGBE_DEV_PRIVATE_TO_FDIR_INFO(adapter) \
+       (&((struct ixgbe_adapter *)adapter)->fdir)
+
+#define IXGBE_DEV_PRIVATE_TO_VFTA(adapter) \
+       (&((struct ixgbe_adapter *)adapter)->shadow_vfta)
+
+
+/*
+ * RX/TX function prototypes
+ */
+void ixgbe_dev_clear_queues(struct rte_eth_dev *dev);
+
+int ixgbe_dev_rx_queue_alloc(struct rte_eth_dev *dev, uint16_t nb_rx_queues);
+
+int ixgbe_dev_tx_queue_alloc(struct rte_eth_dev *dev, uint16_t nb_tx_queues);
+
+int  ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
+               uint16_t nb_rx_desc, unsigned int socket_id,
+               const struct rte_eth_rxconf *rx_conf,
+               struct rte_mempool *mb_pool);
+
+int  ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
+               uint16_t nb_tx_desc, unsigned int socket_id,
+               const struct rte_eth_txconf *tx_conf);
+
+int ixgbe_dev_rx_init(struct rte_eth_dev *dev);
+
+void ixgbe_dev_tx_init(struct rte_eth_dev *dev);
+
+void ixgbe_dev_rxtx_start(struct rte_eth_dev *dev);
+
+int ixgbevf_dev_rx_init(struct rte_eth_dev *dev);
+
+void ixgbevf_dev_tx_init(struct rte_eth_dev *dev);
+
+void ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev);
+
+uint16_t ixgbe_recv_pkts(struct igb_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+               uint16_t nb_pkts);
+
+uint16_t ixgbe_recv_scattered_pkts(struct igb_rx_queue *rxq,
+               struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+
+uint16_t ixgbe_xmit_pkts(struct igb_tx_queue *txq, struct rte_mbuf **tx_pkts,
+               uint16_t nb_pkts);
+
+/*
+ * Flow director function prototypes
+ */
+int ixgbe_fdir_configure(struct rte_eth_dev *dev);
+
+int ixgbe_fdir_add_signature_filter(struct rte_eth_dev *dev,
+               struct rte_fdir_filter *fdir_filter, uint8_t queue);
+
+int ixgbe_fdir_update_signature_filter(struct rte_eth_dev *dev,
+               struct rte_fdir_filter *fdir_filter, uint8_t queue);
+
+int ixgbe_fdir_remove_signature_filter(struct rte_eth_dev *dev,
+               struct rte_fdir_filter *fdir_filter);
+
+void ixgbe_fdir_info_get(struct rte_eth_dev *dev,
+               struct rte_eth_fdir *fdir);
+
+int ixgbe_fdir_add_perfect_filter(struct rte_eth_dev *dev,
+               struct rte_fdir_filter *fdir_filter, uint16_t soft_id,
+               uint8_t queue, uint8_t drop);
+
+int ixgbe_fdir_update_perfect_filter(struct rte_eth_dev *dev,
+               struct rte_fdir_filter *fdir_filter,uint16_t soft_id,
+               uint8_t queue, uint8_t drop);
+
+int ixgbe_fdir_remove_perfect_filter(struct rte_eth_dev *dev,
+               struct rte_fdir_filter *fdir_filter, uint16_t soft_id);
+
+int ixgbe_fdir_set_masks(struct rte_eth_dev *dev,
+               struct rte_fdir_masks *fdir_masks);
+
+#endif /* _IXGBE_ETHDEV_H_ */
diff --git a/lib/librte_pmd_ixgbe/ixgbe_fdir.c b/lib/librte_pmd_ixgbe/ixgbe_fdir.c
new file mode 100644 (file)
index 0000000..1ebc416
--- /dev/null
@@ -0,0 +1,891 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <sys/queue.h>
+
+#include <rte_interrupts.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+
+#include "ixgbe_logs.h"
+#include "ixgbe/ixgbe_api.h"
+#include "ixgbe/ixgbe_common.h"
+#include "ixgbe_ethdev.h"
+
+/* To get PBALLOC (Packet Buffer Allocation) bits from FDIRCTRL value */
+#define FDIRCTRL_PBALLOC_MASK           0x03
+
+/* For calculating memory required for FDIR filters */
+#define PBALLOC_SIZE_SHIFT              15
+
+/* Number of bits used to mask bucket hash for different pballoc sizes */
+#define PERFECT_BUCKET_64KB_HASH_MASK   0x07FF  /* 11 bits */
+#define PERFECT_BUCKET_128KB_HASH_MASK  0x0FFF  /* 12 bits */
+#define PERFECT_BUCKET_256KB_HASH_MASK  0x1FFF  /* 13 bits */
+#define SIG_BUCKET_64KB_HASH_MASK       0x1FFF  /* 13 bits */
+#define SIG_BUCKET_128KB_HASH_MASK      0x3FFF  /* 14 bits */
+#define SIG_BUCKET_256KB_HASH_MASK      0x7FFF  /* 15 bits */
+
+/**
+ * This function is based on ixgbe_fdir_enable_82599() in ixgbe/ixgbe_82599.c.
+ * It adds extra configuration of fdirctrl that is common for all filter types.
+ *
+ *  Initialize Flow Director control registers
+ *  @hw: pointer to hardware structure
+ *  @fdirctrl: value to write to flow director control register
+ **/
+static void fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl)
+{
+       int i;
+
+       PMD_INIT_FUNC_TRACE();
+
+       /* Prime the keys for hashing */
+       IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
+       IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
+
+       /*
+        * Continue setup of fdirctrl register bits:
+        *  Set the maximum length per hash bucket to 0xA filters
+        *  Send interrupt when 64 filters are left
+        */
+       fdirctrl |= (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
+                   (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);
+
+       /*
+        * Poll init-done after we write the register.  Estimated times:
+        *      10G: PBALLOC = 11b, timing is 60us
+        *       1G: PBALLOC = 11b, timing is 600us
+        *     100M: PBALLOC = 11b, timing is 6ms
+        *
+        *     Multiple these timings by 4 if under full Rx load
+        *
+        * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
+        * 1 msec per poll time.  If we're at line rate and drop to 100M, then
+        * this might not finish in our poll time, but we can live with that
+        * for now.
+        */
+       IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
+       IXGBE_WRITE_FLUSH(hw);
+       for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
+               if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
+                                  IXGBE_FDIRCTRL_INIT_DONE)
+                       break;
+               msec_delay(1);
+       }
+
+       /* Not fatal: log and continue if the hardware never signalled done. */
+       if (i >= IXGBE_FDIR_INIT_DONE_POLL)
+               PMD_INIT_LOG(WARNING, "Flow Director poll time exceeded!\n");
+}
+
+/*
+ * Set appropriate bits in fdirctrl for: variable reporting levels, moving
+ * flexbytes matching field, and drop queue (only for perfect matching mode).
+ *
+ * @param conf
+ *  Flow director configuration (from the device configuration).
+ * @param fdirctrl
+ *  Output: value to program into the FDIRCTRL register.
+ *
+ * @return
+ *  - 0 on success.
+ *  - (-EINVAL) if conf holds an unknown pballoc or status value.
+ */
+static int
+configure_fdir_flags(struct rte_fdir_conf *conf, uint32_t *fdirctrl)
+{
+       *fdirctrl = 0;
+
+       /* Packet-buffer allocation size selects the filter table capacity. */
+       switch (conf->pballoc) {
+       case RTE_FDIR_PBALLOC_64K:
+               /* 8k - 1 signature filters */
+               *fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K;
+               break;
+       case RTE_FDIR_PBALLOC_128K:
+               /* 16k - 1 signature filters */
+               *fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K;
+               break;
+       case RTE_FDIR_PBALLOC_256K:
+               /* 32k - 1 signature filters */
+               *fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K;
+               break;
+       default:
+               /* bad value */
+               PMD_INIT_LOG(ERR, "Invalid fdir_conf->pballoc value");
+               return -EINVAL;
+       }
+
+       /* status flags: write hash & swindex in the rx descriptor */
+       switch (conf->status) {
+       case RTE_FDIR_NO_REPORT_STATUS:
+               /* do nothing, default mode */
+               break;
+       case RTE_FDIR_REPORT_STATUS:
+               /* report status when the packet matches a fdir rule */
+               *fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS;
+               break;
+       case RTE_FDIR_REPORT_STATUS_ALWAYS:
+               /* always report status */
+               *fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS_ALWAYS;
+               break;
+       default:
+               /* bad value */
+               PMD_INIT_LOG(ERR, "Invalid fdir_conf->status value");
+               return -EINVAL;
+       }
+
+       *fdirctrl |= (conf->flexbytes_offset << IXGBE_FDIRCTRL_FLEX_SHIFT);
+
+       /* Perfect-match mode additionally programs the drop queue. */
+       if (conf->mode == RTE_FDIR_MODE_PERFECT) {
+               *fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH;
+               *fdirctrl |= (conf->drop_queue << IXGBE_FDIRCTRL_DROP_Q_SHIFT);
+       }
+
+       return 0;
+}
+
+/*
+ * Configure Flow Director for the port (82599 only): compute fdirctrl
+ * from the device fdir configuration, shrink RX packet buffer 0 by the
+ * filter memory usage, zero RX PBs 1-7, and enable the unit.
+ *
+ * Returns 0 on success, -ENOSYS on non-82599 MACs, or a negative value
+ * from configure_fdir_flags() on bad configuration.
+ */
+int
+ixgbe_fdir_configure(struct rte_eth_dev *dev)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       int err;
+       uint32_t fdirctrl, pbsize;
+       int i;
+
+       PMD_INIT_FUNC_TRACE();
+
+       if (hw->mac.type != ixgbe_mac_82599EB)
+               return -ENOSYS;
+
+       err = configure_fdir_flags(&dev->data->dev_conf.fdir_conf, &fdirctrl);
+       if (err)
+               return err;
+
+       /*
+        * Before enabling Flow Director, the Rx Packet Buffer size
+        * must be reduced.  The new value is the current size minus
+        * flow director memory usage size.
+        */
+       pbsize = (1 << (PBALLOC_SIZE_SHIFT + (fdirctrl & FDIRCTRL_PBALLOC_MASK)));
+       IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0),
+           (IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) - pbsize));
+
+       /*
+        * The defaults in the HW for RX PB 1-7 are not zero and so should be
+        * initialized to zero for non DCB mode otherwise actual total RX PB
+        * would be bigger than programmed and filter space would run into
+        * the PB 0 region.
+        */
+       for (i = 1; i < 8; i++)
+               IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0);
+
+       fdir_enable_82599(hw, fdirctrl);
+       return 0;
+}
+
+/*
+ * The below function is taken from the FreeBSD IXGBE drivers release
+ * 2.3.8. The only change is not to mask hash_result with IXGBE_ATR_HASH_MASK
+ * before returning, as the signature hash can use 16bits.
+ *
+ * The newer driver has optimised functions for calculating bucket and
+ * signature hashes. However they don't support IPv6 type packets for signature
+ * filters so are not used here.
+ *
+ * Note that the bkt_hash field in the ixgbe_atr_input structure is also never
+ * set.
+ *
+ * Compute the hashes for SW ATR
+ *  @stream: input bitstream to compute the hash on
+ *  @key: 32-bit hash key
+ **/
+static u32
+ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
+                                u32 key)
+{
+       /*
+        * The algorithm is as follows:
+        *    Hash[15:0] = Sum { S[n] x K[n+16] }, n = 0...350
+        *    where Sum {A[n]}, n = 0...n is bitwise XOR of A[0], A[1]...A[n]
+        *    and A[n] x B[n] is bitwise AND between same length strings
+        *
+        *    K[n] is 16 bits, defined as:
+        *       for n modulo 32 >= 15, K[n] = K[n % 32 : (n % 32) - 15]
+        *       for n modulo 32 < 15, K[n] =
+        *             K[(n % 32:0) | (31:31 - (14 - (n % 32)))]
+        *
+        *    S[n] is 16 bits, defined as:
+        *       for n >= 15, S[n] = S[n:n - 15]
+        *       for n < 15, S[n] = S[(n:0) | (350:350 - (14 - n))]
+        *
+        *    To simplify for programming, the algorithm is implemented
+        *    in software this way:
+        *
+        *    key[31:0], hi_hash_dword[31:0], lo_hash_dword[31:0], hash[15:0]
+        *
+        *    for (i = 0; i < 352; i+=32)
+        *        hi_hash_dword[31:0] ^= Stream[(i+31):i];
+        *
+        *    lo_hash_dword[15:0]  ^= Stream[15:0];
+        *    lo_hash_dword[15:0]  ^= hi_hash_dword[31:16];
+        *    lo_hash_dword[31:16] ^= hi_hash_dword[15:0];
+        *
+        *    hi_hash_dword[31:0]  ^= Stream[351:320];
+        *
+        *    if(key[0])
+        *        hash[15:0] ^= Stream[15:0];
+        *
+        *    for (i = 0; i < 16; i++) {
+        *        if (key[i])
+        *            hash[15:0] ^= lo_hash_dword[(i+15):i];
+        *        if (key[i + 16])
+        *            hash[15:0] ^= hi_hash_dword[(i+15):i];
+        *    }
+        *
+        */
+       __be32 common_hash_dword = 0;
+       u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
+       u32 hash_result = 0;
+       u8 i;
+
+       /* record the flow_vm_vlan bits as they are a key part to the hash */
+       flow_vm_vlan = IXGBE_NTOHL(atr_input->dword_stream[0]);
+
+       /* generate common hash dword by XORing dwords 1..10 pairwise */
+       for (i = 10; i; i -= 2)
+               common_hash_dword ^= atr_input->dword_stream[i] ^
+                                    atr_input->dword_stream[i - 1];
+
+       hi_hash_dword = IXGBE_NTOHL(common_hash_dword);
+
+       /* low dword is word swapped version of common */
+       lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
+
+       /* apply flow ID/VM pool/VLAN ID bits to hash words */
+       hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
+
+       /* Process bits 0 and 16 */
+       if (key & 0x0001) hash_result ^= lo_hash_dword;
+       if (key & 0x00010000) hash_result ^= hi_hash_dword;
+
+       /*
+        * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
+        * delay this because bit 0 of the stream should not be processed
+        * so we do not add the vlan until after bit 0 was processed
+        */
+       lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
+
+
+       /* process the remaining 30 bits in the key 2 bits at a time */
+       for (i = 15; i; i-- ) {
+               if (key & (0x0001 << i)) hash_result ^= lo_hash_dword >> i;
+               if (key & (0x00010000 << i)) hash_result ^= hi_hash_dword >> i;
+       }
+
+       return hash_result;
+}
+
+/*
+ * Calculate the hash value needed for signature-match filters. In the FreeBSD
+ * driver, this is done by the optimised function
+ * ixgbe_atr_compute_sig_hash_82599(). However that can't be used here as it
+ * doesn't support calculating a hash for an IPv6 filter.
+ *
+ * The bucket-hash mask width depends on the configured packet-buffer
+ * allocation; the result packs the signature hash in the upper bits and
+ * the bucket hash in the lower bits.
+ */
+static uint32_t
+atr_compute_sig_hash_82599(union ixgbe_atr_input *input,
+               enum rte_fdir_pballoc_type pballoc)
+{
+       uint32_t bucket_hash, sig_hash;
+
+       if (pballoc == RTE_FDIR_PBALLOC_256K)
+               bucket_hash = ixgbe_atr_compute_hash_82599(input,
+                               IXGBE_ATR_BUCKET_HASH_KEY) &
+                               SIG_BUCKET_256KB_HASH_MASK;
+       else if (pballoc == RTE_FDIR_PBALLOC_128K)
+               bucket_hash = ixgbe_atr_compute_hash_82599(input,
+                               IXGBE_ATR_BUCKET_HASH_KEY) &
+                               SIG_BUCKET_128KB_HASH_MASK;
+       else
+               bucket_hash = ixgbe_atr_compute_hash_82599(input,
+                               IXGBE_ATR_BUCKET_HASH_KEY) &
+                               SIG_BUCKET_64KB_HASH_MASK;
+
+       sig_hash = ixgbe_atr_compute_hash_82599(input,
+                       IXGBE_ATR_SIGNATURE_HASH_KEY);
+
+       return (sig_hash << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT) | bucket_hash;
+}
+
+/**
+ * This function is based on ixgbe_atr_add_signature_filter_82599() in
+ * ixgbe/ixgbe_82599.c, but uses a pre-calculated hash value. It also supports
+ * setting extra fields in the FDIRCMD register, and removes the code that was
+ * verifying the flow_type field. According to the documentation, a flow type of
+ * 00 (i.e. not TCP, UDP, or SCTP) is not supported, however it appears to
+ * work ok...
+ *
+ *  Adds a signature hash filter
+ *  @hw: pointer to hardware structure
+ *  @input: unique input dword
+ *  @queue: queue index to direct traffic to
+ *  @fdircmd: any extra flags to set in fdircmd register
+ *  @fdirhash: pre-calculated hash value for the filter
+ **/
+static void
+fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
+               union ixgbe_atr_input *input, u8 queue, u32 fdircmd,
+               u32 fdirhash)
+{
+       u64  fdirhashcmd;
+
+       PMD_INIT_FUNC_TRACE();
+
+       /* configure FDIRCMD register */
+       fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW |
+                 IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
+       fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
+       fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
+
+       /*
+        * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
+        * is for FDIRCMD.  Then do a 64-bit register write from FDIRHASH.
+        */
+       fdirhashcmd = (u64)fdircmd << 32;
+       fdirhashcmd |= fdirhash;
+       IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);
+
+       /* 'queue' is the RX queue traffic is steered to (see RX_QUEUE_SHIFT
+        * above); the previous message misleadingly said "Tx Queue". */
+       PMD_INIT_LOG(DEBUG, "Rx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);
+}
+
+/*
+ * Convert DPDK rte_fdir_filter struct to ixgbe_atr_input union that is used
+ * by the IXGBE driver code.
+ *
+ * Returns 0 on success, -EINVAL when ports are given for a filter type
+ * that has no ports (SCTP/NONE) or when l4type is unknown.
+ */
+static int
+fdir_filter_to_atr_input(struct rte_fdir_filter *fdir_filter,
+               union ixgbe_atr_input *input)
+{
+       /* SCTP and "no L4" filters cannot match on L4 ports. */
+       if ((fdir_filter->l4type == RTE_FDIR_L4TYPE_SCTP ||
+                       fdir_filter->l4type == RTE_FDIR_L4TYPE_NONE) &&
+                       (fdir_filter->port_src || fdir_filter->port_dst)) {
+               PMD_INIT_LOG(ERR, "Invalid fdir_filter");
+               return -EINVAL;
+       }
+
+       memset(input, 0, sizeof(*input));
+
+       input->formatted.vlan_id = fdir_filter->vlan_id;
+       input->formatted.src_port = fdir_filter->port_src;
+       input->formatted.dst_port = fdir_filter->port_dst;
+       input->formatted.flex_bytes = fdir_filter->flex_bytes;
+
+       /* Map the generic L4 type onto the ixgbe ATR flow type. */
+       switch (fdir_filter->l4type) {
+       case RTE_FDIR_L4TYPE_TCP:
+               input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
+               break;
+       case RTE_FDIR_L4TYPE_UDP:
+               input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
+               break;
+       case RTE_FDIR_L4TYPE_SCTP:
+               input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
+               break;
+       case RTE_FDIR_L4TYPE_NONE:
+               input->formatted.flow_type = IXGBE_ATR_FLOW_TYPE_IPV4;
+               break;
+       default:
+               PMD_INIT_LOG(ERR, " Error on l4type input");
+               return -EINVAL;
+       }
+
+       /* IPv6 is signalled by OR-ing the IPv6 mask into the flow type. */
+       if (fdir_filter->iptype == RTE_FDIR_IPTYPE_IPV6) {
+               input->formatted.flow_type |= IXGBE_ATR_L4TYPE_IPV6_MASK;
+
+               input->formatted.src_ip[0] = fdir_filter->ip_src.ipv6_addr[0];
+               input->formatted.src_ip[1] = fdir_filter->ip_src.ipv6_addr[1];
+               input->formatted.src_ip[2] = fdir_filter->ip_src.ipv6_addr[2];
+               input->formatted.src_ip[3] = fdir_filter->ip_src.ipv6_addr[3];
+
+               input->formatted.dst_ip[0] = fdir_filter->ip_dst.ipv6_addr[0];
+               input->formatted.dst_ip[1] = fdir_filter->ip_dst.ipv6_addr[1];
+               input->formatted.dst_ip[2] = fdir_filter->ip_dst.ipv6_addr[2];
+               input->formatted.dst_ip[3] = fdir_filter->ip_dst.ipv6_addr[3];
+
+       } else {
+               input->formatted.src_ip[0] = fdir_filter->ip_src.ipv4_addr;
+               input->formatted.dst_ip[0] = fdir_filter->ip_dst.ipv4_addr;
+       }
+
+       return 0;
+}
+
+/*
+ * Adds or updates a signature filter.
+ *
+ * dev: ethernet device to add filter to
+ * fdir_filter: filter details
+ * queue: queue index to direct traffic to
+ * update: 0 to add a new filter, otherwise update existing.
+ *
+ * Returns 0 on success, -ENOSYS on non-82599 MACs, or a negative value
+ * from the filter conversion.
+ */
+static int
+fdir_add_update_signature_filter(struct rte_eth_dev *dev,
+               struct rte_fdir_filter *fdir_filter, uint8_t queue, int update)
+{
+       struct ixgbe_hw *hw= IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint32_t fdircmd_flags = (update) ? IXGBE_FDIRCMD_FILTER_UPDATE : 0;
+       uint32_t fdirhash;
+       union ixgbe_atr_input input;
+       int err;
+
+       if (hw->mac.type != ixgbe_mac_82599EB)
+               return -ENOSYS;
+
+       err = fdir_filter_to_atr_input(fdir_filter, &input);
+       if (err)
+               return err;
+
+       fdirhash = atr_compute_sig_hash_82599(&input,
+                       dev->data->dev_conf.fdir_conf.pballoc);
+       fdir_add_signature_filter_82599(hw, &input, queue, fdircmd_flags,
+                       fdirhash);
+       return 0;
+}
+
+/* Public entry point: add a new flow director signature filter. */
+int
+ixgbe_fdir_add_signature_filter(struct rte_eth_dev *dev,
+               struct rte_fdir_filter *fdir_filter, uint8_t queue)
+{
+       PMD_INIT_FUNC_TRACE();
+       /* update == 0: request creation of a new filter */
+       return fdir_add_update_signature_filter(dev, fdir_filter, queue, 0);
+}
+
+/* Public entry point: update an existing flow director signature filter. */
+int
+ixgbe_fdir_update_signature_filter(struct rte_eth_dev *dev,
+               struct rte_fdir_filter *fdir_filter, uint8_t queue)
+{
+       PMD_INIT_FUNC_TRACE();
+       /* update == 1: modify an already-programmed filter */
+       return fdir_add_update_signature_filter(dev, fdir_filter, queue, 1);
+}
+
+/*
+ * This is based on ixgbe_fdir_erase_perfect_filter_82599() in
+ * ixgbe/ixgbe_82599.c. It is modified to take in the hash as a parameter so
+ * that it can be used for removing signature and perfect filters.
+ *
+ * Returns 0 on success, -EIO if the hardware query did not complete in time.
+ */
+static s32
+fdir_erase_filter_82599(struct ixgbe_hw *hw, union ixgbe_atr_input *input,
+               uint32_t fdirhash)
+{
+       u32 fdircmd = 0;
+       u32 retry_count;
+       s32 err = 0;
+
+       IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
+
+       /* flush hash to HW */
+       IXGBE_WRITE_FLUSH(hw);
+
+       /* Query if filter is present */
+       IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT);
+
+       /* poll until the CMD field clears, i.e. the query has completed */
+       for (retry_count = 10; retry_count; retry_count--) {
+               /* allow 10us for query to process */
+               usec_delay(10);
+               /* verify query completed successfully */
+               fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
+               if (!(fdircmd & IXGBE_FDIRCMD_CMD_MASK))
+                       break;
+       }
+
+       if (!retry_count) {
+               PMD_INIT_LOG(ERR, "Timeout querying for flow director filter");
+               err = -EIO;
+       }
+
+       /* if filter exists in hardware then remove it */
+       /* NOTE(review): on timeout fdircmd holds the last polled value, so
+        * this check may act on incomplete query data; matches base driver. */
+       if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) {
+               IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
+               IXGBE_WRITE_FLUSH(hw);
+               IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
+                               IXGBE_FDIRCMD_CMD_REMOVE_FLOW);
+       }
+
+       return err;
+}
+
+/* Remove a signature filter previously added via
+ * ixgbe_fdir_add_signature_filter(). */
+int
+ixgbe_fdir_remove_signature_filter(struct rte_eth_dev *dev,
+               struct rte_fdir_filter *fdir_filter)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       union ixgbe_atr_input input;
+       int err;
+
+       PMD_INIT_FUNC_TRACE();
+
+       /* flow director is only implemented for the 82599 MAC */
+       if (hw->mac.type != ixgbe_mac_82599EB)
+               return -ENOSYS;
+
+       err = fdir_filter_to_atr_input(fdir_filter, &input);
+       if (err)
+               return err;
+
+       /* recompute the signature hash to locate the filter in hardware */
+       return fdir_erase_filter_82599(hw, &input,
+                       atr_compute_sig_hash_82599(&input,
+                       dev->data->dev_conf.fdir_conf.pballoc));
+}
+
+/**
+ * This is based on ixgbe_get_fdirtcpm_82599(), in ixgbe/ixgbe_82599.c. It no
+ * longer does the byte reordering.
+ *
+ *  generate a tcp port mask word from atr_input_masks
+ *  @input_mask: masks to be bit swapped
+ *
+ *  The hardware stores the source and destination port masks bit swapped:
+ *  bit 15 affects bit 0, bit 14 affects bit 1, and so on.  Build the
+ *  combined dst/src port word and reverse the bits of each 16-bit half to
+ *  obtain the value the FDIRTCPM/FDIRUDPM registers expect.
+ **/
+static uint32_t
+get_fdirtcpm_82599(struct rte_fdir_masks *input_mask)
+{
+       u32 rev;
+
+       /* destination mask in the high half, source mask in the low half */
+       rev = input_mask->dst_port_mask;
+       rev <<= IXGBE_FDIRTCPM_DPORTM_SHIFT;
+       rev |= input_mask->src_port_mask;
+
+       /* swap adjacent bits, then nibble pairs, then nibbles, then bytes */
+       rev = ((rev & 0x55555555) << 1) | ((rev & 0xAAAAAAAA) >> 1);
+       rev = ((rev & 0x33333333) << 2) | ((rev & 0xCCCCCCCC) >> 2);
+       rev = ((rev & 0x0F0F0F0F) << 4) | ((rev & 0xF0F0F0F0) >> 4);
+       rev = ((rev & 0x00FF00FF) << 8) | ((rev & 0xFF00FF00) >> 8);
+       return rev;
+}
+
+/*
+ * This macro exists in ixgbe/ixgbe_82599.c, however in that file it reverses
+ * the bytes, and then reverses them again. So here it does nothing: values
+ * are written to the register exactly as passed in.
+ */
+#define IXGBE_WRITE_REG_BE32 IXGBE_WRITE_REG
+
+/*
+ * This is based on ixgbe_fdir_set_input_mask_82599() in ixgbe/ixgbe_82599.c,
+ * but makes use of the rte_fdir_masks structure to see which bits to set.
+ *
+ * Returns IXGBE_SUCCESS, or -EINVAL if port masks are supplied for an
+ * IP-only flow (where no L4 header is inspected).
+ */
+static int
+fdir_set_input_mask_82599(struct ixgbe_hw *hw,
+               struct rte_fdir_masks *input_mask)
+{
+       /* mask VM pool and IPv6 since it is currently not supported */
+       u32 fdirm = IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6;
+       u32 fdirtcpm;
+
+       PMD_INIT_FUNC_TRACE();
+
+       /*
+        * Program the relevant mask registers.  If src/dst_port or src/dst_addr
+        * are zero, then assume a full mask for that field.  Also assume that
+        * a VLAN of 0 is unspecified, so mask that out as well.  L4type
+        * cannot be masked out in this implementation.
+        *
+        * This also assumes IPv4 only.  IPv6 masking isn't supported at this
+        * point in time.
+        */
+       if (input_mask->only_ip_flow) {
+               /* use the L4 protocol mask for raw IPv4/IPv6 traffic */
+               fdirm |= IXGBE_FDIRM_L4P;
+               /* port masks are meaningless when L4 is not inspected */
+               if (input_mask->dst_port_mask || input_mask->src_port_mask) {
+                       /* no trailing \n: PMD_INIT_LOG already appends one */
+                       PMD_INIT_LOG(ERR, " Error on src/dst port mask");
+                       return -EINVAL;
+               }
+       }
+
+       if (!input_mask->vlan_id)
+               /* mask VLAN ID*/
+               fdirm |= IXGBE_FDIRM_VLANID;
+
+       if (!input_mask->vlan_prio)
+               /* mask VLAN priority */
+               fdirm |= IXGBE_FDIRM_VLANP;
+
+       if (!input_mask->flexbytes)
+               /* Mask Flex Bytes */
+               fdirm |= IXGBE_FDIRM_FLEX;
+
+       IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
+
+       /* store the TCP/UDP port masks, bit reversed from port layout */
+       fdirtcpm = get_fdirtcpm_82599(input_mask);
+
+       /* write both the same so that UDP and TCP use the same mask */
+       IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
+       IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
+
+       /* store source and destination IP masks (big-endian) */
+       IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
+                       IXGBE_NTOHL(~input_mask->src_ipv4_mask));
+       IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
+                       IXGBE_NTOHL(~input_mask->dst_ipv4_mask));
+
+       return IXGBE_SUCCESS;
+}
+
+/* Reinitialise the flow director tables and program the filter masks. */
+int
+ixgbe_fdir_set_masks(struct rte_eth_dev *dev, struct rte_fdir_masks *fdir_masks)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       PMD_INIT_FUNC_TRACE();
+
+       /* flow director is only implemented for the 82599 MAC */
+       if (hw->mac.type != ixgbe_mac_82599EB)
+               return -ENOSYS;
+
+       /* tables must be brought back to a clean state before re-masking */
+       if (ixgbe_reinit_fdir_tables_82599(hw) != 0) {
+               PMD_INIT_LOG(ERR, "reinit of fdir tables failed");
+               return -EIO;
+       }
+
+       return fdir_set_input_mask_82599(hw, fdir_masks);
+}
+
+/* Compute the perfect-filter bucket hash, selecting the bucket mask that
+ * matches the amount of packet-buffer memory allocated to flow director. */
+static uint32_t
+atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
+               enum rte_fdir_pballoc_type pballoc)
+{
+       uint32_t bucket_mask;
+
+       switch (pballoc) {
+       case RTE_FDIR_PBALLOC_256K:
+               bucket_mask = PERFECT_BUCKET_256KB_HASH_MASK;
+               break;
+       case RTE_FDIR_PBALLOC_128K:
+               bucket_mask = PERFECT_BUCKET_128KB_HASH_MASK;
+               break;
+       default:
+               bucket_mask = PERFECT_BUCKET_64KB_HASH_MASK;
+               break;
+       }
+       return ixgbe_atr_compute_hash_82599(input, IXGBE_ATR_BUCKET_HASH_KEY) &
+                       bucket_mask;
+}
+
+/*
+ * This is based on ixgbe_fdir_write_perfect_filter_82599() in
+ * ixgbe/ixgbe_82599.c, with the ability to set extra flags in FDIRCMD register
+ * added, and IPv6 support also added. The hash value is also pre-calculated
+ * as the pballoc value is needed to do it.
+ *
+ * Register write order matters: all filter fields are programmed first,
+ * flushed, and only then is FDIRCMD written to issue the add command.
+ */
+static void
+fdir_write_perfect_filter_82599(struct ixgbe_hw *hw, union ixgbe_atr_input *input,
+               uint16_t soft_id, uint8_t queue, uint32_t fdircmd,
+               uint32_t fdirhash)
+{
+       u32 fdirport, fdirvlan;
+
+       /* record the source address (big-endian) */
+       if (input->formatted.flow_type & IXGBE_ATR_L4TYPE_IPV6_MASK) {
+               IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0), input->formatted.src_ip[0]);
+               IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1), input->formatted.src_ip[1]);
+               IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2), input->formatted.src_ip[2]);
+               IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[3]);
+       }
+       else {
+               IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]);
+       }
+
+       /* record the first 32 bits of the destination address (big-endian) */
+       IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);
+
+       /* record source and destination port (little-endian)*/
+       fdirport = IXGBE_NTOHS(input->formatted.dst_port);
+       fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
+       fdirport |= IXGBE_NTOHS(input->formatted.src_port);
+       IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
+
+       /* record vlan (little-endian) and flex_bytes(big-endian) */
+       fdirvlan = input->formatted.flex_bytes;
+       fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
+       fdirvlan |= IXGBE_NTOHS(input->formatted.vlan_id);
+       IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);
+
+       /* configure FDIRHASH register: embed the software index in the hash */
+       fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
+       IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
+
+       /*
+        * flush all previous writes to make certain registers are
+        * programmed prior to issuing the command
+        */
+       IXGBE_WRITE_FLUSH(hw);
+
+       /* configure FDIRCMD register; writing it triggers the operation */
+       fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW |
+                 IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
+       fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
+       fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
+       fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT;
+
+       IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
+}
+
+/*
+ * Add a new perfect filter, or update an existing one.
+ *
+ * dev: ethernet device to add filter to
+ * fdir_filter: filter details
+ * soft_id: software index for the filters
+ * queue: queue index to direct traffic to
+ * drop: non-zero if packets should be sent to the drop queue
+ * update: 0 to add a new filter, otherwise update existing.
+ */
+static int
+fdir_add_update_perfect_filter(struct rte_eth_dev *dev,
+               struct rte_fdir_filter *fdir_filter, uint16_t soft_id,
+               uint8_t queue, int drop, int update)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       union ixgbe_atr_input input;
+       uint32_t fdircmd_flags;
+       uint32_t fdirhash;
+       int err;
+
+       /* flow director filters are only implemented for the 82599 MAC */
+       if (hw->mac.type != ixgbe_mac_82599EB)
+               return -ENOSYS;
+
+       err = fdir_filter_to_atr_input(fdir_filter, &input);
+       if (err)
+               return err;
+
+       fdircmd_flags = update ? IXGBE_FDIRCMD_FILTER_UPDATE : 0;
+       if (drop) {
+               /* divert matching packets to the configured drop queue */
+               queue = dev->data->dev_conf.fdir_conf.drop_queue;
+               fdircmd_flags |= IXGBE_FDIRCMD_DROP;
+       }
+
+       fdirhash = atr_compute_perfect_hash_82599(&input,
+                       dev->data->dev_conf.fdir_conf.pballoc);
+       fdir_write_perfect_filter_82599(hw, &input, soft_id, queue,
+                       fdircmd_flags, fdirhash);
+       return 0;
+}
+
+/* Public entry point: add a new flow director perfect filter. */
+int
+ixgbe_fdir_add_perfect_filter(struct rte_eth_dev *dev,
+               struct rte_fdir_filter *fdir_filter, uint16_t soft_id,
+               uint8_t queue, uint8_t drop)
+{
+       PMD_INIT_FUNC_TRACE();
+       /* update == 0: request creation of a new filter */
+       return fdir_add_update_perfect_filter(dev, fdir_filter, soft_id, queue,
+                       drop, 0);
+}
+
+/* Public entry point: update an existing flow director perfect filter. */
+int
+ixgbe_fdir_update_perfect_filter(struct rte_eth_dev *dev,
+               struct rte_fdir_filter *fdir_filter, uint16_t soft_id,
+               uint8_t queue, uint8_t drop)
+{
+       PMD_INIT_FUNC_TRACE();
+       /* update == 1: modify an already-programmed filter */
+       return fdir_add_update_perfect_filter(dev, fdir_filter, soft_id, queue,
+                       drop, 1);
+}
+
+/* Remove a perfect filter previously added via
+ * ixgbe_fdir_add_perfect_filter(); soft_id must match the one used then,
+ * since it is folded into the hash used to locate the filter. */
+int
+ixgbe_fdir_remove_perfect_filter(struct rte_eth_dev *dev,
+               struct rte_fdir_filter *fdir_filter,
+               uint16_t soft_id)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       union ixgbe_atr_input input;
+       uint32_t fdirhash;
+       int err;
+
+       PMD_INIT_FUNC_TRACE();
+
+       /* flow director is only implemented for the 82599 MAC */
+       if (hw->mac.type != ixgbe_mac_82599EB)
+               return -ENOSYS;
+
+       err = fdir_filter_to_atr_input(fdir_filter, &input);
+       if (err)
+               return err;
+
+       /* configure FDIRHASH register */
+       fdirhash = atr_compute_perfect_hash_82599(&input,
+                       dev->data->dev_conf.fdir_conf.pballoc);
+       fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
+
+       return fdir_erase_filter_82599(hw, &input, fdirhash);
+}
+
+/* Fill *fdir with flow director statistics read from the hardware.
+ * Silently returns (leaving *fdir untouched) on non-82599 MACs. */
+void
+ixgbe_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir *fdir)
+{
+       struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_hw_fdir_info *info =
+                       IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+       uint32_t reg;
+
+       if (hw->mac.type != ixgbe_mac_82599EB)
+               return;
+
+       /* Get the information from registers */
+       reg = IXGBE_READ_REG(hw, IXGBE_FDIRFREE);
+       info->collision = (uint16_t)((reg & IXGBE_FDIRFREE_COLL_MASK) >>
+                                       IXGBE_FDIRFREE_COLL_SHIFT);
+       info->free = (uint16_t)((reg & IXGBE_FDIRFREE_FREE_MASK) >>
+                                  IXGBE_FDIRFREE_FREE_SHIFT);
+
+       reg = IXGBE_READ_REG(hw, IXGBE_FDIRLEN);
+       info->maxhash = (uint16_t)((reg & IXGBE_FDIRLEN_MAXHASH_MASK) >>
+                                     IXGBE_FDIRLEN_MAXHASH_SHIFT);
+       info->maxlen  = (uint8_t)((reg & IXGBE_FDIRLEN_MAXLEN_MASK) >>
+                                    IXGBE_FDIRLEN_MAXLEN_SHIFT);
+
+       /* add/remove counters accumulate in the driver copy (note the +=) */
+       reg = IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
+       info->remove += (reg & IXGBE_FDIRUSTAT_REMOVE_MASK) >>
+               IXGBE_FDIRUSTAT_REMOVE_SHIFT;
+       info->add += (reg & IXGBE_FDIRUSTAT_ADD_MASK) >>
+               IXGBE_FDIRUSTAT_ADD_SHIFT;
+
+       reg = IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT) & 0xFFFF;
+       info->f_remove += (reg & IXGBE_FDIRFSTAT_FREMOVE_MASK) >>
+               IXGBE_FDIRFSTAT_FREMOVE_SHIFT;
+       info->f_add += (reg & IXGBE_FDIRFSTAT_FADD_MASK) >>
+               IXGBE_FDIRFSTAT_FADD_SHIFT;
+
+       /*  Copy the new information in the fdir parameter */
+       fdir->collision = info->collision;
+       fdir->free = info->free;
+       fdir->maxhash = info->maxhash;
+       fdir->maxlen = info->maxlen;
+       fdir->remove = info->remove;
+       fdir->add = info->add;
+       fdir->f_remove = info->f_remove;
+       fdir->f_add = info->f_add;
+}
diff --git a/lib/librte_pmd_ixgbe/ixgbe_logs.h b/lib/librte_pmd_ixgbe/ixgbe_logs.h
new file mode 100644 (file)
index 0000000..e8929cc
--- /dev/null
@@ -0,0 +1,76 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#ifndef _IXGBE_LOGS_H_
+#define _IXGBE_LOGS_H_
+
+/*
+ * Per-subsystem logging macros for the ixgbe PMD.  Each group expands to a
+ * real RTE_LOG() call (prefixed with the calling function's name) only when
+ * the matching RTE_LIBRTE_IXGBE_DEBUG_* config option is defined; otherwise
+ * it compiles away to nothing.
+ */
+
+/* init/configuration path logging */
+#ifdef RTE_LIBRTE_IXGBE_DEBUG_INIT
+#define PMD_INIT_LOG(level, fmt, args...) \
+       RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
+#else
+#define PMD_INIT_LOG(level, fmt, args...) do { } while(0)
+#define PMD_INIT_FUNC_TRACE() do { } while(0)
+#endif
+
+/* receive fast-path logging */
+#ifdef RTE_LIBRTE_IXGBE_DEBUG_RX
+#define PMD_RX_LOG(level, fmt, args...) \
+       RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_RX_LOG(level, fmt, args...) do { } while(0)
+#endif
+
+/* transmit fast-path logging */
+#ifdef RTE_LIBRTE_IXGBE_DEBUG_TX
+#define PMD_TX_LOG(level, fmt, args...) \
+       RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_TX_LOG(level, fmt, args...) do { } while(0)
+#endif
+
+/* TX descriptor reclaim logging */
+#ifdef RTE_LIBRTE_IXGBE_DEBUG_TX_FREE
+#define PMD_TX_FREE_LOG(level, fmt, args...) \
+       RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_TX_FREE_LOG(level, fmt, args...) do { } while(0)
+#endif
+
+/* general driver logging */
+#ifdef RTE_LIBRTE_IXGBE_DEBUG_DRIVER
+#define PMD_DRV_LOG(level, fmt, args...) \
+       RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_DRV_LOG(level, fmt, args...) do { } while(0)
+#endif
+
+#endif /* _IXGBE_LOGS_H_ */
diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
new file mode 100644 (file)
index 0000000..aa698a3
--- /dev/null
@@ -0,0 +1,2445 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <sys/queue.h>
+
+#include <endian.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <stdint.h>
+#include <stdarg.h>
+#include <unistd.h>
+#include <inttypes.h>
+
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_launch.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_ring.h>
+#include <rte_mempool.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_prefetch.h>
+#include <rte_udp.h>
+#include <rte_tcp.h>
+#include <rte_sctp.h>
+#include <rte_string_fns.h>
+#include <rte_errno.h>
+
+#include "ixgbe_logs.h"
+#include "ixgbe/ixgbe_api.h"
+#include "ixgbe/ixgbe_vf.h"
+#include "ixgbe_ethdev.h"
+
+/* Allocate a raw mbuf from the pool for RX ring replenishment.
+ * NOTE(review): no NULL check here; callers are expected to test the
+ * returned pointer themselves -- confirm against call sites. */
+static inline struct rte_mbuf *
+rte_rxmbuf_alloc(struct rte_mempool *mp)
+{
+       struct rte_mbuf *m;
+
+       /* raw alloc bypasses the full reset done by rte_pktmbuf_alloc() */
+       m = __rte_mbuf_raw_alloc(mp);
+       __rte_mbuf_sanity_check_raw(m, RTE_MBUF_PKT, 0);
+       return (m);
+}
+
+/* Physical (DMA) address of the packet data in an mbuf: buffer physical
+ * address plus the current offset of pkt.data within the buffer. */
+#define RTE_MBUF_DATA_DMA_ADDR(mb) \
+       (uint64_t) ((mb)->buf_physaddr + (uint64_t)((char *)((mb)->pkt.data) - \
+       (char *)(mb)->buf_addr))
+
+/* DMA address for a freshly allocated mbuf: data starts after the headroom. */
+#define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
+       (uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
+
+/**
+ * Structure associated with each descriptor of the RX ring of a RX queue.
+ * NOTE(review): the igb_ prefix is used although this is the ixgbe PMD.
+ */
+struct igb_rx_entry {
+       struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
+};
+
+/**
+ * Structure associated with each descriptor of the TX ring of a TX queue.
+ * Tracks the backing mbuf plus ring-linkage indices used when a packet
+ * spans several descriptors.
+ */
+struct igb_tx_entry {
+       struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
+       uint16_t next_id; /**< Index of next descriptor in ring. */
+       uint16_t last_id; /**< Index of last scattered descriptor. */
+};
+
+/**
+ * Structure associated with each RX queue.
+ * Holds the hardware descriptor ring, its shadow software ring and the
+ * running state (tail index, hold count, thresholds) used by the RX path.
+ */
+struct igb_rx_queue {
+       struct rte_mempool  *mb_pool; /**< mbuf pool to populate RX ring. */
+       volatile union ixgbe_adv_rx_desc *rx_ring; /**< RX ring virtual address. */
+       uint64_t            rx_ring_phys_addr; /**< RX ring DMA address. */
+       volatile uint32_t   *rdt_reg_addr; /**< RDT register address. */
+       struct igb_rx_entry *sw_ring; /**< address of RX software ring. */
+       struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
+       struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */
+       uint16_t            nb_rx_desc; /**< number of RX descriptors. */
+       uint16_t            rx_tail;  /**< current value of RDT register. */
+       uint16_t            nb_rx_hold; /**< number of held free RX desc. */
+       uint16_t            rx_free_thresh; /**< max free RX desc to hold. */
+       uint16_t            queue_id; /**< RX queue index. */
+       uint8_t             port_id;  /**< Device port identifier. */
+       uint8_t             crc_len;  /**< 0 if CRC stripped, 4 otherwise. */
+};
+
+/**
+ * IXGBE CTX Constants.
+ * Indices of the two hardware offload context slots cached per TX queue;
+ * IXGBE_CTX_NUM is returned by what_advctx_update() to signal "no match".
+ */
+enum ixgbe_advctx_num {
+       IXGBE_CTX_0    = 0, /**< CTX0 */
+       IXGBE_CTX_1    = 1, /**< CTX1  */
+       IXGBE_CTX_NUM  = 2, /**< CTX NUMBER  */
+};
+
+/**
+ * Structure to check if new context need be built.
+ * Caches the parameters of a previously programmed TX offload context so
+ * they can be compared against each outgoing packet.
+ */
+struct ixgbe_advctx_info {
+       uint16_t flags;           /**< ol_flags for context build. */
+       uint32_t cmp_mask;        /**< compare mask for vlan_macip_lens */
+       uint32_t vlan_macip_lens; /**< vlan, mac ip length. */
+};
+
+/**
+ * Structure associated with each TX queue.
+ * Holds the hardware descriptor ring, its shadow software ring, reclaim
+ * bookkeeping and the cached offload contexts used by the TX path.
+ */
+struct igb_tx_queue {
+       /** TX ring virtual address. */
+       volatile union ixgbe_adv_tx_desc *tx_ring;
+       uint64_t            tx_ring_phys_addr; /**< TX ring DMA address. */
+       struct igb_tx_entry *sw_ring;      /**< virtual address of SW ring. */
+       volatile uint32_t   *tdt_reg_addr; /**< Address of TDT register. */
+       uint16_t            nb_tx_desc;    /**< number of TX descriptors. */
+       uint16_t            tx_tail;       /**< current value of TDT reg. */
+       uint16_t            tx_free_thresh;/**< minimum TX before freeing. */
+       /** Number of TX descriptors to use before RS bit is set. */
+       uint16_t            tx_rs_thresh;
+       /** Number of TX descriptors used since RS bit was set. */
+       uint16_t            nb_tx_used;
+       /** Index to last TX descriptor to have been cleaned. */
+       uint16_t            last_desc_cleaned;
+       /** Total number of TX descriptors ready to be allocated. */
+       uint16_t            nb_tx_free;
+       uint16_t            queue_id;      /**< TX queue index. */
+       uint8_t             port_id;       /**< Device port identifier. */
+       uint8_t             pthresh;       /**< Prefetch threshold register. */
+       uint8_t             hthresh;       /**< Host threshold register. */
+       uint8_t             wthresh;       /**< Write-back threshold reg. */
+       uint32_t            ctx_curr;      /**< Hardware context states. */
+       /** Hardware context0 history. */
+       struct ixgbe_advctx_info ctx_cache[IXGBE_CTX_NUM];
+};
+
+
+/* Compile-time switch for cache prefetching in the RX/TX fast paths. */
+#if 1
+#define RTE_PMD_USE_PREFETCH
+#endif
+
+#ifdef RTE_PMD_USE_PREFETCH
+/*
+ * Prefetch a cache line into all cache levels.
+ */
+#define rte_ixgbe_prefetch(p)   rte_prefetch0(p)
+#else
+#define rte_ixgbe_prefetch(p)   do {} while(0)
+#endif
+
+/* packet-data prefetch (into L2/L3 only) when enabled at build time */
+#ifdef RTE_PMD_PACKET_PREFETCH
+#define rte_packet_prefetch(p)  rte_prefetch1(p)
+#else
+#define rte_packet_prefetch(p)  do {} while(0)
+#endif
+
+/*********************************************************************
+ *
+ *  TX functions
+ *
+ **********************************************************************/
+/* Fill an advanced TX context descriptor with the checksum/VLAN offload
+ * parameters of a packet, and record those parameters in the queue's
+ * context cache so later packets can reuse the same hardware context. */
+static inline void
+ixgbe_set_xmit_ctx(struct igb_tx_queue* txq,
+               volatile struct ixgbe_adv_tx_context_desc *ctx_txd,
+               uint16_t ol_flags, uint32_t vlan_macip_lens)
+{
+       uint32_t type_tucmd_mlhl;
+       uint32_t mss_l4len_idx;
+       uint32_t ctx_idx;
+       uint32_t cmp_mask;
+
+       ctx_idx = txq->ctx_curr;
+       cmp_mask = 0;
+       type_tucmd_mlhl = 0;
+
+       /* VLAN insertion requested: the VLAN field becomes significant */
+       if (ol_flags & PKT_TX_VLAN_PKT) {
+               cmp_mask |= TX_VLAN_CMP_MASK;
+       }
+
+       /* IPv4 header checksum offload: MAC length becomes significant */
+       if (ol_flags & PKT_TX_IP_CKSUM) {
+               type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV4;
+               cmp_mask |= TX_MAC_LEN_CMP_MASK;
+       }
+
+       /* Specify which HW CTX to upload. */
+       mss_l4len_idx = (ctx_idx << IXGBE_ADVTXD_IDX_SHIFT);
+       switch (ol_flags & PKT_TX_L4_MASK) {
+       case PKT_TX_UDP_CKSUM:
+               type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP |
+                               IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
+               mss_l4len_idx |= sizeof(struct udp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
+               cmp_mask |= TX_MACIP_LEN_CMP_MASK;
+               break;
+       case PKT_TX_TCP_CKSUM:
+               type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP |
+                               IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
+               mss_l4len_idx |= sizeof(struct tcp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
+               cmp_mask |= TX_MACIP_LEN_CMP_MASK;
+               break;
+       case PKT_TX_SCTP_CKSUM:
+               type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_SCTP |
+                               IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
+               mss_l4len_idx |= sizeof(struct sctp_hdr) << IXGBE_ADVTXD_L4LEN_SHIFT;
+               cmp_mask |= TX_MACIP_LEN_CMP_MASK;
+               break;
+       default:
+               type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_RSV |
+                               IXGBE_ADVTXD_DTYP_CTXT | IXGBE_ADVTXD_DCMD_DEXT;
+               break;
+       }
+
+       /* cache the parameters so what_advctx_update() can detect reuse */
+       txq->ctx_cache[ctx_idx].flags = ol_flags;
+       txq->ctx_cache[ctx_idx].cmp_mask = cmp_mask;
+       txq->ctx_cache[ctx_idx].vlan_macip_lens = vlan_macip_lens & cmp_mask;
+
+       /* descriptor fields are written little-endian for the hardware */
+       ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
+       ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
+       ctx_txd->mss_l4len_idx   = rte_cpu_to_le_32(mss_l4len_idx);
+       ctx_txd->seqnum_seed     = 0;
+}
+
+/*
+ * Check which hardware context can be used. Use the existing match
+ * or create a new context descriptor.
+ * Returns the index of the matching cached context, or IXGBE_CTX_NUM
+ * if neither slot matches and a new context must be built.
+ */
+static inline uint32_t
+what_advctx_update(struct igb_tx_queue *txq, uint16_t flags,
+               uint32_t vlan_macip_lens)
+{
+       /* If match with the current used context */
+       if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
+               (txq->ctx_cache[txq->ctx_curr].vlan_macip_lens ==
+               (txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) {
+                       return txq->ctx_curr;
+       }
+
+       /* What if match with the next context  */
+       /* NOTE: ctx_curr stays toggled on a miss, so a following
+        * ixgbe_set_xmit_ctx() will overwrite the older cache slot. */
+       txq->ctx_curr ^= 1;
+       if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
+               (txq->ctx_cache[txq->ctx_curr].vlan_macip_lens ==
+               (txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) {
+                       return txq->ctx_curr;
+       }
+
+       /* Mismatch, use the previous context */
+       return (IXGBE_CTX_NUM);
+}
+
+/* Translate mbuf checksum-offload flags into the POPTS bits of an
+ * advanced TX data descriptor's olinfo_status field. */
+static inline uint32_t
+tx_desc_cksum_flags_to_olinfo(uint16_t ol_flags)
+{
+       uint32_t olinfo = 0;
+
+       /* any requested L4 checksum sets TXSM */
+       if ((ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM)
+               olinfo |= IXGBE_ADVTXD_POPTS_TXSM;
+       /* a requested IPv4 header checksum sets IXSM */
+       if ((ol_flags & PKT_TX_IP_CKSUM) != 0)
+               olinfo |= IXGBE_ADVTXD_POPTS_IXSM;
+       return olinfo;
+}
+
+/* Return the VLE descriptor command bit when VLAN insertion is requested,
+ * zero otherwise. */
+static inline uint32_t
+tx_desc_vlan_flags_to_cmdtype(uint16_t ol_flags)
+{
+       if ((ol_flags & PKT_TX_VLAN_PKT) != 0)
+               return IXGBE_ADVTXD_DCMD_VLE;
+       return 0;
+}
+
+/* Default RS bit threshold values */
+/* NOTE(review): overridable at build time via the #ifndef guards. */
+#ifndef DEFAULT_TX_RS_THRESH
+#define DEFAULT_TX_RS_THRESH   32
+#endif
+#ifndef DEFAULT_TX_FREE_THRESH
+#define DEFAULT_TX_FREE_THRESH 32
+#endif
+
+/* Reset transmit descriptors after they have been used */
+/* Attempts to reclaim tx_rs_thresh descriptors past the last cleaned one.
+ * Returns 0 on success, -1 if the hardware has not finished with them yet
+ * (indicated by the DD bit still being clear). */
+static inline int
+ixgbe_xmit_cleanup(struct igb_tx_queue *txq)
+{
+       struct igb_tx_entry *sw_ring = txq->sw_ring;
+       volatile union ixgbe_adv_tx_desc *txr = txq->tx_ring;
+       uint16_t last_desc_cleaned = txq->last_desc_cleaned;
+       uint16_t nb_tx_desc = txq->nb_tx_desc;
+       uint16_t desc_to_clean_to;
+       uint16_t nb_tx_to_clean;
+
+       /* Determine the last descriptor needing to be cleaned */
+       desc_to_clean_to = last_desc_cleaned + txq->tx_rs_thresh;
+       if (desc_to_clean_to >= nb_tx_desc)
+               desc_to_clean_to = desc_to_clean_to - nb_tx_desc;
+
+       /* Check to make sure the last descriptor to clean is done */
+       desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
+       if (! (txr[desc_to_clean_to].wb.status & IXGBE_TXD_STAT_DD))
+       {
+               PMD_TX_FREE_LOG(DEBUG,
+                               "TX descriptor %4u is not done"
+                               "(port=%d queue=%d)",
+                               desc_to_clean_to,
+                               txq->port_id, txq->queue_id);
+               /* Failed to clean any descriptors, better luck next time */
+               return -(1);
+       }
+
+       /* Figure out how many descriptors will be cleaned */
+       /* the first branch handles wrap-around of the ring index */
+       if (last_desc_cleaned > desc_to_clean_to)
+               nb_tx_to_clean = ((nb_tx_desc - last_desc_cleaned) +
+                                 desc_to_clean_to);
+       else
+               nb_tx_to_clean = desc_to_clean_to - last_desc_cleaned;
+
+       PMD_TX_FREE_LOG(DEBUG,
+                       "Cleaning %4u TX descriptors: %4u to %4u "
+                       "(port=%d queue=%d)",
+                       nb_tx_to_clean, last_desc_cleaned, desc_to_clean_to,
+                       txq->port_id, txq->queue_id);
+
+       /*
+        * The last descriptor to clean is done, so that means all the
+        * descriptors from the last descriptor that was cleaned
+        * up to the last descriptor with the RS bit set
+        * are done. Only reset the threshold descriptor.
+        */
+       txr[desc_to_clean_to].wb.status = 0;
+
+       /* Update the txq to reflect the last descriptor that was cleaned */
+       txq->last_desc_cleaned = desc_to_clean_to;
+       txq->nb_tx_free += nb_tx_to_clean;
+
+       /* No Error */
+       return (0);
+}
+
+/*
+ * Transmit a burst of packets: place up to nb_pkts mbufs from tx_pkts on
+ * the TX ring of queue txq.  Each packet consumes one data descriptor per
+ * mbuf segment, plus one context descriptor when a new hardware offload
+ * context must be programmed.  Returns the number of packets queued.
+ */
+uint16_t
+ixgbe_xmit_pkts(struct igb_tx_queue *txq, struct rte_mbuf **tx_pkts,
+               uint16_t nb_pkts)
+{
+       struct igb_tx_entry *sw_ring;
+       struct igb_tx_entry *txe, *txn;
+       volatile union ixgbe_adv_tx_desc *txr;
+       volatile union ixgbe_adv_tx_desc *txd;
+       struct rte_mbuf     *tx_pkt;
+       struct rte_mbuf     *m_seg;
+       uint64_t buf_dma_addr;
+       uint32_t olinfo_status;
+       uint32_t cmd_type_len;
+       uint32_t pkt_len;
+       uint16_t slen;
+       uint16_t ol_flags;
+       uint16_t tx_id;
+       uint16_t tx_last;
+       uint16_t nb_tx;
+       uint16_t nb_used;
+       uint16_t tx_ol_req;
+       uint32_t vlan_macip_lens;
+       uint32_t ctx;
+       uint32_t new_ctx;
+
+       sw_ring = txq->sw_ring;
+       txr     = txq->tx_ring;
+       tx_id   = txq->tx_tail;
+       txe = &sw_ring[tx_id];
+
+       /* Determine if the descriptor ring needs to be cleaned. */
+       if ((txq->nb_tx_desc - txq->nb_tx_free) > txq->tx_free_thresh) {
+               ixgbe_xmit_cleanup(txq);
+       }
+
+       /* TX loop */
+       for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
+               new_ctx = 0;
+               tx_pkt = *tx_pkts++;
+               pkt_len = tx_pkt->pkt.pkt_len;
+
+               /* Prefetch the mbuf that will be freed when this ring slot
+                * is reused below. */
+               RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
+
+               /*
+                * Determine how many (if any) context descriptors
+                * are needed for offload functionality.
+                */
+               ol_flags = tx_pkt->ol_flags;
+               vlan_macip_lens = tx_pkt->pkt.vlan_tci << 16 |
+                               tx_pkt->pkt.l2_len << IXGBE_ADVTXD_MACLEN_SHIFT |
+                               tx_pkt->pkt.l3_len;
+
+               /* If hardware offload required */
+               tx_ol_req = ol_flags & PKT_TX_OFFLOAD_MASK;
+               if (tx_ol_req) {
+                       /* If new context need be built or reuse the exist ctx. */
+                       ctx = what_advctx_update(txq, tx_ol_req, vlan_macip_lens);
+                       /* Only allocate context descriptor if required*/
+                       new_ctx = (ctx == IXGBE_CTX_NUM);
+                       ctx = txq->ctx_curr;
+               }
+
+               /*
+                * Keep track of how many descriptors are used this loop
+                * This will always be the number of segments + the number of
+                * Context descriptors required to transmit the packet
+                */
+               nb_used = tx_pkt->pkt.nb_segs + new_ctx;
+
+               /*
+                * The number of descriptors that must be allocated for a
+                * packet is the number of segments of that packet, plus 1
+                * Context Descriptor for the hardware offload, if any.
+                * Determine the last TX descriptor to allocate in the TX ring
+                * for the packet, starting from the current position (tx_id)
+                * in the ring.
+                */
+               tx_last = (uint16_t) (tx_id + nb_used - 1);
+
+               /* Circular ring */
+               if (tx_last >= txq->nb_tx_desc)
+                       tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
+
+               PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
+                          " tx_first=%u tx_last=%u\n",
+                          (unsigned) txq->port_id,
+                          (unsigned) txq->queue_id,
+                          (unsigned) pkt_len,
+                          (unsigned) tx_id,
+                          (unsigned) tx_last);
+
+               /*
+                * Make sure there are enough TX descriptors available to
+                * transmit the entire packet.
+                * nb_used better be less than or equal to txq->tx_rs_thresh
+                */
+               if (nb_used > txq->nb_tx_free) {
+                       PMD_TX_FREE_LOG(DEBUG,
+                                       "Not enough free TX descriptors "
+                                       "nb_used=%4u nb_free=%4u "
+                                       "(port=%d queue=%d)",
+                                       nb_used, txq->nb_tx_free,
+                                       txq->port_id, txq->queue_id);
+
+                       if (ixgbe_xmit_cleanup(txq) != 0) {
+                               /* Could not clean any descriptors */
+                               if (nb_tx == 0)
+                                       return (0);
+                               goto end_of_tx;
+                       }
+
+                       /* nb_used better be <= txq->tx_rs_thresh */
+                       if (unlikely(nb_used > txq->tx_rs_thresh)) {
+                               PMD_TX_FREE_LOG(DEBUG,
+                                       "The number of descriptors needed to "
+                                       "transmit the packet exceeds the "
+                                       "RS bit threshold. This will impact "
+                                       "performance."
+                                       "nb_used=%4u nb_free=%4u "
+                                       "tx_rs_thresh=%4u. "
+                                       "(port=%d queue=%d)",
+                                       nb_used, txq->nb_tx_free,
+                                       txq->tx_rs_thresh,
+                                       txq->port_id, txq->queue_id);
+                               /*
+                                * Loop here until there are enough TX
+                                * descriptors or until the ring cannot be
+                                * cleaned.
+                                */
+                               while (nb_used > txq->nb_tx_free) {
+                                       if (ixgbe_xmit_cleanup(txq) != 0) {
+                                               /*
+                                                * Could not clean any
+                                                * descriptors
+                                                */
+                                               if (nb_tx == 0)
+                                                       return (0);
+                                               goto end_of_tx;
+                                       }
+                               }
+                       }
+               }
+
+               /*
+                * By now there are enough free TX descriptors to transmit
+                * the packet.
+                */
+
+               /*
+                * Set common flags of all TX Data Descriptors.
+                *
+                * The following bits must be set in all Data Descriptors:
+                *   - IXGBE_ADVTXD_DTYP_DATA
+                *   - IXGBE_ADVTXD_DCMD_DEXT
+                *
+                * The following bits must be set in the first Data Descriptor
+                * and are ignored in the other ones:
+                *   - IXGBE_ADVTXD_DCMD_IFCS
+                *   - IXGBE_ADVTXD_MAC_1588
+                *   - IXGBE_ADVTXD_DCMD_VLE
+                *
+                * The following bits must only be set in the last Data
+                * Descriptor:
+                *   - IXGBE_TXD_CMD_EOP
+                *
+                * The following bits can be set in any Data Descriptor, but
+                * are only set in the last Data Descriptor:
+                *   - IXGBE_TXD_CMD_RS
+                */
+               cmd_type_len = IXGBE_ADVTXD_DTYP_DATA |
+                       IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
+               olinfo_status = (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+#ifdef RTE_LIBRTE_IEEE1588
+               if (ol_flags & PKT_TX_IEEE1588_TMST)
+                       cmd_type_len |= IXGBE_ADVTXD_MAC_1588;
+#endif
+
+               if (tx_ol_req) {
+                       /*
+                        * Setup the TX Advanced Context Descriptor if required
+                        */
+                       if (new_ctx) {
+                               volatile struct ixgbe_adv_tx_context_desc *
+                                   ctx_txd;
+
+                               ctx_txd = (volatile struct
+                                   ixgbe_adv_tx_context_desc *)
+                                   &txr[tx_id];
+
+                               txn = &sw_ring[txe->next_id];
+                               RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
+
+                               /* Free the mbuf previously attached to this
+                                * ring slot, if any. */
+                               if (txe->mbuf != NULL) {
+                                       rte_pktmbuf_free_seg(txe->mbuf);
+                                       txe->mbuf = NULL;
+                               }
+
+                               ixgbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
+                                   vlan_macip_lens);
+
+                               txe->last_id = tx_last;
+                               tx_id = txe->next_id;
+                               txe = txn;
+                       }
+
+                       /*
+                        * Setup the TX Advanced Data Descriptor,
+                        * This path will go through
+                        * whatever new/reuse the context descriptor
+                        */
+                       cmd_type_len  |= tx_desc_vlan_flags_to_cmdtype(ol_flags);
+                       olinfo_status |= tx_desc_cksum_flags_to_olinfo(ol_flags);
+                       olinfo_status |= ctx << IXGBE_ADVTXD_IDX_SHIFT;
+               }
+
+               /* Fill one data descriptor per mbuf segment of the packet. */
+               m_seg = tx_pkt;
+               do {
+                       txd = &txr[tx_id];
+                       txn = &sw_ring[txe->next_id];
+
+                       if (txe->mbuf != NULL)
+                               rte_pktmbuf_free_seg(txe->mbuf);
+                       txe->mbuf = m_seg;
+
+                       /*
+                        * Set up Transmit Data Descriptor.
+                        */
+                       slen = m_seg->pkt.data_len;
+                       buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
+                       txd->read.buffer_addr =
+                               rte_cpu_to_le_64(buf_dma_addr);
+                       txd->read.cmd_type_len =
+                               rte_cpu_to_le_32(cmd_type_len | slen);
+                       txd->read.olinfo_status =
+                               rte_cpu_to_le_32(olinfo_status);
+                       txe->last_id = tx_last;
+                       tx_id = txe->next_id;
+                       txe = txn;
+                       m_seg = m_seg->pkt.next;
+               } while (m_seg != NULL);
+
+               /*
+                * The last packet data descriptor needs End Of Packet (EOP)
+                */
+               cmd_type_len |= IXGBE_TXD_CMD_EOP;
+               txq->nb_tx_used += nb_used;
+               txq->nb_tx_free -= nb_used;
+
+               /* Set RS bit only on threshold packets' last descriptor */
+               if (txq->nb_tx_used >= txq->tx_rs_thresh) {
+                       PMD_TX_FREE_LOG(DEBUG,
+                                       "Setting RS bit on TXD id="
+                                       "%4u (port=%d queue=%d)",
+                                       tx_last, txq->port_id, txq->queue_id);
+
+                       cmd_type_len |= IXGBE_TXD_CMD_RS;
+
+                       /* Update txq RS bit counters */
+                       txq->nb_tx_used = 0;
+               }
+               /* txd still points at the packet's last data descriptor:
+                * apply the EOP (and possibly RS) bits to it. */
+               txd->read.cmd_type_len |= rte_cpu_to_le_32(cmd_type_len);
+       }
+end_of_tx:
+       /* Make sure all descriptor writes are visible before updating the
+        * tail register. */
+       rte_wmb();
+
+       /*
+        * Set the Transmit Descriptor Tail (TDT)
+        */
+       PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
+                  (unsigned) txq->port_id, (unsigned) txq->queue_id,
+                  (unsigned) tx_id, (unsigned) nb_tx);
+       IXGBE_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id);
+       txq->tx_tail = tx_id;
+
+       return (nb_tx);
+}
+
+/*********************************************************************
+ *
+ *  RX functions
+ *
+ **********************************************************************/
+static inline uint16_t
+rx_desc_hlen_type_rss_to_pkt_flags(uint32_t hl_tp_rs)
+{
+       uint16_t pkt_flags;
+
+       static uint16_t ip_pkt_types_map[16] = {
+               0, PKT_RX_IPV4_HDR, PKT_RX_IPV4_HDR_EXT, PKT_RX_IPV4_HDR_EXT,
+               PKT_RX_IPV6_HDR, 0, 0, 0,
+               PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
+               PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
+       };
+
+       static uint16_t ip_rss_types_map[16] = {
+               0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
+               0, PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH,
+               PKT_RX_RSS_HASH, 0, 0, 0,
+               0, 0, 0,  PKT_RX_FDIR,
+       };
+
+#ifdef RTE_LIBRTE_IEEE1588
+       static uint32_t ip_pkt_etqf_map[8] = {
+               0, 0, 0, PKT_RX_IEEE1588_PTP,
+               0, 0, 0, 0,
+       };
+
+       pkt_flags = (uint16_t) ((hl_tp_rs & IXGBE_RXDADV_PKTTYPE_ETQF) ?
+                               ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07] :
+                               ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F]);
+#else
+       pkt_flags = (uint16_t) ((hl_tp_rs & IXGBE_RXDADV_PKTTYPE_ETQF) ? 0 :
+                               ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F]);
+
+#endif
+       return (pkt_flags | ip_rss_types_map[hl_tp_rs & 0xF]);
+}
+
+/*
+ * Translate the status word of an RX descriptor into mbuf offload flags.
+ */
+static inline uint16_t
+rx_desc_status_to_pkt_flags(uint32_t rx_status)
+{
+       uint16_t flags;
+
+       /*
+        * Only report VLAN presence here.
+        * L3/L4 RX checksum validity is not checked; that is derived from
+        * the rte_eth_rxmode.hw_ip_checksum flag.
+        */
+       if (rx_status & IXGBE_RXD_STAT_VP)
+               flags = PKT_RX_VLAN_PKT;
+       else
+               flags = 0;
+
+#ifdef RTE_LIBRTE_IEEE1588
+       /* Report hardware RX timestamping when the TMST bit is set. */
+       if (rx_status & IXGBE_RXD_STAT_TMST)
+               flags |= PKT_RX_IEEE1588_TMST;
+#endif
+       return flags;
+}
+
+/*
+ * Translate the checksum error bits of an RX descriptor status word
+ * (bit 31: IPE, IPv4 checksum error; bit 30: L4I, L4 integrity error)
+ * into the corresponding mbuf offload flags.
+ */
+static inline uint16_t
+rx_desc_error_to_pkt_flags(uint32_t rx_status)
+{
+       static const uint16_t cksum_flags_map[4] = {
+               0,
+               PKT_RX_L4_CKSUM_BAD,
+               PKT_RX_IP_CKSUM_BAD,
+               PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD,
+       };
+       uint32_t idx;
+
+       idx = (rx_status >> IXGBE_RXDADV_ERR_CKSUM_BIT) &
+             IXGBE_RXDADV_ERR_CKSUM_MSK;
+       return cksum_flags_map[idx];
+}
+
+/*
+ * Receive a burst of packets (one mbuf per packet; non-scattered path):
+ * scan the RX ring of rxq for descriptors with the DD bit set, replenish
+ * each consumed descriptor with a freshly allocated mbuf, and store the
+ * received mbufs in rx_pkts.  Returns the number of packets received.
+ */
+uint16_t
+ixgbe_recv_pkts(struct igb_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+               uint16_t nb_pkts)
+{
+       volatile union ixgbe_adv_rx_desc *rx_ring;
+       volatile union ixgbe_adv_rx_desc *rxdp;
+       struct igb_rx_entry *sw_ring;
+       struct igb_rx_entry *rxe;
+       struct rte_mbuf *rxm;
+       struct rte_mbuf *nmb;
+       union ixgbe_adv_rx_desc rxd;
+       uint64_t dma_addr;
+       uint32_t staterr;
+       uint32_t hlen_type_rss;
+       uint16_t pkt_len;
+       uint16_t rx_id;
+       uint16_t nb_rx;
+       uint16_t nb_hold;
+       uint16_t pkt_flags;
+
+       nb_rx = 0;
+       nb_hold = 0;
+       rx_id = rxq->rx_tail;
+       rx_ring = rxq->rx_ring;
+       sw_ring = rxq->sw_ring;
+       while (nb_rx < nb_pkts) {
+               /*
+                * The order of operations here is important as the DD status
+                * bit must not be read after any other descriptor fields.
+                * rx_ring and rxdp are pointing to volatile data so the order
+                * of accesses cannot be reordered by the compiler. If they were
+                * not volatile, they could be reordered which could lead to
+                * using invalid descriptor fields when read from rxd.
+                */
+               rxdp = &rx_ring[rx_id];
+               staterr = rxdp->wb.upper.status_error;
+               if (! (staterr & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
+                       break;
+               /* Take a local (non-volatile) copy of the descriptor. */
+               rxd = *rxdp;
+
+               /*
+                * End of packet.
+                *
+                * If the IXGBE_RXDADV_STAT_EOP flag is not set, the RX packet
+                * is likely to be invalid and to be dropped by the various
+                * validation checks performed by the network stack.
+                *
+                * Allocate a new mbuf to replenish the RX ring descriptor.
+                * If the allocation fails:
+                *    - arrange for that RX descriptor to be the first one
+                *      being parsed the next time the receive function is
+                *      invoked [on the same queue].
+                *
+                *    - Stop parsing the RX ring and return immediately.
+                *
+                * This policy do not drop the packet received in the RX
+                * descriptor for which the allocation of a new mbuf failed.
+                * Thus, it allows that packet to be later retrieved if
+                * mbuf have been freed in the mean time.
+                * As a side effect, holding RX descriptors instead of
+                * systematically giving them back to the NIC may lead to
+                * RX ring exhaustion situations.
+                * However, the NIC can gracefully prevent such situations
+                * to happen by sending specific "back-pressure" flow control
+                * frames to its peer(s).
+                */
+               PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
+                          "ext_err_stat=0x%08x pkt_len=%u\n",
+                          (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
+                          (unsigned) rx_id, (unsigned) staterr,
+                          (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
+
+               nmb = rte_rxmbuf_alloc(rxq->mb_pool);
+               if (nmb == NULL) {
+                       PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
+                                  "queue_id=%u\n", (unsigned) rxq->port_id,
+                                  (unsigned) rxq->queue_id);
+                       rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
+                       break;
+               }
+
+               /* Advance to the next ring entry (with wrap-around). */
+               nb_hold++;
+               rxe = &sw_ring[rx_id];
+               rx_id++;
+               if (rx_id == rxq->nb_rx_desc)
+                       rx_id = 0;
+
+               /* Prefetch next mbuf while processing current one. */
+               rte_ixgbe_prefetch(sw_ring[rx_id].mbuf);
+
+               /*
+                * When next RX descriptor is on a cache-line boundary,
+                * prefetch the next 4 RX descriptors and the next 8 pointers
+                * to mbufs.
+                */
+               if ((rx_id & 0x3) == 0) {
+                       rte_ixgbe_prefetch(&rx_ring[rx_id]);
+                       rte_ixgbe_prefetch(&sw_ring[rx_id]);
+               }
+
+               /* Swap the received mbuf out of the ring and rearm the
+                * descriptor with the new mbuf's buffer address. */
+               rxm = rxe->mbuf;
+               rxe->mbuf = nmb;
+               dma_addr =
+                       rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
+               rxdp->read.hdr_addr = dma_addr;
+               rxdp->read.pkt_addr = dma_addr;
+
+               /*
+                * Initialize the returned mbuf.
+                * 1) setup generic mbuf fields:
+                *    - number of segments,
+                *    - next segment,
+                *    - packet length,
+                *    - RX port identifier.
+                * 2) integrate hardware offload data, if any:
+                *    - RSS flag & hash,
+                *    - IP checksum flag,
+                *    - VLAN TCI, if any,
+                *    - error flags.
+                */
+               pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
+                                     rxq->crc_len);
+               rxm->pkt.data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
+               rte_packet_prefetch(rxm->pkt.data);
+               rxm->pkt.nb_segs = 1;
+               rxm->pkt.next = NULL;
+               rxm->pkt.pkt_len = pkt_len;
+               rxm->pkt.data_len = pkt_len;
+               rxm->pkt.in_port = rxq->port_id;
+
+               hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
+               /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
+               rxm->pkt.vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
+
+               pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
+               pkt_flags = (pkt_flags | rx_desc_status_to_pkt_flags(staterr));
+               pkt_flags = (pkt_flags | rx_desc_error_to_pkt_flags(staterr));
+               rxm->ol_flags = pkt_flags;
+
+               /* RSS hash and flow-director fields share descriptor space;
+                * pick the one the flags say is valid. */
+               if (likely(pkt_flags & PKT_RX_RSS_HASH))
+                       rxm->pkt.hash.rss = rxd.wb.lower.hi_dword.rss;
+               else if (pkt_flags & PKT_RX_FDIR) {
+                       rxm->pkt.hash.fdir.hash =
+                               (uint16_t)((rxd.wb.lower.hi_dword.csum_ip.csum)
+                                          & IXGBE_ATR_HASH_MASK);
+                       rxm->pkt.hash.fdir.id = rxd.wb.lower.hi_dword.csum_ip.ip_id;
+               }
+               /*
+                * Store the mbuf address into the next entry of the array
+                * of returned packets.
+                */
+               rx_pkts[nb_rx++] = rxm;
+       }
+       rxq->rx_tail = rx_id;
+
+       /*
+        * If the number of free RX descriptors is greater than the RX free
+        * threshold of the queue, advance the Receive Descriptor Tail (RDT)
+        * register.
+        * Update the RDT with the value of the last processed RX descriptor
+        * minus 1, to guarantee that the RDT register is never equal to the
+        * RDH register, which creates a "full" ring situtation from the
+        * hardware point of view...
+        */
+       nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
+       if (nb_hold > rxq->rx_free_thresh) {
+               PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
+                          "nb_hold=%u nb_rx=%u\n",
+                          (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
+                          (unsigned) rx_id, (unsigned) nb_hold,
+                          (unsigned) nb_rx);
+               rx_id = (uint16_t) ((rx_id == 0) ?
+                                    (rxq->nb_rx_desc - 1) : (rx_id - 1));
+               IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
+               nb_hold = 0;
+       }
+       rxq->nb_rx_hold = nb_hold;
+       return (nb_rx);
+}
+
+uint16_t
+ixgbe_recv_scattered_pkts(struct igb_rx_queue *rxq, struct rte_mbuf **rx_pkts,
+                         uint16_t nb_pkts)
+{
+       volatile union ixgbe_adv_rx_desc *rx_ring;
+       volatile union ixgbe_adv_rx_desc *rxdp;
+       struct igb_rx_entry *sw_ring;
+       struct igb_rx_entry *rxe;
+       struct rte_mbuf *first_seg;
+       struct rte_mbuf *last_seg;
+       struct rte_mbuf *rxm;
+       struct rte_mbuf *nmb;
+       union ixgbe_adv_rx_desc rxd;
+       uint64_t dma; /* Physical address of mbuf data buffer */
+       uint32_t staterr;
+       uint32_t hlen_type_rss;
+       uint16_t rx_id;
+       uint16_t nb_rx;
+       uint16_t nb_hold;
+       uint16_t data_len;
+       uint16_t pkt_flags;
+
+       nb_rx = 0;
+       nb_hold = 0;
+       rx_id = rxq->rx_tail;
+       rx_ring = rxq->rx_ring;
+       sw_ring = rxq->sw_ring;
+
+       /*
+        * Retrieve RX context of current packet, if any.
+        */
+       first_seg = rxq->pkt_first_seg;
+       last_seg = rxq->pkt_last_seg;
+
+       while (nb_rx < nb_pkts) {
+       next_desc:
+               /*
+                * The order of operations here is important as the DD status
+                * bit must not be read after any other descriptor fields.
+                * rx_ring and rxdp are pointing to volatile data so the order
+                * of accesses cannot be reordered by the compiler. If they were
+                * not volatile, they could be reordered which could lead to
+                * using invalid descriptor fields when read from rxd.
+                */
+               rxdp = &rx_ring[rx_id];
+               staterr = rxdp->wb.upper.status_error;
+               if (! (staterr & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
+                       break;
+               rxd = *rxdp;
+
+               /*
+                * Descriptor done.
+                *
+                * Allocate a new mbuf to replenish the RX ring descriptor.
+                * If the allocation fails:
+                *    - arrange for that RX descriptor to be the first one
+                *      being parsed the next time the receive function is
+                *      invoked [on the same queue].
+                *
+                *    - Stop parsing the RX ring and return immediately.
+                *
+                * This policy does not drop the packet received in the RX
+                * descriptor for which the allocation of a new mbuf failed.
+                * Thus, it allows that packet to be later retrieved if
+                * mbuf have been freed in the mean time.
+                * As a side effect, holding RX descriptors instead of
+                * systematically giving them back to the NIC may lead to
+                * RX ring exhaustion situations.
+                * However, the NIC can gracefully prevent such situations
+                * to happen by sending specific "back-pressure" flow control
+                * frames to its peer(s).
+                */
+               PMD_RX_LOG(DEBUG, "\nport_id=%u queue_id=%u rx_id=%u "
+                          "staterr=0x%x data_len=%u\n",
+                          (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
+                          (unsigned) rx_id, (unsigned) staterr,
+                          (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
+
+               nmb = rte_rxmbuf_alloc(rxq->mb_pool);
+               if (nmb == NULL) {
+                       PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
+                                  "queue_id=%u\n", (unsigned) rxq->port_id,
+                                  (unsigned) rxq->queue_id);
+                       rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
+                       break;
+               }
+
+               nb_hold++;
+               rxe = &sw_ring[rx_id];
+               rx_id++;
+               if (rx_id == rxq->nb_rx_desc)
+                       rx_id = 0;
+
+               /* Prefetch next mbuf while processing current one. */
+               rte_ixgbe_prefetch(sw_ring[rx_id].mbuf);
+
+               /*
+                * When next RX descriptor is on a cache-line boundary,
+                * prefetch the next 4 RX descriptors and the next 8 pointers
+                * to mbufs.
+                */
+               if ((rx_id & 0x3) == 0) {
+                       rte_ixgbe_prefetch(&rx_ring[rx_id]);
+                       rte_ixgbe_prefetch(&sw_ring[rx_id]);
+               }
+
+               /*
+                * Update RX descriptor with the physical address of the new
+                * data buffer of the new allocated mbuf.
+                */
+               rxm = rxe->mbuf;
+               rxe->mbuf = nmb;
+               dma = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
+               rxdp->read.hdr_addr = dma;
+               rxdp->read.pkt_addr = dma;
+
+               /*
+                * Set data length & data buffer address of mbuf.
+                */
+               data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
+               rxm->pkt.data_len = data_len;
+               rxm->pkt.data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
+
+               /*
+                * If this is the first buffer of the received packet,
+                * set the pointer to the first mbuf of the packet and
+                * initialize its context.
+                * Otherwise, update the total length and the number of segments
+                * of the current scattered packet, and update the pointer to
+                * the last mbuf of the current packet.
+                */
+               if (first_seg == NULL) {
+                       first_seg = rxm;
+                       first_seg->pkt.pkt_len = data_len;
+                       first_seg->pkt.nb_segs = 1;
+               } else {
+                       first_seg->pkt.pkt_len = (uint16_t)(first_seg->pkt.pkt_len
+                                       + data_len);
+                       first_seg->pkt.nb_segs++;
+                       last_seg->pkt.next = rxm;
+               }
+
+               /*
+                * If this is not the last buffer of the received packet,
+                * update the pointer to the last mbuf of the current scattered
+                * packet and continue to parse the RX ring.
+                */
+               if (! (staterr & IXGBE_RXDADV_STAT_EOP)) {
+                       last_seg = rxm;
+                       goto next_desc;
+               }
+
+               /*
+                * This is the last buffer of the received packet.
+                * If the CRC is not stripped by the hardware:
+                *   - Subtract the CRC length from the total packet length.
+                *   - If the last buffer only contains the whole CRC or a part
+                *     of it, free the mbuf associated to the last buffer.
+                *     If part of the CRC is also contained in the previous
+                *     mbuf, subtract the length of that CRC part from the
+                *     data length of the previous mbuf.
+                */
+               rxm->pkt.next = NULL;
+               if (unlikely(rxq->crc_len > 0)) {
+                       first_seg->pkt.pkt_len -= ETHER_CRC_LEN;
+                       if (data_len <= ETHER_CRC_LEN) {
+                               rte_pktmbuf_free_seg(rxm);
+                               first_seg->pkt.nb_segs--;
+                               last_seg->pkt.data_len = (uint16_t)
+                                       (last_seg->pkt.data_len -
+                                        (ETHER_CRC_LEN - data_len));
+                               last_seg->pkt.next = NULL;
+                       } else
+                               rxm->pkt.data_len =
+                                       (uint16_t) (data_len - ETHER_CRC_LEN);
+               }
+
+               /*
+                * Initialize the first mbuf of the returned packet:
+                *    - RX port identifier,
+                *    - hardware offload data, if any:
+                *      - RSS flag & hash,
+                *      - IP checksum flag,
+                *      - VLAN TCI, if any,
+                *      - error flags.
+                */
+               first_seg->pkt.in_port = rxq->port_id;
+
+               /*
+                * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
+                * set in the pkt_flags field.
+                */
+               first_seg->pkt.vlan_tci =
+                               rte_le_to_cpu_16(rxd.wb.upper.vlan);
+               hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
+               pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
+               pkt_flags = (pkt_flags |
+                                       rx_desc_status_to_pkt_flags(staterr));
+               pkt_flags = (pkt_flags |
+                                       rx_desc_error_to_pkt_flags(staterr));
+               first_seg->ol_flags = pkt_flags;
+
+               if (likely(pkt_flags & PKT_RX_RSS_HASH))
+                       first_seg->pkt.hash.rss = rxd.wb.lower.hi_dword.rss;
+               else if (pkt_flags & PKT_RX_FDIR) {
+                       first_seg->pkt.hash.fdir.hash =
+                               (uint16_t)((rxd.wb.lower.hi_dword.csum_ip.csum)
+                                          & IXGBE_ATR_HASH_MASK);
+                       first_seg->pkt.hash.fdir.id =
+                               rxd.wb.lower.hi_dword.csum_ip.ip_id;
+               }
+
+               /* Prefetch data of first segment, if configured to do so. */
+               rte_packet_prefetch(first_seg->pkt.data);
+
+               /*
+                * Store the mbuf address into the next entry of the array
+                * of returned packets.
+                */
+               rx_pkts[nb_rx++] = first_seg;
+
+               /*
+                * Setup receipt context for a new packet.
+                */
+               first_seg = NULL;
+       }
+
+       /*
+        * Record index of the next RX descriptor to probe.
+        */
+       rxq->rx_tail = rx_id;
+
+       /*
+        * Save receive context.
+        */
+       rxq->pkt_first_seg = first_seg;
+       rxq->pkt_last_seg = last_seg;
+
+       /*
+        * If the number of free RX descriptors is greater than the RX free
+        * threshold of the queue, advance the Receive Descriptor Tail (RDT)
+        * register.
+        * Update the RDT with the value of the last processed RX descriptor
+        * minus 1, to guarantee that the RDT register is never equal to the
+                * RDH register, which creates a "full" ring situation from the
+        * hardware point of view...
+        */
+       nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
+       if (nb_hold > rxq->rx_free_thresh) {
+               PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
+                          "nb_hold=%u nb_rx=%u\n",
+                          (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
+                          (unsigned) rx_id, (unsigned) nb_hold,
+                          (unsigned) nb_rx);
+               rx_id = (uint16_t) ((rx_id == 0) ?
+                                    (rxq->nb_rx_desc - 1) : (rx_id - 1));
+               IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
+               nb_hold = 0;
+       }
+       rxq->nb_rx_hold = nb_hold;
+       return (nb_rx);
+}
+
+/*********************************************************************
+ *
+ *  Queue management functions
+ *
+ **********************************************************************/
+
+/*
+ * Rings setup and release.
+ *
+ * TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be
+ * multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary. This will
+ * also optimize cache line size effect. H/W supports up to cache line size 128.
+ */
+#define IXGBE_ALIGN 128
+
+/*
+ * Maximum number of Ring Descriptors.
+ *
+ * Since RDLEN/TDLEN should be multiple of 128 bytes, the number of ring
+ * descriptors should meet the following condition:
+ *      (num_ring_desc * sizeof(rx/tx descriptor)) % 128 == 0
+ */
+#define IXGBE_MIN_RING_DESC 64
+#define IXGBE_MAX_RING_DESC 4096
+
+/*
+ * Create memzone for HW rings. malloc can't be used as the physical address is
+ * needed. If the memzone is already created, then this function returns a ptr
+ * to the old one.
+ */
+static const struct rte_memzone *
+ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
+                     uint16_t queue_id, uint32_t ring_size, int socket_id)
+{
+       char z_name[RTE_MEMZONE_NAMESIZE];
+       const struct rte_memzone *mz;
+
+       rte_snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
+                       dev->driver->pci_drv.name, ring_name,
+                       dev->data->port_id, queue_id);
+
+       mz = rte_memzone_lookup(z_name);
+       if (mz)
+               return mz;
+
+       return rte_memzone_reserve_aligned(z_name, (uint64_t) ring_size,
+                       socket_id, 0, IXGBE_ALIGN);
+}
+
+static void
+ixgbe_tx_queue_release_mbufs(struct igb_tx_queue *txq)
+{
+       unsigned i;
+
+       if (txq->sw_ring != NULL) {
+               for (i = 0; i < txq->nb_tx_desc; i++) {
+                       if (txq->sw_ring[i].mbuf != NULL) {
+                               rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
+                               txq->sw_ring[i].mbuf = NULL;
+                       }
+               }
+       }
+}
+
+static void
+ixgbe_tx_queue_release(struct igb_tx_queue *txq)
+{
+       ixgbe_tx_queue_release_mbufs(txq);
+       rte_free(txq->sw_ring);
+       rte_free(txq);
+}
+
+int
+ixgbe_dev_tx_queue_alloc(struct rte_eth_dev *dev, uint16_t nb_queues)
+{
+       uint16_t old_nb_queues = dev->data->nb_tx_queues;
+       struct igb_tx_queue **txq;
+       unsigned i;
+
+       PMD_INIT_FUNC_TRACE();
+
+       if (dev->data->tx_queues == NULL) {
+               dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
+                               sizeof(struct igb_tx_queue *) * nb_queues,
+                               CACHE_LINE_SIZE);
+               if (dev->data->tx_queues == NULL) {
+                       dev->data->nb_tx_queues = 0;
+                       return -1;
+               }
+       }
+       else {
+               for (i = nb_queues; i < old_nb_queues; i++)
+                       ixgbe_tx_queue_release(dev->data->tx_queues[i]);
+               txq = rte_realloc(dev->data->tx_queues,
+                               sizeof(struct igb_tx_queue *) * nb_queues,
+                               CACHE_LINE_SIZE);
+               if (txq == NULL)
+                       return -1;
+               else
+                       dev->data->tx_queues = txq;
+               if (nb_queues > old_nb_queues)
+                       memset(&dev->data->tx_queues[old_nb_queues], 0,
+                                       sizeof(struct igb_tx_queue *) *
+                                       (nb_queues - old_nb_queues));
+       }
+       dev->data->nb_tx_queues = nb_queues;
+       return 0;
+}
+
+/* (Re)set dynamic igb_tx_queue fields to defaults */
+static void
+ixgbe_reset_tx_queue(struct igb_tx_queue *txq)
+{
+       struct igb_tx_entry *txe = txq->sw_ring;
+       uint16_t prev, i;
+
+       /* Zero out HW ring memory */
+       for (i = 0; i < sizeof(union ixgbe_adv_tx_desc) * txq->nb_tx_desc; i++) {
+               ((volatile char *)txq->tx_ring)[i] = 0;
+       }
+
+       /* Initialize SW ring entries */
+       prev = (uint16_t) (txq->nb_tx_desc - 1);
+       for (i = 0; i < txq->nb_tx_desc; i++) {
+               volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i];
+               txd->wb.status = IXGBE_TXD_STAT_DD;
+               txe[i].mbuf = NULL;
+               txe[i].last_id = i;
+               txe[prev].next_id = i;
+               prev = i;
+       }
+
+       txq->tx_tail = 0;
+       txq->nb_tx_used = 0;
+       /*
+        * Always allow 1 descriptor to be un-allocated to avoid
+        * a H/W race condition
+        */
+       txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
+       txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
+       txq->ctx_curr = 0;
+       memset((void*)&txq->ctx_cache, 0,
+               IXGBE_CTX_NUM * sizeof(struct ixgbe_advctx_info));
+}
+
+int
+ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
+                        uint16_t queue_idx,
+                        uint16_t nb_desc,
+                        unsigned int socket_id,
+                        const struct rte_eth_txconf *tx_conf)
+{
+       const struct rte_memzone *tz;
+       struct igb_tx_queue *txq;
+       struct ixgbe_hw     *hw;
+       uint16_t tx_rs_thresh, tx_free_thresh;
+
+       PMD_INIT_FUNC_TRACE();
+       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       /*
+        * Validate number of transmit descriptors.
+        * It must not exceed hardware maximum, and must be multiple
+        * of IXGBE_ALIGN.
+        */
+       if (((nb_desc * sizeof(union ixgbe_adv_tx_desc)) % IXGBE_ALIGN) != 0 ||
+           (nb_desc > IXGBE_MAX_RING_DESC) ||
+           (nb_desc < IXGBE_MIN_RING_DESC)) {
+               return -EINVAL;
+       }
+
+       /*
+        * The following two parameters control the setting of the RS bit on
+        * transmit descriptors.
+        * TX descriptors will have their RS bit set after txq->tx_rs_thresh
+        * descriptors have been used.
+        * The TX descriptor ring will be cleaned after txq->tx_free_thresh
+        * descriptors are used or if the number of descriptors required
+        * to transmit a packet is greater than the number of free TX
+        * descriptors.
+        * The following constraints must be satisfied:
+        *  tx_rs_thresh must be greater than 0.
+        *  tx_rs_thresh must be less than the size of the ring minus 2.
+        *  tx_rs_thresh must be less than or equal to tx_free_thresh.
+        *  tx_free_thresh must be greater than 0.
+        *  tx_free_thresh must be less than the size of the ring minus 3.
+        * One descriptor in the TX ring is used as a sentinel to avoid a
+        * H/W race condition, hence the maximum threshold constraints.
+        * When set to zero use default values.
+        */
+       tx_rs_thresh = (tx_conf->tx_rs_thresh) ?
+                       tx_conf->tx_rs_thresh : DEFAULT_TX_RS_THRESH;
+       tx_free_thresh = (tx_conf->tx_free_thresh) ?
+                       tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH;
+       if (tx_rs_thresh >= (nb_desc - 2)) {
+               RTE_LOG(ERR, PMD,
+                            "tx_rs_thresh must be less than the "
+                            "number of TX descriptors minus 2. "
+                            "(tx_rs_thresh=%u port=%d queue=%d)",
+                            tx_rs_thresh, dev->data->port_id, queue_idx);
+               return -(EINVAL);
+       }
+       if (tx_free_thresh >= (nb_desc - 3)) {
+               RTE_LOG(ERR, PMD,
+                            "tx_rs_thresh must be less than the "
+                            "tx_free_thresh must be less than the "
+                            "number of TX descriptors minus 3. "
+                            "(tx_free_thresh=%u port=%d queue=%d)",
+                            tx_free_thresh, dev->data->port_id, queue_idx);
+               return -(EINVAL);
+       }
+       if (tx_rs_thresh > tx_free_thresh) {
+               RTE_LOG(ERR, PMD,
+                            "tx_rs_thresh must be less than or equal to "
+                            "tx_free_thresh. "
+                            "(tx_free_thresh=%u tx_rs_thresh=%u "
+                            "port=%d queue=%d)",
+                            tx_free_thresh, tx_rs_thresh,
+                            dev->data->port_id, queue_idx);
+               return -(EINVAL);
+       }
+
+       /*
+        * If rs_bit_thresh is greater than 1, then TX WTHRESH should be
+        * set to 0. If WTHRESH is greater than zero, the RS bit is ignored
+        * by the NIC and all descriptors are written back after the NIC
+        * accumulates WTHRESH descriptors.
+        */
+       if ((tx_rs_thresh > 1) && (tx_conf->tx_thresh.wthresh != 0)) {
+               RTE_LOG(ERR, PMD,
+                            "TX WTHRESH should be set to 0 if "
+                            "tx_rs_thresh is greater than 1. "
+                            "TX WTHRESH will be set to 0. "
+                            "(tx_rs_thresh=%u port=%d queue=%d)",
+                            tx_rs_thresh,
+                            dev->data->port_id, queue_idx);
+               return -(EINVAL);
+       }
+
+       /* Free memory prior to re-allocation if needed... */
+       if (dev->data->tx_queues[queue_idx] != NULL)
+               ixgbe_tx_queue_release(dev->data->tx_queues[queue_idx]);
+
+       /* First allocate the tx queue data structure */
+       txq = rte_zmalloc("ethdev TX queue", sizeof(struct igb_tx_queue),
+                         CACHE_LINE_SIZE);
+       if (txq == NULL)
+               return (-ENOMEM);
+
+       /*
+        * Allocate TX ring hardware descriptors. A memzone large enough to
+        * handle the maximum ring size is allocated in order to allow for
+        * resizing in later calls to the queue setup function.
+        */
+       tz = ring_dma_zone_reserve(dev, "tx_ring", queue_idx,
+                       sizeof(union ixgbe_adv_tx_desc) * IXGBE_MAX_RING_DESC,
+                       socket_id);
+       if (tz == NULL) {
+               ixgbe_tx_queue_release(txq);
+               return (-ENOMEM);
+       }
+
+       txq->nb_tx_desc = nb_desc;
+       txq->tx_rs_thresh = tx_rs_thresh;
+       txq->tx_free_thresh = tx_free_thresh;
+       txq->pthresh = tx_conf->tx_thresh.pthresh;
+       txq->hthresh = tx_conf->tx_thresh.hthresh;
+       txq->wthresh = tx_conf->tx_thresh.wthresh;
+       txq->queue_id = queue_idx;
+       txq->port_id = dev->data->port_id;
+
+       /*
+        * Modification to set VFTDT for virtual function if vf is detected
+        */
+       if (hw->mac.type == ixgbe_mac_82599_vf)
+               txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_VFTDT(queue_idx));
+       else
+               txq->tdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_TDT(queue_idx));
+
+       txq->tx_ring_phys_addr = (uint64_t) tz->phys_addr;
+       txq->tx_ring = (union ixgbe_adv_tx_desc *) tz->addr;
+
+       /* Allocate software ring */
+       txq->sw_ring = rte_zmalloc("txq->sw_ring",
+                                  sizeof(struct igb_tx_entry) * nb_desc,
+                                  CACHE_LINE_SIZE);
+       if (txq->sw_ring == NULL) {
+               ixgbe_tx_queue_release(txq);
+               return (-ENOMEM);
+       }
+       PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n",
+                    txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
+
+       ixgbe_reset_tx_queue(txq);
+
+       dev->data->tx_queues[queue_idx] = txq;
+
+       dev->tx_pkt_burst = ixgbe_xmit_pkts;
+
+       return (0);
+}
+
+static void
+ixgbe_rx_queue_release_mbufs(struct igb_rx_queue *rxq)
+{
+       unsigned i;
+
+       if (rxq->sw_ring != NULL) {
+               for (i = 0; i < rxq->nb_rx_desc; i++) {
+                       if (rxq->sw_ring[i].mbuf != NULL) {
+                               rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
+                               rxq->sw_ring[i].mbuf = NULL;
+                       }
+               }
+       }
+}
+
+static void
+ixgbe_rx_queue_release(struct igb_rx_queue *rxq)
+{
+       ixgbe_rx_queue_release_mbufs(rxq);
+       rte_free(rxq->sw_ring);
+       rte_free(rxq);
+}
+
+int
+ixgbe_dev_rx_queue_alloc(struct rte_eth_dev *dev, uint16_t nb_queues)
+{
+       uint16_t old_nb_queues = dev->data->nb_rx_queues;
+       struct igb_rx_queue **rxq;
+       unsigned i;
+
+       PMD_INIT_FUNC_TRACE();
+
+       if (dev->data->rx_queues == NULL) {
+               dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
+                               sizeof(struct igb_rx_queue *) * nb_queues,
+                               CACHE_LINE_SIZE);
+               if (dev->data->rx_queues == NULL) {
+                       dev->data->nb_rx_queues = 0;
+                       return -ENOMEM;
+               }
+       }
+       else {
+               for (i = nb_queues; i < old_nb_queues; i++)
+                       ixgbe_rx_queue_release(dev->data->rx_queues[i]);
+               rxq = rte_realloc(dev->data->rx_queues,
+                               sizeof(struct igb_rx_queue *) * nb_queues,
+                               CACHE_LINE_SIZE);
+               if (rxq == NULL)
+                       return -ENOMEM;
+               else
+                       dev->data->rx_queues = rxq;
+               if (nb_queues > old_nb_queues)
+                       memset(&dev->data->rx_queues[old_nb_queues], 0,
+                                       sizeof(struct igb_rx_queue *) *
+                                       (nb_queues - old_nb_queues));
+       }
+       dev->data->nb_rx_queues = nb_queues;
+       return 0;
+}
+
+/* (Re)set dynamic igb_rx_queue fields to defaults */
+static void
+ixgbe_reset_rx_queue(struct igb_rx_queue *rxq)
+{
+       unsigned i;
+
+       /* Zero out HW ring memory */
+       for (i = 0; i <  rxq->nb_rx_desc * sizeof(union ixgbe_adv_rx_desc); i++) {
+               ((volatile char *)rxq->rx_ring)[i] = 0;
+       }
+
+       rxq->rx_tail = 0;
+       rxq->nb_rx_hold = 0;
+       rxq->pkt_first_seg = NULL;
+       rxq->pkt_last_seg = NULL;
+}
+
/*
 * Set up an RX queue: validate the descriptor count, allocate the queue
 * structure, the HW descriptor ring (DMA memzone) and the software ring,
 * then reset the queue to its default state.
 *
 * Returns 0 on success, -EINVAL on a bad descriptor count, -ENOMEM on
 * allocation failure.
 */
int
ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t queue_idx,
			 uint16_t nb_desc,
			 unsigned int socket_id,
			 const struct rte_eth_rxconf *rx_conf,
			 struct rte_mempool *mp)
{
	const struct rte_memzone *rz;
	struct igb_rx_queue *rxq;
	struct ixgbe_hw     *hw;

	PMD_INIT_FUNC_TRACE();
	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/*
	 * Validate number of receive descriptors.
	 * It must not exceed hardware maximum, and must be multiple
	 * of IXGBE_ALIGN.
	 */
	if (((nb_desc * sizeof(union ixgbe_adv_rx_desc)) % IXGBE_ALIGN) != 0 ||
	    (nb_desc > IXGBE_MAX_RING_DESC) ||
	    (nb_desc < IXGBE_MIN_RING_DESC)) {
		return (-EINVAL);
	}

	/* Free memory prior to re-allocation if needed... */
	if (dev->data->rx_queues[queue_idx] != NULL)
		ixgbe_rx_queue_release(dev->data->rx_queues[queue_idx]);

	/* First allocate the rx queue data structure */
	rxq = rte_zmalloc("ethdev RX queue", sizeof(struct igb_rx_queue),
			  CACHE_LINE_SIZE);
	if (rxq == NULL)
		return (-ENOMEM);
	rxq->mb_pool = mp;
	rxq->nb_rx_desc = nb_desc;
	rxq->rx_free_thresh = rx_conf->rx_free_thresh;
	rxq->queue_id = queue_idx;
	rxq->port_id = dev->data->port_id;
	/* crc_len is non-zero only when HW CRC stripping is disabled; the
	 * receive path then subtracts it from the packet length. */
	rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 :
				  ETHER_CRC_LEN);

	/*
	 * Allocate RX ring hardware descriptors. A memzone large enough to
	 * handle the maximum ring size is allocated in order to allow for
	 * resizing in later calls to the queue setup function.
	 * (The original comment said "TX ring" -- copy/paste slip; this is
	 * the RX ring.)
	 */
	rz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx,
			IXGBE_MAX_RING_DESC * sizeof(union ixgbe_adv_rx_desc),
			socket_id);
	if (rz == NULL) {
		ixgbe_rx_queue_release(rxq);
		return (-ENOMEM);
	}
	/*
	 * Modified to setup VFRDT for Virtual Function
	 */
	if (hw->mac.type == ixgbe_mac_82599_vf)
		rxq->rdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDT(queue_idx));
	else
		rxq->rdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_RDT(queue_idx));

	rxq->rx_ring_phys_addr = (uint64_t) rz->phys_addr;
	rxq->rx_ring = (union ixgbe_adv_rx_desc *) rz->addr;

	/* Allocate software ring */
	rxq->sw_ring = rte_zmalloc("rxq->sw_ring",
				   sizeof(struct igb_rx_entry) * nb_desc,
				   CACHE_LINE_SIZE);
	if (rxq->sw_ring == NULL) {
		ixgbe_rx_queue_release(rxq);
		return (-ENOMEM);
	}
	PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n",
		     rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);

	dev->data->rx_queues[queue_idx] = rxq;

	ixgbe_reset_rx_queue(rxq);

	return 0;
}
+
+void
+ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
+{
+       unsigned i;
+
+       PMD_INIT_FUNC_TRACE();
+
+       for (i = 0; i < dev->data->nb_tx_queues; i++) {
+               struct igb_tx_queue *txq = dev->data->tx_queues[i];
+               ixgbe_tx_queue_release_mbufs(txq);
+               ixgbe_reset_tx_queue(txq);
+       }
+
+       for (i = 0; i < dev->data->nb_rx_queues; i++) {
+               struct igb_rx_queue *rxq = dev->data->rx_queues[i];
+               ixgbe_rx_queue_release_mbufs(rxq);
+               ixgbe_reset_rx_queue(rxq);
+       }
+}
+
+/*********************************************************************
+ *
+ *  Device RX/TX init functions
+ *
+ **********************************************************************/
+
+/**
+ * Receive Side Scaling (RSS)
+ * See section 7.1.2.8 in the following document:
+ *     "Intel 82599 10 GbE Controller Datasheet" - Revision 2.1 October 2009
+ *
+ * Principles:
+ * The source and destination IP addresses of the IP header and the source
+ * and destination ports of TCP/UDP headers, if any, of received packets are
+ * hashed against a configurable random key to compute a 32-bit RSS hash result.
+ * The seven (7) LSBs of the 32-bit hash result are used as an index into a
+ * 128-entry redirection table (RETA).  Each entry of the RETA provides a 3-bit
+ * RSS output index which is used as the RX queue index where to store the
+ * received packets.
+ * The following output is supplied in the RX write-back descriptor:
+ *     - 32-bit result of the Microsoft RSS hash function,
+ *     - 4-bit RSS type field.
+ */
+
/*
 * RSS random key supplied in section 7.1.2.8.3 of the Intel 82599 datasheet.
 * Used as the default key when the application does not provide one
 * (see ixgbe_rss_configure). 40 bytes = 10 x 32-bit RSSRK registers.
 */
static uint8_t rss_intel_key[40] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
};
+
+static void
+ixgbe_rss_disable(struct rte_eth_dev *dev)
+{
+       struct ixgbe_hw *hw;
+       uint32_t mrqc;
+
+       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
+       mrqc &= ~IXGBE_MRQC_RSSEN;
+       IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
+}
+
/*
 * Program the RSS hash key, the 128-entry redirection table (RETA) and the
 * MRQC hash-function selection from the device's rx_adv_conf.rss_conf.
 * If rss_hf is 0, RSS is disabled instead.
 */
static void
ixgbe_rss_configure(struct rte_eth_dev *dev)
{
	struct ixgbe_hw *hw;
	uint8_t *hash_key;
	uint32_t rss_key;
	uint32_t mrqc;
	uint32_t reta;
	uint16_t rss_hf;
	uint16_t i;
	uint16_t j;

	PMD_INIT_FUNC_TRACE();
	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
	if (rss_hf == 0) { /* Disable RSS */
		ixgbe_rss_disable(dev);
		return;
	}
	hash_key = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key;
	if (hash_key == NULL)
		hash_key = rss_intel_key; /* Default hash key */

	/* Fill in RSS hash key: 40 bytes packed little-endian into the ten
	 * 32-bit RSSRK registers. */
	for (i = 0; i < 10; i++) {
		rss_key  = hash_key[(i * 4)];
		rss_key |= hash_key[(i * 4) + 1] << 8;
		rss_key |= hash_key[(i * 4) + 2] << 16;
		rss_key |= hash_key[(i * 4) + 3] << 24;
		IXGBE_WRITE_REG_ARRAY(hw, IXGBE_RSSRK(0), i, rss_key);
	}

	/*
	 * Fill in redirection table: queue indices 0..nb_rx_queues-1 are
	 * assigned round-robin over the 128 RETA entries. Four 8-bit entries
	 * are accumulated MSB-first and byte-swapped before the 32-bit write
	 * (presumably so entry i lands in the register's lowest byte —
	 * NOTE(review): confirm against the 82599 RETA register layout).
	 */
	reta = 0;
	for (i = 0, j = 0; i < 128; i++, j++) {
		if (j == dev->data->nb_rx_queues) j = 0;
		reta = (reta << 8) | j;
		if ((i & 3) == 3)
			IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), rte_bswap32(reta));
	}

	/* Set configured hashing functions in MRQC register */
	mrqc = IXGBE_MRQC_RSSEN; /* RSS enable */
	if (rss_hf & ETH_RSS_IPV4)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4;
	if (rss_hf & ETH_RSS_IPV4_TCP)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_TCP;
	if (rss_hf & ETH_RSS_IPV6)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6;
	if (rss_hf & ETH_RSS_IPV6_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX;
	if (rss_hf & ETH_RSS_IPV6_TCP)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
	if (rss_hf & ETH_RSS_IPV6_TCP_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP;
	if (rss_hf & ETH_RSS_IPV4_UDP)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
	if (rss_hf & ETH_RSS_IPV6_UDP)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
	if (rss_hf & ETH_RSS_IPV6_UDP_EX)
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
}
+
+#define NUM_VFTA_REGISTERS 128
+#define NIC_RX_BUFFER_SIZE 0x200
+
/*
 * Configure the device for combined VMDq + DCB receive operation from
 * dev_conf.rx_adv_conf.vmdq_dcb_conf: split the RX packet buffer among the
 * traffic classes, enable virtualization, map user priorities to traffic
 * classes and set up the VLAN pool filters.
 * Falls back to disabling RSS if the pool count is not 16 or 32.
 */
static void
ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
{
	struct rte_eth_vmdq_dcb_conf *cfg;
	struct ixgbe_hw *hw;
	enum rte_eth_nb_pools num_pools;
	uint32_t mrqc, vt_ctl, queue_mapping, vlanctrl;
	uint16_t pbsize;
	uint8_t nb_tcs; /* number of traffic classes */
	int i;

	PMD_INIT_FUNC_TRACE();
	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
	num_pools = cfg->nb_queue_pools;
	/* Check we have a valid number of pools */
	if (num_pools != ETH_16_POOLS && num_pools != ETH_32_POOLS) {
		ixgbe_rss_disable(dev);
		return;
	}
	/* 16 pools -> 8 traffic classes, 32 pools -> 4 traffic classes */
	nb_tcs = (uint8_t)(ETH_VMDQ_DCB_NUM_QUEUES / (int)num_pools);

	/*
	 * RXPBSIZE
	 * split rx buffer up into sections, each for 1 traffic class
	 */
	pbsize = (uint16_t)(NIC_RX_BUFFER_SIZE / nb_tcs);
	for (i = 0 ; i < nb_tcs; i++) {
		uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
		rxpbsize &= (~(0x3FF << IXGBE_RXPBSIZE_SHIFT));
		/* clear 10 bits. */
		rxpbsize |= (pbsize << IXGBE_RXPBSIZE_SHIFT); /* set value */
		IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
	}
	/* zero alloc all unused TCs */
	for (i = nb_tcs; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
		uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
		rxpbsize &= (~( 0x3FF << IXGBE_RXPBSIZE_SHIFT ));
		/* clear 10 bits. */
		IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize);
	}

	/* MRQC: enable vmdq and dcb */
	mrqc = ((num_pools == ETH_16_POOLS) ? \
		IXGBE_MRQC_VMDQRT8TCEN : IXGBE_MRQC_VMDQRT4TCEN );
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);

	/* PFVTCTL: turn on virtualisation and set the default pool */
	vt_ctl = IXGBE_VT_CTL_VT_ENABLE | IXGBE_VT_CTL_REPLEN;
	if (cfg->enable_default_pool) {
		vt_ctl |= (cfg->default_pool << IXGBE_VT_CTL_POOL_SHIFT);
	} else {
		vt_ctl |= IXGBE_VT_CTL_DIS_DEFPL;
	}
	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);

	/* RTRUP2TC: mapping user priorities to traffic classes (TCs) */
	queue_mapping = 0;
	for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
		/*
		 * mapping is done with 3 bits per priority,
		 * so shift by i*3 each time
		 */
		queue_mapping |= ((cfg->dcb_queue[i] & 0x07) << (i * 3));

	IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, queue_mapping);

	/* RTRPCS: DCB related */
	IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, IXGBE_RMCS_RRM);

	/* VLNCTRL: enable vlan filtering and allow all vlan tags through */
	vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
	vlanctrl |= IXGBE_VLNCTRL_VFE ; /* enable vlan filters */
	IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);

	/* VFTA - enable all vlan filters */
	for (i = 0; i < NUM_VFTA_REGISTERS; i++) {
		IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
	}

	/* VFRE: pool enabling for receive - 16 or 32 */
	IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), \
			num_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);

	/*
	 * MPSAR - allow pools to read specific mac addresses
	 * In this case, all pools should be able to read from mac addr 0
	 */
	IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(0), 0xFFFFFFFF);
	IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(0), 0xFFFFFFFF);

	/* PFVLVF, PFVLVFB: set up filters for vlan tags as configured */
	for (i = 0; i < cfg->nb_pool_maps; i++) {
		/* set vlan id in VF register and set the valid bit */
		IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN | \
				(cfg->pool_map[i].vlan_id & 0xFFF)));
		/*
		 * Put the allowed pools in VFB reg. As we only have 16 or 32
		 * pools, we only need to use the first half of the register
		 * i.e. bits 0-31
		 */
		IXGBE_WRITE_REG(hw, IXGBE_VLVFB(i*2), cfg->pool_map[i].pools);
	}
}
+
+static int
+ixgbe_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
+{
+       struct igb_rx_entry *rxe = rxq->sw_ring;
+       uint64_t dma_addr;
+       unsigned i;
+
+       /* Initialize software ring entries */
+       for (i = 0; i < rxq->nb_rx_desc; i++) {
+               volatile union ixgbe_adv_rx_desc *rxd;
+               struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool);
+               if (mbuf == NULL) {
+                       PMD_INIT_LOG(ERR, "RX mbuf alloc failed queue_id=%u\n",
+                                    (unsigned) rxq->queue_id);
+                       return (-ENOMEM);
+               }
+               dma_addr =
+                       rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
+               rxd = &rxq->rx_ring[i];
+               rxd->read.hdr_addr = dma_addr;
+               rxd->read.pkt_addr = dma_addr;
+               rxe[i].mbuf = mbuf;
+       }
+
+       return 0;
+}
+
+/*
+ * Initializes Receive Unit.
+ */
+int
+ixgbe_dev_rx_init(struct rte_eth_dev *dev)
+{
+       struct ixgbe_hw     *hw;
+       struct igb_rx_queue *rxq;
+       struct rte_pktmbuf_pool_private *mbp_priv;
+       uint64_t bus_addr;
+       uint32_t rxctrl;
+       uint32_t fctrl;
+       uint32_t hlreg0;
+       uint32_t maxfrs;
+       uint32_t srrctl;
+       uint32_t rdrxctl;
+       uint32_t rxcsum;
+       uint16_t buf_size;
+       uint16_t i;
+       int ret;
+
+       PMD_INIT_FUNC_TRACE();
+       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       /*
+        * Make sure receives are disabled while setting
+        * up the RX context (registers, descriptor rings, etc.).
+        */
+       rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+       IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
+
+       /* Enable receipt of broadcasted frames */
+       fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+       fctrl |= IXGBE_FCTRL_BAM;
+       fctrl |= IXGBE_FCTRL_DPF;
+       fctrl |= IXGBE_FCTRL_PMCF;
+       IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
+
+       /*
+        * Configure CRC stripping, if any.
+        */
+       hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+       if (dev->data->dev_conf.rxmode.hw_strip_crc)
+               hlreg0 |= IXGBE_HLREG0_RXCRCSTRP;
+       else
+               hlreg0 &= ~IXGBE_HLREG0_RXCRCSTRP;
+
+       /*
+        * Configure jumbo frame support, if any.
+        */
+       if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
+               hlreg0 |= IXGBE_HLREG0_JUMBOEN;
+               maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
+               maxfrs &= 0x0000FFFF;
+               maxfrs |= (dev->data->dev_conf.rxmode.max_rx_pkt_len << 16);
+               IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs);
+       } else
+               hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
+
+       IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
+
+       /* Setup RX queues */
+       dev->rx_pkt_burst = ixgbe_recv_pkts;
+       for (i = 0; i < dev->data->nb_rx_queues; i++) {
+               rxq = dev->data->rx_queues[i];
+
+               /* Allocate buffers for descriptor rings */
+               ret = ixgbe_alloc_rx_queue_mbufs(rxq);
+               if (ret) {
+                       ixgbe_dev_clear_queues(dev);
+                       return ret;
+               }
+
+               /*
+                * Reset crc_len in case it was changed after queue setup by a
+                * call to configure.
+                */
+               rxq->crc_len = (uint8_t)
+                               ((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 :
+                               ETHER_CRC_LEN);
+
+               /* Setup the Base and Length of the Rx Descriptor Rings */
+               bus_addr = rxq->rx_ring_phys_addr;
+               IXGBE_WRITE_REG(hw, IXGBE_RDBAL(i),
+                               (uint32_t)(bus_addr & 0x00000000ffffffffULL));
+               IXGBE_WRITE_REG(hw, IXGBE_RDBAH(i),
+                               (uint32_t)(bus_addr >> 32));
+               IXGBE_WRITE_REG(hw, IXGBE_RDLEN(i),
+                               rxq->nb_rx_desc * sizeof(union ixgbe_adv_rx_desc));
+               IXGBE_WRITE_REG(hw, IXGBE_RDH(i), 0);
+               IXGBE_WRITE_REG(hw, IXGBE_RDT(i), 0);
+
+               /* Configure the SRRCTL register */
+#ifdef RTE_HEADER_SPLIT_ENABLE
+               /*
+                * Configure Header Split
+                */
+               if (dev->data->dev_conf.rxmode.header_split) {
+                       if (hw->mac.type == ixgbe_mac_82599EB) {
+                               /* Must setup the PSRTYPE register */
+                               uint32_t psrtype;
+                               psrtype = IXGBE_PSRTYPE_TCPHDR |
+                                       IXGBE_PSRTYPE_UDPHDR   |
+                                       IXGBE_PSRTYPE_IPV4HDR  |
+                                       IXGBE_PSRTYPE_IPV6HDR;
+                               IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(i), psrtype);
+                       }
+                       srrctl = ((dev->data->dev_conf.rxmode.split_hdr_size <<
+                                  IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
+                                 IXGBE_SRRCTL_BSIZEHDR_MASK);
+                       srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
+               } else
+#endif
+                       srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
+
+               /*
+                * Configure the RX buffer size in the BSIZEPACKET field of
+                * the SRRCTL register of the queue.
+                * The value is in 1 KB resolution. Valid values can be from
+                * 1 KB to 16 KB.
+                */
+               mbp_priv = (struct rte_pktmbuf_pool_private *)
+                       ((char *)rxq->mb_pool + sizeof(struct rte_mempool));
+               buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size -
+                                      RTE_PKTMBUF_HEADROOM);
+               srrctl |= ((buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) &
+                          IXGBE_SRRCTL_BSIZEPKT_MASK);
+               IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(i), srrctl);
+
+               buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
+                                      IXGBE_SRRCTL_BSIZEPKT_SHIFT);
+               if (dev->data->dev_conf.rxmode.max_rx_pkt_len > buf_size){
+                       dev->data->scattered_rx = 1;
+                       dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
+               }
+       }
+
+       /*
+        * Configure RSS if device configured with multiple RX queues.
+        */
+       if (hw->mac.type == ixgbe_mac_82599EB) {
+               if (dev->data->nb_rx_queues > 1)
+                       switch (dev->data->dev_conf.rxmode.mq_mode) {
+                               case ETH_RSS:
+                                       ixgbe_rss_configure(dev);
+                                       break;
+
+                               case ETH_VMDQ_DCB:
+                                       ixgbe_vmdq_dcb_configure(dev);
+                                       break;
+
+                               default: ixgbe_rss_disable(dev);
+                       }
+               else
+                       ixgbe_rss_disable(dev);
+       }
+
+       /*
+        * Setup the Checksum Register.
+        * Disable Full-Packet Checksum which is mutually exclusive with RSS.
+        * Enable IP/L4 checksum computation by hardware if requested to do so.
+        */
+       rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
+       rxcsum |= IXGBE_RXCSUM_PCSD;
+       if (dev->data->dev_conf.rxmode.hw_ip_checksum)
+               rxcsum |= IXGBE_RXCSUM_IPPCSE;
+       else
+               rxcsum &= ~IXGBE_RXCSUM_IPPCSE;
+
+       IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
+
+       if (hw->mac.type == ixgbe_mac_82599EB) {
+               rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
+               if (dev->data->dev_conf.rxmode.hw_strip_crc)
+                       rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
+               else
+                       rdrxctl &= ~IXGBE_RDRXCTL_CRCSTRIP;
+               rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE;
+               IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
+       }
+
+       return 0;
+}
+
+/*
+ * Initializes Transmit Unit.
+ */
+void
+ixgbe_dev_tx_init(struct rte_eth_dev *dev)
+{
+       struct ixgbe_hw     *hw;
+       struct igb_tx_queue *txq;
+       uint64_t bus_addr;
+       uint32_t hlreg0;
+       uint32_t txctrl;
+       uint32_t rttdcs;
+       uint16_t i;
+
+       PMD_INIT_FUNC_TRACE();
+       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       /* Enable TX CRC (checksum offload requirement) */
+       hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+       hlreg0 |= IXGBE_HLREG0_TXCRCEN;
+       IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
+
+       /* Setup the Base and Length of the Tx Descriptor Rings */
+       for (i = 0; i < dev->data->nb_tx_queues; i++) {
+               txq = dev->data->tx_queues[i];
+
+               bus_addr = txq->tx_ring_phys_addr;
+               IXGBE_WRITE_REG(hw, IXGBE_TDBAL(i),
+                               (uint32_t)(bus_addr & 0x00000000ffffffffULL));
+               IXGBE_WRITE_REG(hw, IXGBE_TDBAH(i),
+                               (uint32_t)(bus_addr >> 32));
+               IXGBE_WRITE_REG(hw, IXGBE_TDLEN(i),
+                               txq->nb_tx_desc * sizeof(union ixgbe_adv_tx_desc));
+               /* Setup the HW Tx Head and TX Tail descriptor pointers */
+               IXGBE_WRITE_REG(hw, IXGBE_TDH(i), 0);
+               IXGBE_WRITE_REG(hw, IXGBE_TDT(i), 0);
+
+               /*
+                * Disable Tx Head Writeback RO bit, since this hoses
+                * bookkeeping if things aren't delivered in order.
+                */
+               switch (hw->mac.type) {
+                       case ixgbe_mac_82598EB:
+                               txctrl = IXGBE_READ_REG(hw,
+                                                       IXGBE_DCA_TXCTRL(i));
+                               txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
+                               IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i),
+                                               txctrl);
+                               break;
+
+                       case ixgbe_mac_82599EB:
+                       case ixgbe_mac_X540:
+                       default:
+                               txctrl = IXGBE_READ_REG(hw,
+                                               IXGBE_DCA_TXCTRL_82599(i));
+                               txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
+                               IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i),
+                                               txctrl);
+                               break;
+               }
+       }
+
+       if (hw->mac.type != ixgbe_mac_82598EB) {
+               /* disable arbiter before setting MTQC */
+               rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
+               rttdcs |= IXGBE_RTTDCS_ARBDIS;
+               IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
+
+               IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
+
+               /* re-enable arbiter */
+               rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
+               IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
+       }
+}
+
+/*
+ * Start Transmit and Receive Units.
+ */
+void
+ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)
+{
+       struct ixgbe_hw     *hw;
+       struct igb_tx_queue *txq;
+       struct igb_rx_queue *rxq;
+       uint32_t txdctl;
+       uint32_t dmatxctl;
+       uint32_t rxdctl;
+       uint32_t rxctrl;
+       uint16_t i;
+       int poll_ms;
+
+       PMD_INIT_FUNC_TRACE();
+       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       for (i = 0; i < dev->data->nb_tx_queues; i++) {
+               txq = dev->data->tx_queues[i];
+               /* Setup Transmit Threshold Registers */
+               txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
+               txdctl |= txq->pthresh & 0x7F;
+               txdctl |= ((txq->hthresh & 0x7F) << 8);
+               txdctl |= ((txq->wthresh & 0x7F) << 16);
+               IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), txdctl);
+       }
+
+       if (hw->mac.type != ixgbe_mac_82598EB) {
+               dmatxctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
+               dmatxctl |= IXGBE_DMATXCTL_TE;
+               IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
+       }
+
+       for (i = 0; i < dev->data->nb_tx_queues; i++) {
+               txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
+               txdctl |= IXGBE_TXDCTL_ENABLE;
+               IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(i), txdctl);
+
+               /* Wait until TX Enable ready */
+               if (hw->mac.type == ixgbe_mac_82599EB) {
+                       poll_ms = 10;
+                       do {
+                               rte_delay_ms(1);
+                               txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
+                       } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
+                       if (!poll_ms)
+                               PMD_INIT_LOG(ERR, "Could not enable "
+                                            "Tx Queue %d\n", i);
+               }
+       }
+       for (i = 0; i < dev->data->nb_rx_queues; i++) {
+               rxq = dev->data->rx_queues[i];
+               rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
+               rxdctl |= IXGBE_RXDCTL_ENABLE;
+               IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), rxdctl);
+
+               /* Wait until RX Enable ready */
+               poll_ms = 10;
+               do {
+                       rte_delay_ms(1);
+                       rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
+               } while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
+               if (!poll_ms)
+                       PMD_INIT_LOG(ERR, "Could not enable "
+                                    "Rx Queue %d\n", i);
+               rte_wmb();
+               IXGBE_WRITE_REG(hw, IXGBE_RDT(i), rxq->nb_rx_desc - 1);
+       }
+
+       /* Enable Receive engine */
+       rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+       if (hw->mac.type == ixgbe_mac_82598EB)
+               rxctrl |= IXGBE_RXCTRL_DMBYPS;
+       rxctrl |= IXGBE_RXCTRL_RXEN;
+       hw->mac.ops.enable_rx_dma(hw, rxctrl);
+}
+
+
+/*
+ * [VF] Initializes Receive Unit.
+ */
+int
+ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
+{
+       struct ixgbe_hw     *hw;
+       struct igb_rx_queue *rxq;
+       struct rte_pktmbuf_pool_private *mbp_priv;
+       uint64_t bus_addr;
+       uint32_t srrctl;
+       uint16_t buf_size;
+       uint16_t i;
+       int ret;
+
+       PMD_INIT_FUNC_TRACE();
+       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       /* Setup RX queues */
+       dev->rx_pkt_burst = ixgbe_recv_pkts;
+       for (i = 0; i < dev->data->nb_rx_queues; i++) {
+               rxq = dev->data->rx_queues[i];
+
+               /* Allocate buffers for descriptor rings */
+               ret = ixgbe_alloc_rx_queue_mbufs(rxq);
+               if (ret){
+                       return -1;
+               }
+               /* Setup the Base and Length of the Rx Descriptor Rings */
+               bus_addr = rxq->rx_ring_phys_addr;
+
+               IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
+                               (uint32_t)(bus_addr & 0x00000000ffffffffULL));
+               IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i),
+                               (uint32_t)(bus_addr >> 32));
+               IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
+                               rxq->nb_rx_desc * sizeof(union ixgbe_adv_rx_desc));
+               IXGBE_WRITE_REG(hw, IXGBE_VFRDH(i), 0);
+               IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), 0);
+
+
+               /* Configure the SRRCTL register */
+#ifdef RTE_HEADER_SPLIT_ENABLE
+               /*
+                * Configure Header Split
+                */
+               if (dev->data->dev_conf.rxmode.header_split) {
+
+                       /* Must setup the PSRTYPE register */
+                       uint32_t psrtype;
+                       psrtype = IXGBE_PSRTYPE_TCPHDR |
+                               IXGBE_PSRTYPE_UDPHDR   |
+                               IXGBE_PSRTYPE_IPV4HDR  |
+                               IXGBE_PSRTYPE_IPV6HDR;
+
+                       IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE(i), psrtype);
+
+                       srrctl = ((dev->data->dev_conf.rxmode.split_hdr_size <<
+                                  IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
+                                 IXGBE_SRRCTL_BSIZEHDR_MASK);
+                       srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
+               } else
+#endif
+                       srrctl = IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
+
+               /*
+                * Configure the RX buffer size in the BSIZEPACKET field of
+                * the SRRCTL register of the queue.
+                * The value is in 1 KB resolution. Valid values can be from
+                * 1 KB to 16 KB.
+                */
+               mbp_priv = (struct rte_pktmbuf_pool_private *)
+                       ((char *)rxq->mb_pool + sizeof(struct rte_mempool));
+               buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size -
+                                      RTE_PKTMBUF_HEADROOM);
+               srrctl |= ((buf_size >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) &
+                          IXGBE_SRRCTL_BSIZEPKT_MASK);
+
+               /*
+                * VF modification to write virtual function SRRCTL register
+                */
+               IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), srrctl);
+
+               buf_size = (uint16_t) ((srrctl & IXGBE_SRRCTL_BSIZEPKT_MASK) <<
+                                      IXGBE_SRRCTL_BSIZEPKT_SHIFT);
+               if (dev->data->dev_conf.rxmode.max_rx_pkt_len > buf_size){
+                       dev->data->scattered_rx = 1;
+                       dev->rx_pkt_burst = ixgbe_recv_scattered_pkts;
+               }
+       }
+       return 0;
+}
+
+/*
+ * [VF] Initializes Transmit Unit.
+ */
+void
+ixgbevf_dev_tx_init(struct rte_eth_dev *dev)
+{
+       struct ixgbe_hw     *hw;
+       struct igb_tx_queue *txq;
+       uint64_t bus_addr;
+       uint32_t txctrl;
+       uint16_t i;
+
+       PMD_INIT_FUNC_TRACE();
+       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       /* Setup the Base and Length of the Tx Descriptor Rings */
+       for (i = 0; i < dev->data->nb_tx_queues; i++) {
+               txq = dev->data->tx_queues[i];
+               bus_addr = txq->tx_ring_phys_addr;
+               IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
+                               (uint32_t)(bus_addr & 0x00000000ffffffffULL));
+               IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i),
+                               (uint32_t)(bus_addr >> 32));
+               IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
+                               txq->nb_tx_desc * sizeof(union ixgbe_adv_tx_desc));
+               /* Setup the HW Tx Head and TX Tail descriptor pointers */
+               IXGBE_WRITE_REG(hw, IXGBE_VFTDH(i), 0);
+               IXGBE_WRITE_REG(hw, IXGBE_VFTDT(i), 0);
+
+               /*
+                * Disable Tx Head Writeback RO bit, since this hoses
+                * bookkeeping if things aren't delivered in order.
+                */
+               txctrl = IXGBE_READ_REG(hw,
+                               IXGBE_VFDCA_TXCTRL(i));
+               txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
+               IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i),
+                               txctrl);
+       }
+}
+
+/*
+ * [VF] Start Transmit and Receive Units.
+ */
+void
+ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
+{
+       struct ixgbe_hw     *hw;
+       struct igb_tx_queue *txq;
+       struct igb_rx_queue *rxq;
+       uint32_t txdctl;
+       uint32_t rxdctl;
+       uint16_t i;
+       int poll_ms;
+
+       PMD_INIT_FUNC_TRACE();
+       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       for (i = 0; i < dev->data->nb_tx_queues; i++) {
+               txq = dev->data->tx_queues[i];
+               /* Setup Transmit Threshold Registers */
+               txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
+               txdctl |= txq->pthresh & 0x7F;
+               txdctl |= ((txq->hthresh & 0x7F) << 8);
+               txdctl |= ((txq->wthresh & 0x7F) << 16);
+               IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
+       }
+
+       for (i = 0; i < dev->data->nb_tx_queues; i++) {
+
+               txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
+               txdctl |= IXGBE_TXDCTL_ENABLE;
+               IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
+
+               poll_ms = 10;
+               /* Wait until TX Enable ready */
+               do {
+                       rte_delay_ms(1);
+                       txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
+               } while (--poll_ms && !(txdctl & IXGBE_TXDCTL_ENABLE));
+               if (!poll_ms)
+                       PMD_INIT_LOG(ERR, "Could not enable "
+                                        "Tx Queue %d\n", i);
+       }
+       for (i = 0; i < dev->data->nb_rx_queues; i++) {
+
+               rxq = dev->data->rx_queues[i];
+
+               rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
+               rxdctl |= IXGBE_RXDCTL_ENABLE;
+               IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
+
+               /* Wait until RX Enable ready */
+               poll_ms = 10;
+               do {
+                       rte_delay_ms(1);
+                       rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
+               } while (--poll_ms && !(rxdctl & IXGBE_RXDCTL_ENABLE));
+               if (!poll_ms)
+                       PMD_INIT_LOG(ERR, "Could not enable "
+                                        "Rx Queue %d\n", i);
+               rte_wmb();
+               IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), rxq->nb_rx_desc - 1);
+
+       }
+}
diff --git a/lib/librte_ring/Makefile b/lib/librte_ring/Makefile
new file mode 100644 (file)
index 0000000..e77301e
--- /dev/null
@@ -0,0 +1,50 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_ring.a
+
+CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR) -O3
+
+# all source are stored in SRCS-y
+SRCS-$(CONFIG_RTE_LIBRTE_RING) := rte_ring.c
+
+# install includes
+SYMLINK-$(CONFIG_RTE_LIBRTE_RING)-include := rte_ring.h
+
+# this lib needs eal
+DEPDIRS-$(CONFIG_RTE_LIBRTE_RING) += lib/librte_eal
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_ring/rte_ring.c b/lib/librte_ring/rte_ring.c
new file mode 100644 (file)
index 0000000..3eb0d5e
--- /dev/null
@@ -0,0 +1,283 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+/*
+ * Derived from FreeBSD's bufring.c
+ *
+ **************************************************************************
+ *
+ * Copyright (c) 2007,2008 Kip Macy kmacy@freebsd.org
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *
+ * 2. The name of Kip Macy nor the names of other
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ ***************************************************************************/
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <errno.h>
+#include <sys/queue.h>
+
+#include <rte_common.h>
+#include <rte_log.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_launch.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_atomic.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_branch_prediction.h>
+#include <rte_errno.h>
+#include <rte_string_fns.h>
+
+#include "rte_ring.h"
+
+TAILQ_HEAD(rte_ring_list, rte_ring);
+
+/* global list of ring (used for debug/dump) */
+static struct rte_ring_list *ring_list = NULL;
+
+/* true if x is a power of 2 */
+#define POWEROF2(x) ((((x)-1) & (x)) == 0)
+
+/* create the ring */
+struct rte_ring *
+rte_ring_create(const char *name, unsigned count, int socket_id,
+               unsigned flags)
+{
+       char mz_name[RTE_MEMZONE_NAMESIZE];
+       struct rte_ring *r;
+       const struct rte_memzone *mz;
+       size_t ring_size;
+       int mz_flags = 0;
+
+       /* compilation-time checks */
+       RTE_BUILD_BUG_ON((sizeof(struct rte_ring) &
+                         CACHE_LINE_MASK) != 0);
+       RTE_BUILD_BUG_ON((offsetof(struct rte_ring, cons) &
+                         CACHE_LINE_MASK) != 0);
+       RTE_BUILD_BUG_ON((offsetof(struct rte_ring, prod) &
+                         CACHE_LINE_MASK) != 0);
+#ifdef RTE_LIBRTE_RING_DEBUG
+       RTE_BUILD_BUG_ON((sizeof(struct rte_ring_debug_stats) &
+                         CACHE_LINE_MASK) != 0);
+       RTE_BUILD_BUG_ON((offsetof(struct rte_ring, stats) &
+                         CACHE_LINE_MASK) != 0);
+#endif
+
+       /* check that we have an initialised tail queue */
+       if (ring_list == NULL)
+               if ((ring_list = RTE_TAILQ_RESERVE("RTE_RING", rte_ring_list)) == NULL){
+                       rte_errno = E_RTE_NO_TAILQ;
+                       return NULL;
+               }
+
+       /* count must be a power of 2 */
+       if (!POWEROF2(count)) {
+               rte_errno = EINVAL;
+               RTE_LOG(ERR, RING, "Requested size is not a power of 2\n");
+               return NULL;
+       }
+
+       rte_snprintf(mz_name, sizeof(mz_name), "RG_%s", name);
+       ring_size = count * sizeof(void *) + sizeof(struct rte_ring);
+
+       /* reserve a memory zone for this ring. If we can't get rte_config or
+        * we are secondary process, the memzone_reserve function will set
+        * rte_errno for us appropriately - hence no check in this function */
+       mz = rte_memzone_reserve(mz_name, ring_size, socket_id, mz_flags);
+       if (mz == NULL) {
+               RTE_LOG(ERR, RING, "Cannot reserve memory\n");
+               return NULL;
+       }
+
+       r = mz->addr;
+
+       /* init the ring structure */
+       memset(r, 0, sizeof(*r));
+       rte_snprintf(r->name, sizeof(r->name), "%s", name);
+       r->flags = flags;
+       r->prod.bulk_default = r->cons.bulk_default = 1;
+       r->prod.watermark = count;
+       r->prod.sp_enqueue = !!(flags & RING_F_SP_ENQ);
+       r->cons.sc_dequeue = !!(flags & RING_F_SC_DEQ);
+       r->prod.size = r->cons.size = count;
+       r->prod.mask = r->cons.mask = count-1;
+       r->prod.head = r->cons.head = 0;
+       r->prod.tail = r->cons.tail = 0;
+
+       TAILQ_INSERT_TAIL(ring_list, r, next);
+       return r;
+}
+
+/*
+ * change the high water mark. If *count* is 0, water marking is
+ * disabled
+ */
+int
+rte_ring_set_water_mark(struct rte_ring *r, unsigned count)
+{
+       if (count >= r->prod.size)
+               return -EINVAL;
+
+       /* if count is 0, disable the watermarking */
+       if (count == 0)
+               count = r->prod.size;
+
+       r->prod.watermark = count;
+       return 0;
+}
+
+/* dump the status of the ring on the console */
+void
+rte_ring_dump(const struct rte_ring *r)
+{
+#ifdef RTE_LIBRTE_RING_DEBUG
+       struct rte_ring_debug_stats sum;
+       unsigned lcore_id;
+#endif
+
+       printf("ring <%s>@%p\n", r->name, r);
+       printf("  flags=%x\n", r->flags);
+       printf("  size=%"PRIu32"\n", r->prod.size);
+       printf("  ct=%"PRIu32"\n", r->cons.tail);
+       printf("  ch=%"PRIu32"\n", r->cons.head);
+       printf("  pt=%"PRIu32"\n", r->prod.tail);
+       printf("  ph=%"PRIu32"\n", r->prod.head);
+       printf("  used=%u\n", rte_ring_count(r));
+       printf("  avail=%u\n", rte_ring_free_count(r));
+       if (r->prod.watermark == r->prod.size)
+               printf("  watermark=0\n");
+       else
+               printf("  watermark=%"PRIu32"\n", r->prod.watermark);
+       printf("  bulk_default=%"PRIu32"\n", r->prod.bulk_default);
+
+       /* sum and dump statistics */
+#ifdef RTE_LIBRTE_RING_DEBUG
+       memset(&sum, 0, sizeof(sum));
+       for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+               sum.enq_success_bulk += r->stats[lcore_id].enq_success_bulk;
+               sum.enq_success_objs += r->stats[lcore_id].enq_success_objs;
+               sum.enq_quota_bulk += r->stats[lcore_id].enq_quota_bulk;
+               sum.enq_quota_objs += r->stats[lcore_id].enq_quota_objs;
+               sum.enq_fail_bulk += r->stats[lcore_id].enq_fail_bulk;
+               sum.enq_fail_objs += r->stats[lcore_id].enq_fail_objs;
+               sum.deq_success_bulk += r->stats[lcore_id].deq_success_bulk;
+               sum.deq_success_objs += r->stats[lcore_id].deq_success_objs;
+               sum.deq_fail_bulk += r->stats[lcore_id].deq_fail_bulk;
+               sum.deq_fail_objs += r->stats[lcore_id].deq_fail_objs;
+       }
+       printf("  size=%"PRIu32"\n", r->prod.size);
+       printf("  enq_success_bulk=%"PRIu64"\n", sum.enq_success_bulk);
+       printf("  enq_success_objs=%"PRIu64"\n", sum.enq_success_objs);
+       printf("  enq_quota_bulk=%"PRIu64"\n", sum.enq_quota_bulk);
+       printf("  enq_quota_objs=%"PRIu64"\n", sum.enq_quota_objs);
+       printf("  enq_fail_bulk=%"PRIu64"\n", sum.enq_fail_bulk);
+       printf("  enq_fail_objs=%"PRIu64"\n", sum.enq_fail_objs);
+       printf("  deq_success_bulk=%"PRIu64"\n", sum.deq_success_bulk);
+       printf("  deq_success_objs=%"PRIu64"\n", sum.deq_success_objs);
+       printf("  deq_fail_bulk=%"PRIu64"\n", sum.deq_fail_bulk);
+       printf("  deq_fail_objs=%"PRIu64"\n", sum.deq_fail_objs);
+#else
+       printf("  no statistics available\n");
+#endif
+}
+
+/* dump the status of all rings on the console */
+void
+rte_ring_list_dump(void)
+{
+       const struct rte_ring *mp;
+
+       /* check that we have an initialised tail queue */
+       if (ring_list == NULL)
+               if ((ring_list = RTE_TAILQ_RESERVE("RTE_RING", rte_ring_list)) == NULL){
+                       rte_errno = E_RTE_NO_TAILQ;
+                       return;
+               }
+
+       TAILQ_FOREACH(mp, ring_list, next) {
+               rte_ring_dump(mp);
+       }
+}
+
+/* search a ring from its name */
+struct rte_ring *
+rte_ring_lookup(const char *name)
+{
+       struct rte_ring *r;
+
+       /* check that we have an initialised tail queue */
+       if (ring_list == NULL)
+               if ((ring_list = RTE_TAILQ_RESERVE("RTE_RING", rte_ring_list)) == NULL){
+                       rte_errno = E_RTE_NO_TAILQ;
+                       return NULL;
+               }
+
+       TAILQ_FOREACH(r, ring_list, next) {
+               if (strncmp(name, r->name, RTE_RING_NAMESIZE) == 0)
+                       break;
+       }
+
+       if (r == NULL)
+               rte_errno = ENOENT;
+
+       return r;
+}
diff --git a/lib/librte_ring/rte_ring.h b/lib/librte_ring/rte_ring.h
new file mode 100644 (file)
index 0000000..4086c78
--- /dev/null
@@ -0,0 +1,830 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+/*
+ * Derived from FreeBSD's bufring.h
+ *
+ **************************************************************************
+ *
+ * Copyright (c) 2007-2009 Kip Macy kmacy@freebsd.org
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ *
+ * 2. The name of Kip Macy nor the names of other
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ ***************************************************************************/
+
+#ifndef _RTE_RING_H_
+#define _RTE_RING_H_
+
+/**
+ * @file
+ * RTE Ring
+ *
+ * The Ring Manager is a fixed-size queue, implemented as a table of
+ * pointers. Head and tail pointers are modified atomically, allowing
+ * concurrent access to it. It has the following features:
+ *
+ * - FIFO (First In First Out)
+ * - Maximum size is fixed; the pointers are stored in a table.
+ * - Lockless implementation.
+ * - Multi- or single-consumer dequeue.
+ * - Multi- or single-producer enqueue.
+ * - Bulk dequeue.
+ * - Bulk enqueue.
+ *
+ * Note: the ring implementation is not preemptible. An lcore must not
+ * be interrupted by another task that uses the same ring.
+ *
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+#include <sys/queue.h>
+#include <errno.h>
+#include <rte_common.h>
+#include <rte_memory.h>
+#include <rte_lcore.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+
+
+#ifdef RTE_LIBRTE_RING_DEBUG
+/**
+ * A structure that stores the ring statistics (per-lcore).
+ */
+struct rte_ring_debug_stats {
+       uint64_t enq_success_bulk; /**< Successful enqueues number. */
+       uint64_t enq_success_objs; /**< Objects successfully enqueued. */
+       uint64_t enq_quota_bulk;   /**< Successful enqueues above watermark. */
+       uint64_t enq_quota_objs;   /**< Objects enqueued above watermark. */
+       uint64_t enq_fail_bulk;    /**< Failed enqueues number. */
+       uint64_t enq_fail_objs;    /**< Objects that failed to be enqueued. */
+       uint64_t deq_success_bulk; /**< Successful dequeues number. */
+       uint64_t deq_success_objs; /**< Objects successfully dequeued. */
+       uint64_t deq_fail_bulk;    /**< Failed dequeues number. */
+       uint64_t deq_fail_objs;    /**< Objects that failed to be dequeued. */
+} __rte_cache_aligned;
+#endif
+
+#define RTE_RING_NAMESIZE 32 /**< The maximum length of a ring name. */
+
+/**
+ * An RTE ring structure.
+ *
+ * The producer and the consumer have a head and a tail index. The particularity
+ * of these indexes is that they are not between 0 and size(ring). These indexes
+ * are between 0 and 2^32, and we mask their value when we access the ring[]
+ * field. Thanks to this assumption, we can do subtractions between 2 index
+ * values in a modulo-32bit base: that's why the overflow of the indexes is not
+ * a problem.
+ */
+struct rte_ring {
+       TAILQ_ENTRY(rte_ring) next;      /**< Next in list. */
+
+       char name[RTE_RING_NAMESIZE];    /**< Name of the ring. */
+       int flags;                       /**< Flags supplied at creation. */
+
+       /** Ring producer status. */
+       struct prod {
+               volatile uint32_t bulk_default; /**< Default bulk count. */
+               uint32_t watermark;      /**< Maximum items before EDQUOT. */
+               uint32_t sp_enqueue;     /**< True, if single producer. */
+               uint32_t size;           /**< Size of ring. */
+               uint32_t mask;           /**< Mask (size-1) of ring. */
+               volatile uint32_t head;  /**< Producer head. */
+               volatile uint32_t tail;  /**< Producer tail. */
+       } prod __rte_cache_aligned;
+
+       /** Ring consumer status. */
+       struct cons {
+               volatile uint32_t bulk_default; /**< Default bulk count. */
+               uint32_t sc_dequeue;     /**< True, if single consumer. */
+               uint32_t size;           /**< Size of the ring. */
+               uint32_t mask;           /**< Mask (size-1) of ring. */
+               volatile uint32_t head;  /**< Consumer head. */
+               volatile uint32_t tail;  /**< Consumer tail. */
+       } cons __rte_cache_aligned;
+
+
+#ifdef RTE_LIBRTE_RING_DEBUG
+       struct rte_ring_debug_stats stats[RTE_MAX_LCORE];
+#endif
+
+       void * volatile ring[0] \
+                       __rte_cache_aligned; /**< Memory space of ring starts here. */
+};
+
+#define RING_F_SP_ENQ 0x0001 /**< The default enqueue is "single-producer". */
+#define RING_F_SC_DEQ 0x0002 /**< The default dequeue is "single-consumer". */
+
+/**
+ * When debug is enabled, store ring statistics.
+ * @param r
+ *   A pointer to the ring.
+ * @param name
+ *   The name of the statistics field to increment in the ring.
+ * @param n
+ *   The number to add to the object-oriented statistics.
+ */
+#ifdef RTE_LIBRTE_RING_DEBUG
+#define __RING_STAT_ADD(r, name, n) do {               \
+               unsigned __lcore_id = rte_lcore_id();   \
+               r->stats[__lcore_id].name##_objs += n;  \
+               r->stats[__lcore_id].name##_bulk += 1;  \
+       } while(0)
+#else
+#define __RING_STAT_ADD(r, name, n) do {} while(0)
+#endif
+
+/**
+ * Create a new ring named *name* in memory.
+ *
+ * This function uses ``memzone_reserve()`` to allocate memory. Its size is
+ * set to *count*, which must be a power of two. Water marking is
+ * disabled by default. The default bulk count is initialized to 1.
+ * Note that the real usable ring size is *count-1* instead of
+ * *count*.
+ *
+ * @param name
+ *   The name of the ring.
+ * @param count
+ *   The size of the ring (must be a power of 2).
+ * @param socket_id
+ *   The *socket_id* argument is the socket identifier in case of
+ *   NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
+ *   constraint for the reserved zone.
+ * @param flags
+ *   An OR of the following:
+ *    - RING_F_SP_ENQ: If this flag is set, the default behavior when
+ *      using ``rte_ring_enqueue()`` or ``rte_ring_enqueue_bulk()``
+ *      is "single-producer". Otherwise, it is "multi-producers".
+ *    - RING_F_SC_DEQ: If this flag is set, the default behavior when
+ *      using ``rte_ring_dequeue()`` or ``rte_ring_dequeue_bulk()``
+ *      is "single-consumer". Otherwise, it is "multi-consumers".
+ * @return
+ *   On success, the pointer to the new allocated ring. NULL on error with
+ *    rte_errno set appropriately. Possible errno values include:
+ *    - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
+ *    - E_RTE_SECONDARY - function was called from a secondary process instance
+ *    - E_RTE_NO_TAILQ - no tailq list could be got for the ring list
+ *    - EINVAL - count provided is not a power of 2
+ *    - ENOSPC - the maximum number of memzones has already been allocated
+ *    - EEXIST - a memzone with the same name already exists
+ *    - ENOMEM - no appropriate memory area found in which to create memzone
+ */
+struct rte_ring *rte_ring_create(const char *name, unsigned count,
+                                int socket_id, unsigned flags);
+
+/**
+ * Set the default bulk count for enqueue/dequeue.
+ *
+ * The parameter *count* is the default number of bulk elements to
+ * get/put when using ``rte_ring_*_{en,de}queue_bulk()``. It must be
+ * greater than 0 and less than the ring size.
+ *
+ * @param r
+ *   A pointer to the ring structure.
+ * @param count
+ *   The new default bulk count for enqueue/dequeue.
+ * @return
+ *   - 0: Success; default_bulk_count changed.
+ *   - -EINVAL: Invalid count value.
+ */
+static inline int
+rte_ring_set_bulk_count(struct rte_ring *r, unsigned count)
+{
+       if (unlikely(count == 0 || count >= r->prod.size))
+               return -EINVAL;
+
+       r->prod.bulk_default = r->cons.bulk_default = count;
+       return 0;
+}
+
+/**
+ * Get the default bulk count for enqueue/dequeue.
+ *
+ * @param r
+ *   A pointer to the ring structure.
+ * @return
+ *   The default bulk count for enqueue/dequeue.
+ */
+static inline unsigned
+rte_ring_get_bulk_count(struct rte_ring *r)
+{
+       return r->prod.bulk_default;
+}
+
+/**
+ * Change the high water mark.
+ *
+ * If *count* is 0, water marking is disabled. Otherwise, it is set to the
+ * *count* value. The *count* value must be greater than 0 and less
+ * than the ring size.
+ *
+ * This function can be called at any time (not necessarily at
+ * initialization).
+ *
+ * @param r
+ *   A pointer to the ring structure.
+ * @param count
+ *   The new water mark value.
+ * @return
+ *   - 0: Success; water mark changed.
+ *   - -EINVAL: Invalid water mark value.
+ */
+int rte_ring_set_water_mark(struct rte_ring *r, unsigned count);
+
+/**
+ * Dump the status of the ring to the console.
+ *
+ * @param r
+ *   A pointer to the ring structure.
+ */
+void rte_ring_dump(const struct rte_ring *r);
+
+/**
+ * Enqueue several objects on the ring (multi-producers safe).
+ *
+ * This function uses a "compare and set" instruction to move the
+ * producer index atomically.
+ *
+ * @param r
+ *   A pointer to the ring structure.
+ * @param obj_table
+ *   A pointer to a table of void * pointers (objects).
+ * @param n
+ *   The number of objects to add in the ring from the obj_table. The
+ *   value must be strictly positive.
+ * @return
+ *   - 0: Success; objects enqueued.
+ *   - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
+ *     high water mark is exceeded.
+ *   - -ENOBUFS: Not enough room in the ring to enqueue, no object is enqueued.
+ */
+static inline int
+rte_ring_mp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
+                        unsigned n)
+{
+       uint32_t prod_head, prod_next;
+       uint32_t cons_tail, free_entries;
+       int success;
+       unsigned i;
+       uint32_t mask = r->prod.mask;
+       int ret;
+
+       /* move prod.head atomically */
+       do {
+               prod_head = r->prod.head;
+               cons_tail = r->cons.tail;
+               /* The subtraction is done between two unsigned 32bits value
+                * (the result is always modulo 32 bits even if we have
+                * prod_head > cons_tail). So 'free_entries' is always between 0
+                * and size(ring)-1. */
+               free_entries = (mask + cons_tail - prod_head);
+
+               /* check that we have enough room in ring */
+               if (unlikely(n > free_entries)) {
+                       __RING_STAT_ADD(r, enq_fail, n);
+                       return -ENOBUFS;
+               }
+
+               prod_next = prod_head + n;
+               success = rte_atomic32_cmpset(&r->prod.head, prod_head,
+                                             prod_next);
+       } while (unlikely(success == 0));
+
+       /* write entries in ring */
+       for (i = 0; likely(i < n); i++)
+               r->ring[(prod_head + i) & mask] = obj_table[i];
+       rte_wmb();
+
+       /* return -EDQUOT if we exceed the watermark */
+       if (unlikely(((mask + 1) - free_entries + n) > r->prod.watermark)) {
+               ret = -EDQUOT;
+               __RING_STAT_ADD(r, enq_quota, n);
+       }
+       else {
+               ret = 0;
+               __RING_STAT_ADD(r, enq_success, n);
+       }
+
+       /*
+        * If there are other enqueues in progress that preceded us,
+        * we need to wait for them to complete
+        */
+       while (unlikely(r->prod.tail != prod_head))
+               rte_pause();
+
+       r->prod.tail = prod_next;
+       return ret;
+}
+
+/**
+ * Enqueue several objects on a ring (NOT multi-producers safe).
+ *
+ * @param r
+ *   A pointer to the ring structure.
+ * @param obj_table
+ *   A pointer to a table of void * pointers (objects).
+ * @param n
+ *   The number of objects to add in the ring from the obj_table. The
+ *   value must be strictly positive.
+ * @return
+ *   - 0: Success; objects enqueued.
+ *   - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
+ *     high water mark is exceeded.
+ *   - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
+ */
+static inline int
+rte_ring_sp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
+                        unsigned n)
+{
+       uint32_t prod_head, cons_tail;
+       uint32_t prod_next, free_entries;
+       unsigned i;
+       uint32_t mask = r->prod.mask;
+       int ret;
+
+       prod_head = r->prod.head;
+       cons_tail = r->cons.tail;
+       /* The subtraction is done between two unsigned 32bits value
+        * (the result is always modulo 32 bits even if we have
+        * prod_head > cons_tail). So 'free_entries' is always between 0
+        * and size(ring)-1. */
+       free_entries = mask + cons_tail - prod_head;
+
+       /* check that we have enough room in ring */
+       if (unlikely(n > free_entries)) {
+               __RING_STAT_ADD(r, enq_fail, n);
+               return -ENOBUFS;
+       }
+
+       prod_next = prod_head + n;
+       r->prod.head = prod_next;
+
+       /* write entries in ring */
+       for (i = 0; likely(i < n); i++)
+               r->ring[(prod_head + i) & mask] = obj_table[i];
+       rte_wmb();
+
+       /* return -EDQUOT if we exceed the watermark */
+       if (unlikely(((mask + 1) - free_entries + n) > r->prod.watermark)) {
+               ret = -EDQUOT;
+               __RING_STAT_ADD(r, enq_quota, n);
+       }
+       else {
+               ret = 0;
+               __RING_STAT_ADD(r, enq_success, n);
+       }
+
+       r->prod.tail = prod_next;
+       return ret;
+}
+
+/**
+ * Enqueue several objects on a ring.
+ *
+ * This function calls the multi-producer or the single-producer
+ * version depending on the default behavior that was specified at
+ * ring creation time (see flags).
+ *
+ * @param r
+ *   A pointer to the ring structure.
+ * @param obj_table
+ *   A pointer to a table of void * pointers (objects).
+ * @param n
+ *   The number of objects to add in the ring from the obj_table.
+ * @return
+ *   - 0: Success; objects enqueued.
+ *   - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
+ *     high water mark is exceeded.
+ *   - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
+ */
+static inline int
+rte_ring_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
+                     unsigned n)
+{
+       if (r->prod.sp_enqueue)
+               return rte_ring_sp_enqueue_bulk(r, obj_table, n);
+       else
+               return rte_ring_mp_enqueue_bulk(r, obj_table, n);
+}
+
+/**
+ * Enqueue one object on a ring (multi-producers safe).
+ *
+ * This function uses a "compare and set" instruction to move the
+ * producer index atomically.
+ *
+ * @param r
+ *   A pointer to the ring structure.
+ * @param obj
+ *   A pointer to the object to be added.
+ * @return
+ *   - 0: Success; objects enqueued.
+ *   - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
+ *     high water mark is exceeded.
+ *   - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
+ */
+static inline int
+rte_ring_mp_enqueue(struct rte_ring *r, void *obj)
+{
+       return rte_ring_mp_enqueue_bulk(r, &obj, 1);
+}
+
+/**
+ * Enqueue one object on a ring (NOT multi-producers safe).
+ *
+ * @param r
+ *   A pointer to the ring structure.
+ * @param obj
+ *   A pointer to the object to be added.
+ * @return
+ *   - 0: Success; objects enqueued.
+ *   - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
+ *     high water mark is exceeded.
+ *   - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
+ */
+static inline int
+rte_ring_sp_enqueue(struct rte_ring *r, void *obj)
+{
+       return rte_ring_sp_enqueue_bulk(r, &obj, 1);
+}
+
+/**
+ * Enqueue one object on a ring.
+ *
+ * This function calls the multi-producer or the single-producer
+ * version, depending on the default behaviour that was specified at
+ * ring creation time (see flags).
+ *
+ * @param r
+ *   A pointer to the ring structure.
+ * @param obj
+ *   A pointer to the object to be added.
+ * @return
+ *   - 0: Success; objects enqueued.
+ *   - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
+ *     high water mark is exceeded.
+ *   - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
+ */
+static inline int
+rte_ring_enqueue(struct rte_ring *r, void *obj)
+{
+       if (r->prod.sp_enqueue)
+               return rte_ring_sp_enqueue(r, obj);
+       else
+               return rte_ring_mp_enqueue(r, obj);
+}
+
+/**
+ * Dequeue several objects from a ring (multi-consumers safe).
+ *
+ * This function uses a "compare and set" instruction to move the
+ * consumer index atomically.
+ *
+ * @param r
+ *   A pointer to the ring structure.
+ * @param obj_table
+ *   A pointer to a table of void * pointers (objects) that will be filled.
+ * @param n
+ *   The number of objects to dequeue from the ring to the obj_table,
+ *   must be strictly positive
+ * @return
+ *   - 0: Success; objects dequeued.
+ *   - -ENOENT: Not enough entries in the ring to dequeue; no object is
+ *     dequeued.
+ */
+static inline int
+rte_ring_mc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
+{
+       uint32_t cons_head, prod_tail;
+       uint32_t cons_next, entries;
+       int success;
+       unsigned i;
+       uint32_t mask = r->prod.mask;
+
+       /* move cons.head atomically */
+       do {
+               cons_head = r->cons.head;
+               prod_tail = r->prod.tail;
+               /* The subtraction is done between two unsigned 32bits value
+                * (the result is always modulo 32 bits even if we have
+                * cons_head > prod_tail). So 'entries' is always between 0
+                * and size(ring)-1. */
+               entries = (prod_tail - cons_head);
+
+               /* check that we have enough entries in ring */
+               if (unlikely(n > entries)) {
+                       __RING_STAT_ADD(r, deq_fail, n);
+                       return -ENOENT;
+               }
+
+               cons_next = cons_head + n;
+               success = rte_atomic32_cmpset(&r->cons.head, cons_head,
+                                             cons_next);
+       } while (unlikely(success == 0));
+
+       /* copy in table */
+       rte_rmb();
+       for (i = 0; likely(i < n); i++) {
+               obj_table[i] = r->ring[(cons_head + i) & mask];
+       }
+
+       /*
+        * If there are other dequeues in progress that preceded us,
+        * we need to wait for them to complete
+        */
+       while (unlikely(r->cons.tail != cons_head))
+               rte_pause();
+
+       __RING_STAT_ADD(r, deq_success, n);
+       r->cons.tail = cons_next;
+       return 0;
+}
+
+/**
+ * Dequeue several objects from a ring (NOT multi-consumers safe).
+ *
+ * @param r
+ *   A pointer to the ring structure.
+ * @param obj_table
+ *   A pointer to a table of void * pointers (objects) that will be filled.
+ * @param n
+ *   The number of objects to dequeue from the ring to the obj_table,
+ *   must be strictly positive.
+ * @return
+ *   - 0: Success; objects dequeued.
+ *   - -ENOENT: Not enough entries in the ring to dequeue; no object is
+ *     dequeued.
+ */
+static inline int
+rte_ring_sc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
+{
+       uint32_t cons_head, prod_tail;
+       uint32_t cons_next, entries;
+       unsigned i;
+       uint32_t mask = r->prod.mask;
+
+       cons_head = r->cons.head;
+       prod_tail = r->prod.tail;
+       /* The subtraction is done between two unsigned 32bits value
+        * (the result is always modulo 32 bits even if we have
+        * cons_head > prod_tail). So 'entries' is always between 0
+        * and size(ring)-1. */
+       entries = prod_tail - cons_head;
+
+       /* check that we have enough entries in ring */
+       if (unlikely(n > entries)) {
+               __RING_STAT_ADD(r, deq_fail, n);
+               return -ENOENT;
+       }
+
+       cons_next = cons_head + n;
+       r->cons.head = cons_next;
+
+       /* copy in table */
+       rte_rmb();
+       for (i = 0; likely(i < n); i++) {
+               obj_table[i] = r->ring[(cons_head + i) & mask];
+       }
+
+       __RING_STAT_ADD(r, deq_success, n);
+       r->cons.tail = cons_next;
+       return 0;
+}
+
+/**
+ * Dequeue several objects from a ring.
+ *
+ * This function calls the multi-consumers or the single-consumer
+ * version, depending on the default behaviour that was specified at
+ * ring creation time (see flags).
+ *
+ * @param r
+ *   A pointer to the ring structure.
+ * @param obj_table
+ *   A pointer to a table of void * pointers (objects) that will be filled.
+ * @param n
+ *   The number of objects to dequeue from the ring to the obj_table.
+ * @return
+ *   - 0: Success; objects dequeued.
+ *   - -ENOENT: Not enough entries in the ring to dequeue, no object is
+ *     dequeued.
+ */
+static inline int
+rte_ring_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned n)
+{
+       if (r->cons.sc_dequeue)
+               return rte_ring_sc_dequeue_bulk(r, obj_table, n);
+       else
+               return rte_ring_mc_dequeue_bulk(r, obj_table, n);
+}
+
+/**
+ * Dequeue one object from a ring (multi-consumers safe).
+ *
+ * This function uses a "compare and set" instruction to move the
+ * consumer index atomically.
+ *
+ * @param r
+ *   A pointer to the ring structure.
+ * @param obj_p
+ *   A pointer to a void * pointer (object) that will be filled.
+ * @return
+ *   - 0: Success; objects dequeued.
+ *   - -ENOENT: Not enough entries in the ring to dequeue; no object is
+ *     dequeued.
+ */
+static inline int
+rte_ring_mc_dequeue(struct rte_ring *r, void **obj_p)
+{
+       return rte_ring_mc_dequeue_bulk(r, obj_p, 1);
+}
+
+/**
+ * Dequeue one object from a ring (NOT multi-consumers safe).
+ *
+ * @param r
+ *   A pointer to the ring structure.
+ * @param obj_p
+ *   A pointer to a void * pointer (object) that will be filled.
+ * @return
+ *   - 0: Success; objects dequeued.
+ *   - -ENOENT: Not enough entries in the ring to dequeue, no object is
+ *     dequeued.
+ */
+static inline int
+rte_ring_sc_dequeue(struct rte_ring *r, void **obj_p)
+{
+       return rte_ring_sc_dequeue_bulk(r, obj_p, 1);
+}
+
+/**
+ * Dequeue one object from a ring.
+ *
+ * This function calls the multi-consumers or the single-consumer
+ * version depending on the default behaviour that was specified at
+ * ring creation time (see flags).
+ *
+ * @param r
+ *   A pointer to the ring structure.
+ * @param obj_p
+ *   A pointer to a void * pointer (object) that will be filled.
+ * @return
+ *   - 0: Success, objects dequeued.
+ *   - -ENOENT: Not enough entries in the ring to dequeue, no object is
+ *     dequeued.
+ */
+static inline int
+rte_ring_dequeue(struct rte_ring *r, void **obj_p)
+{
+       if (r->cons.sc_dequeue)
+               return rte_ring_sc_dequeue(r, obj_p);
+       else
+               return rte_ring_mc_dequeue(r, obj_p);
+}
+
+/**
+ * Test if a ring is full.
+ *
+ * @param r
+ *   A pointer to the ring structure.
+ * @return
+ *   - 1: The ring is full.
+ *   - 0: The ring is not full.
+ */
+static inline int
+rte_ring_full(const struct rte_ring *r)
+{
+       uint32_t prod_tail = r->prod.tail;
+       uint32_t cons_tail = r->cons.tail;
+       return (((cons_tail - prod_tail - 1) & r->prod.mask) == 0);
+}
+
+/**
+ * Test if a ring is empty.
+ *
+ * @param r
+ *   A pointer to the ring structure.
+ * @return
+ *   - 1: The ring is empty.
+ *   - 0: The ring is not empty.
+ */
+static inline int
+rte_ring_empty(const struct rte_ring *r)
+{
+       uint32_t prod_tail = r->prod.tail;
+       uint32_t cons_tail = r->cons.tail;
+       return !!(cons_tail == prod_tail);
+}
+
+/**
+ * Return the number of entries in a ring.
+ *
+ * @param r
+ *   A pointer to the ring structure.
+ * @return
+ *   The number of entries in the ring.
+ */
+static inline unsigned
+rte_ring_count(const struct rte_ring *r)
+{
+       uint32_t prod_tail = r->prod.tail;
+       uint32_t cons_tail = r->cons.tail;
+       return ((prod_tail - cons_tail) & r->prod.mask);
+}
+
+/**
+ * Return the number of free entries in a ring.
+ *
+ * @param r
+ *   A pointer to the ring structure.
+ * @return
+ *   The number of free entries in the ring.
+ */
+static inline unsigned
+rte_ring_free_count(const struct rte_ring *r)
+{
+       uint32_t prod_tail = r->prod.tail;
+       uint32_t cons_tail = r->cons.tail;
+       return ((cons_tail - prod_tail - 1) & r->prod.mask);
+}
+
+/**
+ * Dump the status of all rings on the console
+ */
+void rte_ring_list_dump(void);
+
+/**
+ * Search a ring from its name
+ *
+ * @param name
+ *   The name of the ring.
+ * @return
+ *   The pointer to the ring matching the name, or NULL if not found,
+ *   with rte_errno set appropriately. Possible rte_errno values include:
+ *    - ENOENT - required entry not available to return.
+ */
+struct rte_ring *rte_ring_lookup(const char *name);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_RING_H_ */
diff --git a/lib/librte_timer/Makefile b/lib/librte_timer/Makefile
new file mode 100644 (file)
index 0000000..155a960
--- /dev/null
@@ -0,0 +1,50 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_timer.a
+
+CFLAGS += $(WERROR_FLAGS) -I$(SRCDIR)
+
+# all source are stored in SRCS-y
+SRCS-$(CONFIG_RTE_LIBRTE_TIMER) := rte_timer.c
+
+# install this header file
+SYMLINK-$(CONFIG_RTE_LIBRTE_TIMER)-include := rte_timer.h
+
+# this lib needs eal
+DEPDIRS-$(CONFIG_RTE_LIBRTE_TIMER) += lib/librte_eal
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_timer/rte_timer.c b/lib/librte_timer/rte_timer.c
new file mode 100644 (file)
index 0000000..a944bee
--- /dev/null
@@ -0,0 +1,506 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#include <string.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <sys/queue.h>
+#include <inttypes.h>
+
+#include <rte_atomic.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_per_lcore.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_launch.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_branch_prediction.h>
+#include <rte_spinlock.h>
+
+#include "rte_timer.h"
+
+LIST_HEAD(rte_timer_list, rte_timer);
+
+struct priv_timer {
+       struct rte_timer_list pending;  /**< list of pending timers */
+       struct rte_timer_list expired;  /**< list of expired timers */
+       struct rte_timer_list done;     /**< list of done timers */
+       rte_spinlock_t list_lock;       /**< lock to protect list access */
+
+       /** per-core variable that is true if a timer was updated on this
+        *  core since last reset of the variable */
+       int updated;
+
+       unsigned prev_lcore;              /**< used for lcore round robin */
+
+#ifdef RTE_LIBRTE_TIMER_DEBUG
+       /** per-lcore statistics */
+       struct rte_timer_debug_stats stats;
+#endif
+} __rte_cache_aligned;
+
+/** per-lcore private info for timers */
+static struct priv_timer priv_timer[RTE_MAX_LCORE];
+
+/* when debug is enabled, store some statistics */
+#ifdef RTE_LIBRTE_TIMER_DEBUG
+#define __TIMER_STAT_ADD(name, n) do {                         \
+               unsigned __lcore_id = rte_lcore_id();           \
+               priv_timer[__lcore_id].stats.name += (n);       \
+       } while(0)
+#else
+#define __TIMER_STAT_ADD(name, n) do {} while(0)
+#endif
+
+/* this macro allows removing or modifying var while iterating the list */
+#define LIST_FOREACH_SAFE(var, var2, head, field)                     \
+       for ((var) = ((head)->lh_first),                               \
+                    (var2) = ((var) ? ((var)->field.le_next) : NULL); \
+            (var);                                                    \
+            (var) = (var2),                                           \
+                    (var2) = ((var) ? ((var)->field.le_next) : NULL))
+
+
+/* Init the timer library: set up the per-lcore timer lists and locks. */
+void
+rte_timer_subsystem_init(void)
+{
+       unsigned lcore_id;
+
+       for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id ++) {
+               LIST_INIT(&priv_timer[lcore_id].pending);
+               LIST_INIT(&priv_timer[lcore_id].expired);
+               LIST_INIT(&priv_timer[lcore_id].done);
+               rte_spinlock_init(&priv_timer[lcore_id].list_lock);
+               priv_timer[lcore_id].prev_lcore = lcore_id;
+       }
+}
+
+/* Initialize the timer handle tim for use (state STOP, no owner) */
+void
+rte_timer_init(struct rte_timer *tim)
+{
+       union rte_timer_status status;
+
+       status.state = RTE_TIMER_STOP;
+       status.owner = RTE_TIMER_NO_OWNER;
+       tim->status.u32 = status.u32;
+}
+
+/*
+ * if timer is pending or stopped (or running on the same core as
+ * us), mark timer as configuring, and on success return the previous
+ * status of the timer
+ */
+static int
+timer_set_config_state(struct rte_timer *tim,
+                      union rte_timer_status *ret_prev_status)
+{
+       union rte_timer_status prev_status, status;
+       int success = 0;
+       unsigned lcore_id;
+
+       lcore_id = rte_lcore_id();
+
+       /* wait that the timer is in correct status before update,
+        * and mark it as being configured */
+       while (success == 0) {
+               prev_status.u32 = tim->status.u32;
+
+               /* timer is running on another core, exit */
+               if (prev_status.state == RTE_TIMER_RUNNING &&
+                   (unsigned)prev_status.owner != lcore_id)
+                       return -1;
+
+               /* timer is being configured on another core */
+               if (prev_status.state == RTE_TIMER_CONFIG)
+                       return -1;
+
+               /* here, we know that timer is stopped or pending,
+                * mark it atomically as being configured */
+               status.state = RTE_TIMER_CONFIG;
+               status.owner = (int16_t)lcore_id;
+               success = rte_atomic32_cmpset(&tim->status.u32,
+                                             prev_status.u32,
+                                             status.u32);
+       }
+
+       ret_prev_status->u32 = prev_status.u32;
+       return 0;
+}
+
+/*
+ * if timer is pending, mark timer as running
+ */
+static int
+timer_set_running_state(struct rte_timer *tim)
+{
+       union rte_timer_status prev_status, status;
+       unsigned lcore_id = rte_lcore_id();
+       int success = 0;
+
+       /* wait that the timer is in correct status before update,
+        * and mark it as running */
+       while (success == 0) {
+               prev_status.u32 = tim->status.u32;
+
+               /* timer is not pending anymore */
+               if (prev_status.state != RTE_TIMER_PENDING)
+                       return -1;
+
+               /* here, we know that timer is pending, mark it
+                * atomically as running */
+               status.state = RTE_TIMER_RUNNING;
+               status.owner = (int16_t)lcore_id;
+               success = rte_atomic32_cmpset(&tim->status.u32,
+                                             prev_status.u32,
+                                             status.u32);
+       }
+
+       return 0;
+}
+
+/*
+ * Add timer to the pending list of lcore tim_lcore, locking if needed.
+ * The timer must be in CONFIG state.
+ * The timer must not already be in a list.
+ */
+static void
+timer_add(struct rte_timer *tim, unsigned tim_lcore, int local_is_locked)
+{
+       uint64_t cur_time = rte_get_hpet_cycles();
+       unsigned lcore_id = rte_lcore_id();
+       struct rte_timer *t, *t_prev;
+
+       /* if timer needs to be scheduled on another core, we need to
+        * lock the list; if it is on local core, we need to lock if
+        * we are not called from rte_timer_manage() */
+       if (tim_lcore != lcore_id || !local_is_locked)
+               rte_spinlock_lock(&priv_timer[tim_lcore].list_lock);
+
+       t = LIST_FIRST(&priv_timer[tim_lcore].pending);
+
+       /* list is empty or 'tim' will expire before 't' */
+       if (t == NULL || ((int64_t)(tim->expire - cur_time) <
+                         (int64_t)(t->expire - cur_time))) {
+               LIST_INSERT_HEAD(&priv_timer[tim_lcore].pending, tim, next);
+       }
+       else {
+               t_prev = t;
+
+               /* find an element that will expire after 'tim' */
+               LIST_FOREACH(t, &priv_timer[tim_lcore].pending, next) {
+                       if ((int64_t)(tim->expire - cur_time) <
+                           (int64_t)(t->expire - cur_time)) {
+                               LIST_INSERT_BEFORE(t, tim, next);
+                               break;
+                       }
+                       t_prev = t;
+               }
+
+               /* not found, insert at the end of the list */
+               if (t == NULL)
+                       LIST_INSERT_AFTER(t_prev, tim, next);
+       }
+
+       if (tim_lcore != lcore_id || !local_is_locked)
+               rte_spinlock_unlock(&priv_timer[tim_lcore].list_lock);
+}
+
+/*
+ * Delete timer from its list, locking if needed.
+ * The timer must be in CONFIG state.
+ * The timer must be in a list.
+ */
+static void
+timer_del(struct rte_timer *tim, unsigned prev_owner, int local_is_locked)
+{
+       unsigned lcore_id = rte_lcore_id();
+
+       /* if timer is pending on another core, we need to lock the
+        * list; if it is on local core, we need to lock if we are not
+        * called from rte_timer_manage() */
+       if (prev_owner != lcore_id || !local_is_locked)
+               rte_spinlock_lock(&priv_timer[prev_owner].list_lock);
+
+       LIST_REMOVE(tim, next);
+
+       if (prev_owner != lcore_id || !local_is_locked)
+               rte_spinlock_unlock(&priv_timer[prev_owner].list_lock);
+}
+
+/* Reset and start the timer associated with the timer handle (private func) */
+static int
+__rte_timer_reset(struct rte_timer *tim, uint64_t expire,
+                 uint64_t period, unsigned tim_lcore,
+                 rte_timer_cb_t fct, void *arg,
+                 int local_is_locked)
+{
+       union rte_timer_status prev_status, status;
+       int ret;
+       unsigned lcore_id = rte_lcore_id();
+
+       /* round robin for tim_lcore */
+       if (tim_lcore == (unsigned)LCORE_ID_ANY) {
+               tim_lcore = rte_get_next_lcore(priv_timer[lcore_id].prev_lcore,
+                                              0, 1);
+               priv_timer[lcore_id].prev_lcore = tim_lcore;
+       }
+
+       /* wait that the timer is in correct status before update,
+        * and mark it as being configured */
+       ret = timer_set_config_state(tim, &prev_status);
+       if (ret < 0)
+               return -1;
+
+       __TIMER_STAT_ADD(reset, 1);
+       priv_timer[lcore_id].updated = 1;
+
+       /* remove it from list */
+       if (prev_status.state == RTE_TIMER_PENDING ||
+           prev_status.state == RTE_TIMER_RUNNING) {
+               timer_del(tim, prev_status.owner, local_is_locked);
+               __TIMER_STAT_ADD(pending, -1);
+       }
+
+       tim->period = period;
+       tim->expire = expire;
+       tim->f = fct;
+       tim->arg = arg;
+
+       __TIMER_STAT_ADD(pending, 1);
+       timer_add(tim, tim_lcore, local_is_locked);
+
+       /* update state: as we are in CONFIG state, only us can modify
+        * the state so we don't need to use cmpset() here */
+       rte_wmb();
+       status.state = RTE_TIMER_PENDING;
+       status.owner = (int16_t)tim_lcore;
+       tim->status.u32 = status.u32;
+
+       return 0;
+}
+
+/* Reset and start the timer associated with the timer handle tim */
+int
+rte_timer_reset(struct rte_timer *tim, uint64_t ticks,
+               enum rte_timer_type type, unsigned tim_lcore,
+               rte_timer_cb_t fct, void *arg)
+{
+       uint64_t cur_time = rte_get_hpet_cycles();
+       uint64_t period;
+
+       if (unlikely((tim_lcore != (unsigned)LCORE_ID_ANY) &&
+                       !rte_lcore_is_enabled(tim_lcore)))
+               return -1;
+
+       if (type == PERIODICAL)
+               period = ticks;
+       else
+               period = 0;
+
+       /* propagate failure (-1 if timer is RUNNING or CONFIG) so that
+        * callers and the rte_timer_reset_sync() retry loop can see it */
+       return __rte_timer_reset(tim,  cur_time + ticks, period, tim_lcore,
+                                fct, arg, 0);
+}
+
+/* loop until rte_timer_reset() succeeds */
+void
+rte_timer_reset_sync(struct rte_timer *tim, uint64_t ticks,
+                    enum rte_timer_type type, unsigned tim_lcore,
+                    rte_timer_cb_t fct, void *arg)
+{
+       while (rte_timer_reset(tim, ticks, type, tim_lcore,
+                              fct, arg) != 0);
+}
+
+/* Stop the timer associated with the timer handle tim */
+int
+rte_timer_stop(struct rte_timer *tim)
+{
+       union rte_timer_status prev_status, status;
+       unsigned lcore_id = rte_lcore_id();
+       int ret;
+
+       /* wait that the timer is in correct status before update,
+        * and mark it as being configured */
+       ret = timer_set_config_state(tim, &prev_status);
+       if (ret < 0)
+               return -1;
+
+       __TIMER_STAT_ADD(stop, 1);
+       priv_timer[lcore_id].updated = 1;
+
+       /* remove it from list */
+       if (prev_status.state == RTE_TIMER_PENDING ||
+           prev_status.state == RTE_TIMER_RUNNING) {
+               timer_del(tim, prev_status.owner, 0);
+               __TIMER_STAT_ADD(pending, -1);
+       }
+
+       /* mark timer as stopped */
+       rte_wmb();
+       status.state = RTE_TIMER_STOP;
+       status.owner = RTE_TIMER_NO_OWNER;
+       tim->status.u32 = status.u32;
+
+       return 0;
+}
+
+/* loop until rte_timer_stop() succeeds */
+void
+rte_timer_stop_sync(struct rte_timer *tim)
+{
+       while (rte_timer_stop(tim) != 0);
+}
+
+/* Return 1 if the timer handle tim is in PENDING state, else 0 */
+int
+rte_timer_pending(struct rte_timer *tim)
+{
+       return tim->status.state == RTE_TIMER_PENDING;
+}
+
+/* must be called periodically, runs all expired timers */
+void rte_timer_manage(void)
+{
+       union rte_timer_status status;
+       struct rte_timer *tim, *tim2;
+       unsigned lcore_id = rte_lcore_id();
+       uint64_t cur_time = rte_get_hpet_cycles();
+       int ret;
+
+       __TIMER_STAT_ADD(manage, 1);
+
+       /* browse ordered list, add expired timers in 'expired' list */
+       rte_spinlock_lock(&priv_timer[lcore_id].list_lock);
+
+       LIST_FOREACH_SAFE(tim, tim2, &priv_timer[lcore_id].pending, next) {
+               if ((int64_t)(cur_time - tim->expire) < 0)
+                       break;
+
+               LIST_REMOVE(tim, next);
+               LIST_INSERT_HEAD(&priv_timer[lcore_id].expired, tim, next);
+       }
+
+
+       /* for each timer of 'expired' list, check state and execute callback */
+       while ((tim = LIST_FIRST(&priv_timer[lcore_id].expired)) != NULL) {
+               ret = timer_set_running_state(tim);
+
+               /* remove from expired list, and add it in done list */
+               LIST_REMOVE(tim, next);
+               LIST_INSERT_HEAD(&priv_timer[lcore_id].done, tim, next);
+
+               /* this timer was not pending, continue */
+               if (ret < 0)
+                       continue;
+
+               rte_spinlock_unlock(&priv_timer[lcore_id].list_lock);
+
+               priv_timer[lcore_id].updated = 0;
+
+               /* execute callback function with list unlocked */
+               tim->f(tim, tim->arg);
+
+               rte_spinlock_lock(&priv_timer[lcore_id].list_lock);
+
+               /* the timer was stopped or reloaded by the callback
+                * function, we have nothing to do here */
+               if (priv_timer[lcore_id].updated == 1)
+                       continue;
+
+               if (tim->period == 0) {
+                       /* remove from done list and mark timer as stopped */
+                       LIST_REMOVE(tim, next);
+                       __TIMER_STAT_ADD(pending, -1);
+                       status.state = RTE_TIMER_STOP;
+                       status.owner = RTE_TIMER_NO_OWNER;
+                       rte_wmb();
+                       tim->status.u32 = status.u32;
+               }
+               else {
+                       /* keep it in done list and mark timer as pending */
+                       status.state = RTE_TIMER_PENDING;
+                       status.owner = (int16_t)lcore_id;
+                       rte_wmb();
+                       tim->status.u32 = status.u32;
+               }
+       }
+
+       /* finally, browse done list, some timer may have to be
+        * rescheduled automatically */
+       LIST_FOREACH_SAFE(tim, tim2, &priv_timer[lcore_id].done, next) {
+
+               /* reset may fail if timer is being modified, in this
+                * case the timer will remain in the 'done' list until
+                * the core that is modifying it removes it */
+               __rte_timer_reset(tim, cur_time + tim->period,
+                                 tim->period, lcore_id, tim->f,
+                                 tim->arg, 1);
+       }
+
+       /* job finished, unlock the list lock */
+       rte_spinlock_unlock(&priv_timer[lcore_id].list_lock);
+}
+
+/* dump aggregated timer statistics to stdout (RTE_LIBRTE_TIMER_DEBUG only) */
+void rte_timer_dump_stats(void)
+{
+#ifdef RTE_LIBRTE_TIMER_DEBUG
+       struct rte_timer_debug_stats sum;
+       unsigned lcore_id;
+
+       memset(&sum, 0, sizeof(sum));
+       for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+               sum.reset += priv_timer[lcore_id].stats.reset;
+               sum.stop += priv_timer[lcore_id].stats.stop;
+               sum.manage += priv_timer[lcore_id].stats.manage;
+               sum.pending += priv_timer[lcore_id].stats.pending;
+       }
+       printf("Timer statistics:\n");
+       printf("  reset = %"PRIu64"\n", sum.reset);
+       printf("  stop = %"PRIu64"\n", sum.stop);
+       printf("  manage = %"PRIu64"\n", sum.manage);
+       printf("  pending = %"PRIu64"\n", sum.pending);
+#else
+       printf("No timer statistics, RTE_LIBRTE_TIMER_DEBUG is disabled\n");
+#endif
+}
diff --git a/lib/librte_timer/rte_timer.h b/lib/librte_timer/rte_timer.h
new file mode 100644 (file)
index 0000000..a44bc90
--- /dev/null
@@ -0,0 +1,332 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
+#ifndef _RTE_TIMER_H_
+#define _RTE_TIMER_H_
+
+/**
+ * @file
+ * RTE Timer
+ *
+ * This library provides a timer service to RTE Data Plane execution
+ * units that allows the execution of callback functions asynchronously.
+ *
+ * - Timers can be periodic or single (one-shot).
+ * - The timers can be loaded from one core and executed on another. This has
+ *   to be specified in the call to rte_timer_reset().
 * - High precision is possible. NOTE: this depends on the call frequency to
+ *   rte_timer_manage() that checks the timer expiration for the local core.
+ * - If not used in an application, for improved performance, it can be
+ *   disabled at compilation time simply by not calling the
+ *   rte_timer_manage() function.
+ *
+ * The timer library uses the rte_get_hpet_cycles() function that
+ * uses the HPET, when available, to provide a reliable time reference. [HPET
+ * routines are provided by EAL, which falls back to using the chip TSC (time-
+ * stamp counter) when HPET is not available]
+ *
+ * This library provides an interface to add, delete and restart a
+ * timer. The API is based on the BSD callout(9) API with a few
+ * differences.
+ *
+ * See the RTE architecture documentation for more information about the
+ * design of this library.
+ */
+
+#include <stdint.h>
+#include <stddef.h>
+#include <sys/queue.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define RTE_TIMER_STOP    0 /**< State: timer is stopped. */
+#define RTE_TIMER_PENDING 1 /**< State: timer is scheduled. */
+#define RTE_TIMER_RUNNING 2 /**< State: timer function is running. */
+#define RTE_TIMER_CONFIG  3 /**< State: timer is being configured. */
+
+#define RTE_TIMER_NO_OWNER -1 /**< Timer has no owner. */
+
+/**
+ * Timer type: Periodic or single (one-shot).
+ */
+enum rte_timer_type {
+       SINGLE,
+       PERIODICAL
+};
+
+/**
+ * Timer status: A union of the state (stopped, pending, running,
+ * config) and an owner (the id of the lcore that owns the timer).
+ */
+union rte_timer_status {
+       struct {
+               uint16_t state;  /**< Stop, pending, running, config. */
+               int16_t owner;   /**< The lcore that owns the timer. */
+       };
+       uint32_t u32;            /**< To atomic-set status + owner. */
+};
+
+#ifdef RTE_LIBRTE_TIMER_DEBUG
+/**
+ * A structure that stores the timer statistics (per-lcore).
+ */
+struct rte_timer_debug_stats {
+       uint64_t reset;   /**< Number of success calls to rte_timer_reset(). */
+       uint64_t stop;    /**< Number of success calls to rte_timer_stop(). */
+       uint64_t manage;  /**< Number of calls to rte_timer_manage(). */
+       uint64_t pending; /**< Number of pending/running timers. */
+};
+#endif
+
+struct rte_timer;
+
+/**
+ * Callback function type for timer expiry.
+ */
+typedef void (rte_timer_cb_t)(struct rte_timer *, void *);
+
+/**
+ * A structure describing a timer in RTE.
+ */
+struct rte_timer
+{
+       LIST_ENTRY(rte_timer) next;    /**< Next and prev in list. */
+       volatile union rte_timer_status status; /**< Status of timer. */
+       uint64_t period;       /**< Period of timer (0 if not periodic). */
+       uint64_t expire;       /**< Time when timer expire. */
+       rte_timer_cb_t *f;     /**< Callback function. */
+       void *arg;             /**< Argument to callback function. */
+};
+
+
+#ifdef __cplusplus
+/**
+ * A C++ static initializer for a timer structure.
+ */
+#define RTE_TIMER_INITIALIZER {                 \
+       {0, 0},                                 \
+       {{RTE_TIMER_STOP, RTE_TIMER_NO_OWNER}}, \
+       0,                                      \
+       0,                                      \
+       NULL,                                   \
+       NULL,                                   \
+       }
+#else
+/**
+ * A static initializer for a timer structure.
+ */
+#define RTE_TIMER_INITIALIZER {                      \
+               .status = {{                         \
+                       .state = RTE_TIMER_STOP,     \
+                       .owner = RTE_TIMER_NO_OWNER, \
+               }},                                  \
+       }
+#endif
+
+/**
+ * Initialize the timer library.
+ *
+ * Initializes internal variables (list, locks and so on) for the RTE
+ * timer library.
+ */
+void rte_timer_subsystem_init(void);
+
+/**
+ * Initialize a timer handle.
+ *
+ * The rte_timer_init() function initializes the timer handle *tim*
+ * for use. No operations can be performed on a timer before it is
+ * initialized.
+ *
+ * @param tim
+ *   The timer to initialize.
+ */
+void rte_timer_init(struct rte_timer *tim);
+
+/**
+ * Reset and start the timer associated with the timer handle.
+ *
+ * The rte_timer_reset() function resets and starts the timer
+ * associated with the timer handle *tim*. When the timer expires after
+ * *ticks* HPET cycles, the function specified by *fct* will be called
+ * with the argument *arg* on core *tim_lcore*.
+ *
+ * If the timer associated with the timer handle is already running
+ * (in the RUNNING state), the function will fail. The user has to check
+ * the return value of the function to see if there is a chance that the
+ * timer is in the RUNNING state.
+ *
+ * If the timer is being configured on another core (the CONFIG state),
+ * it will also fail.
+ *
+ * If the timer is pending or stopped, it will be rescheduled with the
+ * new parameters.
+ *
+ * @param tim
+ *   The timer handle.
+ * @param ticks
+ *   The number of cycles (see rte_get_hpet_hz()) before the callback
+ *   function is called.
+ * @param type
+ *   The type can be either:
+ *   - PERIODICAL: The timer is automatically reloaded after execution
+ *     (returns to the PENDING state)
+ *   - SINGLE: The timer is one-shot, that is, the timer goes to a
+ *     STOPPED state after execution.
+ * @param tim_lcore
+ *   The ID of the lcore where the timer callback function has to be
+ *   executed. If tim_lcore is LCORE_ID_ANY, the timer library will
+ *   launch it on a different core for each call (round-robin).
+ * @param fct
+ *   The callback function of the timer.
+ * @param arg
+ *   The user argument of the callback function.
+ * @return
+ *   - 0: Success; the timer is scheduled.
+ *   - (-1): Timer is in the RUNNING or CONFIG state.
+ */
+int rte_timer_reset(struct rte_timer *tim, uint64_t ticks,
+                   enum rte_timer_type type, unsigned tim_lcore,
+                   rte_timer_cb_t fct, void *arg);
+
+
+/**
+ * Loop until rte_timer_reset() succeeds.
+ *
+ * Reset and start the timer associated with the timer handle. Always
+ * succeed. See rte_timer_reset() for details.
+ *
+ * @param tim
+ *   The timer handle.
+ * @param ticks
+ *   The number of cycles (see rte_get_hpet_hz()) before the callback
+ *   function is called.
+ * @param type
+ *   The type can be either:
+ *   - PERIODICAL: The timer is automatically reloaded after execution
+ *     (returns to the PENDING state)
+ *   - SINGLE: The timer is one-shot, that is, the timer goes to a
+ *     STOPPED state after execution.
+ * @param tim_lcore
+ *   The ID of the lcore where the timer callback function has to be
+ *   executed. If tim_lcore is LCORE_ID_ANY, the timer library will
+ *   launch it on a different core for each call (round-robin).
+ * @param fct
+ *   The callback function of the timer.
+ * @param arg
+ *   The user argument of the callback function.
+ */
+void
+rte_timer_reset_sync(struct rte_timer *tim, uint64_t ticks,
+                    enum rte_timer_type type, unsigned tim_lcore,
+                    rte_timer_cb_t fct, void *arg);
+
+/**
+ * Stop a timer.
+ *
+ * The rte_timer_stop() function stops the timer associated with the
+ * timer handle *tim*. It may fail if the timer is currently running or
+ * being configured.
+ *
+ * If the timer is pending or stopped (for instance, already expired),
+ * the function will succeed. The timer handle tim must have been
+ * initialized using rte_timer_init(), otherwise, undefined behavior
+ * will occur.
+ *
+ * This function can be called safely from a timer callback. If it
+ * succeeds, the timer is not referenced anymore by the timer library
+ * and the timer structure can be freed (even in the callback
+ * function).
+ *
+ * @param tim
+ *   The timer handle.
+ * @return
+ *   - 0: Success; the timer is stopped.
+ *   - (-1): The timer is in the RUNNING or CONFIG state.
+ */
+int rte_timer_stop(struct rte_timer *tim);
+
+
+/**
+ * Loop until rte_timer_stop() succeeds.
+ *
+ * After a call to this function, the timer identified by *tim* is
+ * stopped. See rte_timer_stop() for details.
+ *
+ * @param tim
+ *   The timer handle.
+ */
+void rte_timer_stop_sync(struct rte_timer *tim);
+
+/**
+ * Test if a timer is pending.
+ *
+ * The rte_timer_pending() function tests the PENDING status
+ * of the timer handle *tim*. A PENDING timer is one that has been
+ * scheduled and whose function has not yet been called.
+ *
+ * @param tim
+ *   The timer handle.
+ * @return
+ *   - 0: The timer is not pending.
+ *   - 1: The timer is pending.
+ */
+int rte_timer_pending(struct rte_timer *tim);
+
+/**
+ * Manage the timer list and execute callback functions.
+ *
+ * This function must be called periodically from all cores
+ * main_loop(). It browses the list of pending timers and runs all
+ * timers that are expired.
+ *
+ * The precision of the timer depends on the call frequency of this
+ * function. However, the more often the function is called, the more
+ * CPU resources it will use.
+ */
+void rte_timer_manage(void);
+
+/**
+ * Dump statistics about timers.
+ */
+void rte_timer_dump_stats(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_TIMER_H_ */
diff --git a/mk/arch/i686/rte.vars.mk b/mk/arch/i686/rte.vars.mk
new file mode 100644 (file)
index 0000000..6f8e474
--- /dev/null
@@ -0,0 +1,59 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+#
+# arch:
+#
+#   - define ARCH variable (overridden by cmdline or by previous
+#     optional define in machine .mk)
+#   - define CROSS variable (overridden by cmdline or previous define
+#     in machine .mk)
+#   - define CPU_CFLAGS variable (overridden by cmdline or previous
+#     define in machine .mk)
+#   - define CPU_LDFLAGS variable (overridden by cmdline or previous
+#     define in machine .mk)
+#   - define CPU_ASFLAGS variable (overridden by cmdline or previous
+#     define in machine .mk)
+#   - may override any previously defined variable
+#
+# examples for CONFIG_RTE_ARCH: i686, x86_64, x86_64_32
+#
+
+ARCH  ?= i386
+CROSS ?=
+
+CPU_CFLAGS  ?= -m32
+CPU_LDFLAGS ?= -m elf_i386
+CPU_ASFLAGS ?= -felf
+
+export ARCH CROSS CPU_CFLAGS CPU_LDFLAGS CPU_ASFLAGS
diff --git a/mk/arch/x86_64/rte.vars.mk b/mk/arch/x86_64/rte.vars.mk
new file mode 100644 (file)
index 0000000..7d5beff
--- /dev/null
@@ -0,0 +1,59 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+#
+# arch:
+#
+#   - define ARCH variable (overridden by cmdline or by previous
+#     optional define in machine .mk)
+#   - define CROSS variable (overridden by cmdline or previous define
+#     in machine .mk)
+#   - define CPU_CFLAGS variable (overridden by cmdline or previous
+#     define in machine .mk)
+#   - define CPU_LDFLAGS variable (overridden by cmdline or previous
+#     define in machine .mk)
+#   - define CPU_ASFLAGS variable (overridden by cmdline or previous
+#     define in machine .mk)
+#   - may override any previously defined variable
+#
+# examples for CONFIG_RTE_ARCH: i686, x86_64, x86_64_32
+#
+
+ARCH  ?= x86_64
+CROSS ?=
+
+CPU_CFLAGS  ?= -m64
+CPU_LDFLAGS ?= -melf_x86_64
+CPU_ASFLAGS ?= -felf64
+
+export ARCH CROSS CPU_CFLAGS CPU_LDFLAGS CPU_ASFLAGS
diff --git a/mk/exec-env/linuxapp/rte.app.mk b/mk/exec-env/linuxapp/rte.app.mk
new file mode 100644 (file)
index 0000000..2a3002d
--- /dev/null
@@ -0,0 +1,38 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+exec-env-appinstall:
+       @true
+
+exec-env-appclean:
+       @true
diff --git a/mk/exec-env/linuxapp/rte.vars.mk b/mk/exec-env/linuxapp/rte.vars.mk
new file mode 100644 (file)
index 0000000..e0ed298
--- /dev/null
@@ -0,0 +1,52 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+#
+# exec-env:
+#
+#   - define EXECENV_CFLAGS variable (overridden by cmdline)
+#   - define EXECENV_LDFLAGS variable (overridden by cmdline)
+#   - define EXECENV_ASFLAGS variable (overridden by cmdline)
+#   - may override any previously defined variable
+#
+# examples for RTE_EXEC_ENV: linuxapp, baremetal
+#
+
+EXECENV_CFLAGS  = -pthread
+EXECENV_LDFLAGS =
+EXECENV_ASFLAGS =
+
+# force applications to link with gcc/icc instead of using ld
+LINK_USING_CC := 1
+
+export EXECENV_CFLAGS EXECENV_LDFLAGS EXECENV_ASFLAGS
diff --git a/mk/internal/rte.build-post.mk b/mk/internal/rte.build-post.mk
new file mode 100644 (file)
index 0000000..fa7dd1b
--- /dev/null
@@ -0,0 +1,64 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+# build helper .mk
+
+# fast way, no need to do prebuild and postbuild
+ifeq ($(PREBUILD)$(POSTBUILD),)
+
+_postbuild: $(_BUILD)
+       @touch _postbuild
+
+else # slower way
+
+_prebuild: $(PREBUILD)
+       @touch _prebuild
+
+ifneq ($(_BUILD),)
+$(_BUILD): _prebuild
+else
+_BUILD = _prebuild
+endif
+
+_build: $(_BUILD)
+       @touch _build
+
+ifneq ($(POSTBUILD),)
+$(POSTBUILD): _build
+else
+POSTBUILD = _build
+endif
+
+_postbuild: $(POSTBUILD)
+       @touch _postbuild
+endif
\ No newline at end of file
diff --git a/mk/internal/rte.build-pre.mk b/mk/internal/rte.build-pre.mk
new file mode 100644 (file)
index 0000000..d472082
--- /dev/null
@@ -0,0 +1,34 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+_BUILD_TARGETS := _prebuild _build _postbuild
diff --git a/mk/internal/rte.clean-post.mk b/mk/internal/rte.clean-post.mk
new file mode 100644 (file)
index 0000000..6c859a6
--- /dev/null
@@ -0,0 +1,64 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+# clean helper .mk
+
+# fast way, no need to do preclean and postclean
+ifeq ($(PRECLEAN)$(POSTCLEAN),)
+
+_postclean: $(_CLEAN)
+       @touch _postclean
+
+else # slower way
+
+_preclean: $(PRECLEAN)
+       @touch _preclean
+
+ifneq ($(_CLEAN),)
+$(_CLEAN): _preclean
+else
+_CLEAN = _preclean
+endif
+
+_clean: $(_CLEAN)
+       @touch _clean
+
+ifneq ($(POSTCLEAN),)
+$(POSTCLEAN): _clean
+else
+POSTCLEAN = _clean
+endif
+
+_postclean: $(POSTCLEAN)
+       @touch _postclean
+endif
diff --git a/mk/internal/rte.clean-pre.mk b/mk/internal/rte.clean-pre.mk
new file mode 100644 (file)
index 0000000..aaec294
--- /dev/null
@@ -0,0 +1,34 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+_CLEAN_TARGETS := _preclean _clean _postclean
diff --git a/mk/internal/rte.compile-post.mk b/mk/internal/rte.compile-post.mk
new file mode 100644 (file)
index 0000000..f868e92
--- /dev/null
@@ -0,0 +1,35 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+# no rule to build these files
+$(DEPS-y) $(CMDS-y):
diff --git a/mk/internal/rte.compile-pre.mk b/mk/internal/rte.compile-pre.mk
new file mode 100644 (file)
index 0000000..8ef9750
--- /dev/null
@@ -0,0 +1,178 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+#
+# Common to rte.lib.mk, rte.app.mk, rte.obj.mk
+#
+
+SRCS-all := $(SRCS-y) $(SRCS-n) $(SRCS-)
+
+# convert source to obj file
+src2obj = $(strip $(patsubst %.c,%.o,\
+       $(patsubst %.S,%_s.o,$(1))))
+
+# add a dot in front of the file name
+dotfile = $(strip $(foreach f,$(1),\
+       $(join $(dir $f),.$(notdir $f))))
+
+# convert source/obj files into dot-dep filename (does not
+# include .S files)
+src2dep = $(strip $(call dotfile,$(patsubst %.c,%.o.d, \
+               $(patsubst %.S,,$(1)))))
+obj2dep = $(strip $(call dotfile,$(patsubst %.o,%.o.d,$(1))))
+
+# convert source/obj files into dot-cmd filename
+src2cmd = $(strip $(call dotfile,$(patsubst %.c,%.o.cmd, \
+               $(patsubst %.S,%_s.o.cmd,$(1)))))
+obj2cmd = $(strip $(call dotfile,$(patsubst %.o,%.o.cmd,$(1))))
+
+OBJS-y := $(call src2obj,$(SRCS-y))
+OBJS-n := $(call src2obj,$(SRCS-n))
+OBJS-  := $(call src2obj,$(SRCS-))
+OBJS-all := $(filter-out $(SRCS-all),$(OBJS-y) $(OBJS-n) $(OBJS-))
+
+DEPS-y := $(call src2dep,$(SRCS-y))
+DEPS-n := $(call src2dep,$(SRCS-n))
+DEPS-  := $(call src2dep,$(SRCS-))
+DEPS-all := $(DEPS-y) $(DEPS-n) $(DEPS-)
+DEPSTMP-all := $(DEPS-all:%.d=%.d.tmp)
+
+CMDS-y := $(call src2cmd,$(SRCS-y))
+CMDS-n := $(call src2cmd,$(SRCS-n))
+CMDS-  := $(call src2cmd,$(SRCS-))
+CMDS-all := $(CMDS-y) $(CMDS-n) $(CMDS-)
+
+-include $(DEPS-y) $(CMDS-y)
+
+# command to compile a .c file to generate an object
+ifeq ($(USE_HOST),1)
+C_TO_O = $(HOSTCC) -Wp,-MD,$(call obj2dep,$(@)).tmp $(HOST_CFLAGS) \
+       $(CFLAGS_$(@)) $(HOST_EXTRA_CFLAGS) -o $@ -c $<
+C_TO_O_STR = $(subst ','\'',$(C_TO_O)) #'# fix syntax highlight
+C_TO_O_DISP = $(if $(V),"$(C_TO_O_STR)","  HOSTCC $(@)")
+else
+C_TO_O = $(CC) -Wp,-MD,$(call obj2dep,$(@)).tmp $(CFLAGS) \
+       $(CFLAGS_$(@)) $(EXTRA_CFLAGS) -o $@ -c $<
+C_TO_O_STR = $(subst ','\'',$(C_TO_O)) #'# fix syntax highlight
+C_TO_O_DISP = $(if $(V),"$(C_TO_O_STR)","  CC $(@)")
+endif
+C_TO_O_CMD = "cmd_$@ = $(C_TO_O_STR)"
+C_TO_O_DO = @set -e; \
+       echo $(C_TO_O_DISP); \
+       $(C_TO_O) && \
+       echo $(C_TO_O_CMD) > $(call obj2cmd,$(@)) && \
+       sed 's,'$@':,dep_'$@' =,' $(call obj2dep,$(@)).tmp > $(call obj2dep,$(@)) && \
+       rm -f $(call obj2dep,$(@)).tmp
+
+# return an empty string if string are equal
+compare = $(strip $(subst $(1),,$(2)) $(subst $(2),,$(1)))
+
+# return a non-empty string if the dst file does not exist
+file_missing = $(call compare,$(wildcard $@),$@)
+
+# return a non-empty string if cmdline changed
+cmdline_changed = $(call compare,$(cmd_$@),$(1))
+
+# return a non-empty string if a dependency file does not exist
+depfile_missing = $(call compare,$(wildcard $(dep_$@)),$(dep_$@))
+
+# return an empty string if no prereq is newer than target
+#     - $^ -> names of all the prerequisites
+#     - $(wildcard $^) -> every existing prereq
+#     - $(filter-out $(wildcard $^),$^) -> every prereq that don't
+#       exist (filter-out removes existing ones from the list)
+#     - $? -> names of all the prerequisites newer than target
+depfile_newer = $(strip $(filter-out FORCE,$? \
+       $(filter-out $(wildcard $^),$^)))
+
+# return 1 if parameter is a non-empty string, else 0
+boolean = $(if $1,1,0)
+
+#
+# Compile .c file if needed
+# Note: dep_$$@ is from the .d file and DEP_$$@ can be specified by
+# user (by default it is empty)
+#
+.SECONDEXPANSION:
+%.o: %.c $$(wildcard $$(dep_$$@)) $$(DEP_$$(@)) FORCE
+       @[ -d $(dir $@) ] || mkdir -p $(dir $@)
+       $(if $(D),\
+               @echo -n "$< -> $@ " ; \
+               echo -n "file_missing=$(call boolean,$(file_missing)) " ; \
+               echo -n "cmdline_changed=$(call boolean,$(call cmdline_changed,$(C_TO_O_STR))) " ; \
+               echo -n "depfile_missing=$(call boolean,$(depfile_missing)) " ; \
+               echo "depfile_newer=$(call boolean,$(depfile_newer))")
+       $(if $(or \
+               $(file_missing),\
+               $(call cmdline_changed,$(C_TO_O_STR)),\
+               $(depfile_missing),\
+               $(depfile_newer)),\
+               $(C_TO_O_DO))
+
+# command to assemble a .S file to generate an object
+ifeq ($(USE_HOST),1)
+S_TO_O = $(CPP) $(HOST_CPPFLAGS) $($(@)_CPPFLAGS) $(HOST_EXTRA_CPPFLAGS) $< $(@).tmp && \
+       $(HOSTAS) $(HOST_ASFLAGS) $($(@)_ASFLAGS) $(HOST_EXTRA_ASFLAGS) -o $@ $(@).tmp
+S_TO_O_STR = $(subst ','\'',$(S_TO_O)) #'# fix syntax highlight
+S_TO_O_DISP =  $(if $(V),"$(S_TO_O_STR)","  HOSTAS $(@)")
+else
+S_TO_O = $(CPP) $(CPPFLAGS) $($(@)_CPPFLAGS) $(EXTRA_CPPFLAGS) $< -o $(@).tmp && \
+       $(AS) $(ASFLAGS) $($(@)_ASFLAGS) $(EXTRA_ASFLAGS) -o $@ $(@).tmp
+S_TO_O_STR = $(subst ','\'',$(S_TO_O)) #'# fix syntax highlight
+S_TO_O_DISP =  $(if $(V),"$(S_TO_O_STR)","  AS $(@)")
+endif
+
+S_TO_O_CMD = "cmd_$@ = $(S_TO_O_STR)"
+S_TO_O_DO = @set -e; \
+       echo $(S_TO_O_DISP); \
+       $(S_TO_O) && \
+       echo $(S_TO_O_CMD) > $(call obj2cmd,$(@))
+
+#
+# Assemble .S file if needed (mirrors the %.o: %.c rule above)
+# Note: DEP_$$@ can be specified by user (by default it is empty)
+#
+%_s.o: %.S $$(DEP_$$@) FORCE
+	@[ -d $(dir $@) ] || mkdir -p $(dir $@)
+	$(if $(D),\
+		@echo -n "$< -> $@ " ; \
+		echo -n "file_missing=$(call boolean,$(file_missing)) " ; \
+		echo -n "cmdline_changed=$(call boolean,$(call cmdline_changed,$(S_TO_O_STR))) " ; \
+		echo -n "depfile_missing=$(call boolean,$(depfile_missing)) " ; \
+		echo "depfile_newer=$(call boolean,$(depfile_newer)) ")
+	$(if $(or \
+		$(file_missing),\
+		$(call cmdline_changed,$(S_TO_O_STR)),\
+		$(depfile_missing),\
+		$(depfile_newer)),\
+		$(S_TO_O_DO))
diff --git a/mk/internal/rte.depdirs-post.mk b/mk/internal/rte.depdirs-post.mk
new file mode 100644 (file)
index 0000000..2a8e8fe
--- /dev/null
@@ -0,0 +1,44 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+.PHONY: depdirs
+depdirs:
+       @for d in $(DEPDIRS-y); do \
+               $(RTE_SDK)/scripts/depdirs-rule.sh $(S) $$d ; \
+       done
+
+.PHONY: depgraph
+depgraph:
+       @for d in $(DEPDIRS-y); do \
+               echo "    \"$(S)\" -> \"$$d\"" ; \
+       done
diff --git a/mk/internal/rte.depdirs-pre.mk b/mk/internal/rte.depdirs-pre.mk
new file mode 100644 (file)
index 0000000..8b4fb92
--- /dev/null
@@ -0,0 +1,34 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+# nothing
diff --git a/mk/internal/rte.exthelp-post.mk b/mk/internal/rte.exthelp-post.mk
new file mode 100644 (file)
index 0000000..b27ba27
--- /dev/null
@@ -0,0 +1,41 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+HELP_FILE = $(RTE_SDK)/doc/rst/developpers_reference/app_mkhelp.rst
+
+help:
+       @if [ ! -f $(HELP_FILE) ]; then \
+               echo "Cannot find RTE SDK documentation" ; \
+               exit 0 ; \
+       fi
+       @sed -e '1,/.*OF THE POSSIBILITY OF SUCH DAMAGE.*/ d' $(HELP_FILE)
diff --git a/mk/internal/rte.install-post.mk b/mk/internal/rte.install-post.mk
new file mode 100644 (file)
index 0000000..1e84a10
--- /dev/null
@@ -0,0 +1,101 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+# install helper .mk
+
+#
+# generate rules to install files in RTE_OUTPUT.
+#
+# arg1: relative install dir in RTE_OUTPUT
+# arg2: relative file name in a source dir (VPATH)
+#
+define install_rule
+$(addprefix $(RTE_OUTPUT)/$(1)/,$(notdir $(2))): $(2)
+       @echo "  INSTALL-FILE $(addprefix $(1)/,$(notdir $(2)))"
+       @[ -d $(RTE_OUTPUT)/$(1) ] || mkdir -p $(RTE_OUTPUT)/$(1)
+       @cp -rf $$(<) $(RTE_OUTPUT)/$(1)
+endef
+
+$(foreach dir,$(INSTALL-DIRS-y),\
+       $(foreach file,$(INSTALL-y-$(dir)),\
+               $(eval $(call install_rule,$(dir),$(file)))))
+
+
+#
+# generate rules to install symbolic links of files in RTE_OUTPUT.
+#
+# arg1: relative install dir in RTE_OUTPUT
+# arg2: relative file name in a source dir (VPATH)
+#
+define symlink_rule
+$(addprefix $(RTE_OUTPUT)/$(1)/,$(notdir $(2))): $(2)
+       @echo "  SYMLINK-FILE $(addprefix $(1)/,$(notdir $(2)))"
+       @[ -d $(RTE_OUTPUT)/$(1) ] || mkdir -p $(RTE_OUTPUT)/$(1)
+       $(Q)ln -nsf `$(RTE_SDK)/scripts/relpath.sh $$(<) $(RTE_OUTPUT)/$(1)` \
+               $(RTE_OUTPUT)/$(1)
+endef
+
+$(foreach dir,$(SYMLINK-DIRS-y),\
+       $(foreach file,$(SYMLINK-y-$(dir)),\
+               $(eval $(call symlink_rule,$(dir),$(file)))))
+
+
+# fast way, no need to do preinstall and postinstall
+ifeq ($(PREINSTALL)$(POSTINSTALL),)
+
+_postinstall: $(_INSTALL)
+       @touch _postinstall
+
+else # slower way
+
+_preinstall: $(PREINSTALL)
+       @touch _preinstall
+
+ifneq ($(_INSTALL),)
+$(_INSTALL): _preinstall
+else
+_INSTALL = _preinstall
+endif
+
+_install: $(_INSTALL)
+       @touch _install
+
+ifneq ($(POSTINSTALL),)
+$(POSTINSTALL): _install
+else
+POSTINSTALL = _install
+endif
+
+_postinstall: $(POSTINSTALL)
+       @touch _postinstall
+endif
diff --git a/mk/internal/rte.install-pre.mk b/mk/internal/rte.install-pre.mk
new file mode 100644 (file)
index 0000000..c8a9b30
--- /dev/null
@@ -0,0 +1,62 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+#
+# get all variables starting with "INSTALL-y-", and extract the
+# installation dir and path
+#
+INSTALL-y := $(filter INSTALL-y-%,$(.VARIABLES))
+INSTALL-n := $(filter INSTALL-n-%,$(.VARIABLES))
+INSTALL- := $(filter INSTALL--%,$(.VARIABLES))
+INSTALL-DIRS-y := $(patsubst INSTALL-y-%,%,$(INSTALL-y))
+INSTALL-FILES-y := $(foreach i,$(INSTALL-DIRS-y),\
+       $(addprefix $(RTE_OUTPUT)/$(i)/,$(notdir $(INSTALL-y-$(i)))))
+INSTALL-FILES-all := $(foreach i,$(INSTALL-DIRS-y) $(INSTALL-DIRS-n) $(INSTALL-DIRS-),\
+       $(addprefix $(RTE_OUTPUT)/$(i)/,$(notdir $(INSTALL-y-$(i)))))
+
+_INSTALL_TARGETS := _preinstall _install _postinstall
+
+#
+# get all variables starting with "SYMLINK-y-", and extract the
+# installation dir and path
+#
+SYMLINK-y := $(filter SYMLINK-y-%,$(.VARIABLES))
+SYMLINK-n := $(filter SYMLINK-n-%,$(.VARIABLES))
+SYMLINK- := $(filter SYMLINK--%,$(.VARIABLES))
+SYMLINK-DIRS-y := $(patsubst SYMLINK-y-%,%,$(SYMLINK-y))
+SYMLINK-FILES-y := $(foreach i,$(SYMLINK-DIRS-y),\
+       $(addprefix $(RTE_OUTPUT)/$(i)/,$(notdir $(SYMLINK-y-$(i)))))
+SYMLINK-FILES-all := $(foreach i,$(SYMLINK-DIRS-y) $(SYMLINK-DIRS-n) $(SYMLINK-DIRS-),\
+       $(addprefix $(RTE_OUTPUT)/$(i)/,$(notdir $(SYMLINK-y-$(i)))))
+
+_SYMLINK_TARGETS := _presymlink _symlink _postsymlink
diff --git a/mk/machine/atm/rte.vars.mk b/mk/machine/atm/rte.vars.mk
new file mode 100644 (file)
index 0000000..e49e48b
--- /dev/null
@@ -0,0 +1,61 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+#
+# machine:
+#
+#   - can define ARCH variable (overriden by cmdline value)
+#   - can define CROSS variable (overriden by cmdline value)
+#   - define MACHINE_CFLAGS variable (overriden by cmdline value)
+#   - define MACHINE_LDFLAGS variable (overriden by cmdline value)
+#   - define MACHINE_ASFLAGS variable (overriden by cmdline value)
+#   - can define CPU_CFLAGS variable (overriden by cmdline value) that
+#     overrides the one defined in arch.
+#   - can define CPU_LDFLAGS variable (overriden by cmdline value) that
+#     overrides the one defined in arch.
+#   - can define CPU_ASFLAGS variable (overriden by cmdline value) that
+#     overrides the one defined in arch.
+#   - may override any previously defined variable
+#
+
+# ARCH =
+# CROSS =
+# MACHINE_CFLAGS =
+# MACHINE_LDFLAGS =
+# MACHINE_ASFLAGS =
+# CPU_CFLAGS =
+# CPU_LDFLAGS =
+# CPU_ASFLAGS =
+
+MACHINE_CFLAGS = -march=atom
+CPUFLAGS = SSE SSE2 SSE3 SSSE3
diff --git a/mk/machine/default/rte.vars.mk b/mk/machine/default/rte.vars.mk
new file mode 100644 (file)
index 0000000..35d9d4c
--- /dev/null
@@ -0,0 +1,61 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+#
+# machine:
+#
+#   - can define ARCH variable (overriden by cmdline value)
+#   - can define CROSS variable (overriden by cmdline value)
+#   - define MACHINE_CFLAGS variable (overriden by cmdline value)
+#   - define MACHINE_LDFLAGS variable (overriden by cmdline value)
+#   - define MACHINE_ASFLAGS variable (overriden by cmdline value)
+#   - can define CPU_CFLAGS variable (overriden by cmdline value) that
+#     overrides the one defined in arch.
+#   - can define CPU_LDFLAGS variable (overriden by cmdline value) that
+#     overrides the one defined in arch.
+#   - can define CPU_ASFLAGS variable (overriden by cmdline value) that
+#     overrides the one defined in arch.
+#   - may override any previously defined variable
+#
+
+# ARCH =
+# CROSS =
+# MACHINE_CFLAGS =
+# MACHINE_LDFLAGS =
+# MACHINE_ASFLAGS =
+# CPU_CFLAGS =
+# CPU_LDFLAGS =
+# CPU_ASFLAGS =
+
+MACHINE_CFLAGS += -march=core2
+CPUFLAGS = SSE SSE2 SSE3
diff --git a/mk/machine/ivb/rte.vars.mk b/mk/machine/ivb/rte.vars.mk
new file mode 100644 (file)
index 0000000..cc5a3bd
--- /dev/null
@@ -0,0 +1,61 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+#
+# machine:
+#
+#   - can define ARCH variable (overriden by cmdline value)
+#   - can define CROSS variable (overriden by cmdline value)
+#   - define MACHINE_CFLAGS variable (overriden by cmdline value)
+#   - define MACHINE_LDFLAGS variable (overriden by cmdline value)
+#   - define MACHINE_ASFLAGS variable (overriden by cmdline value)
+#   - can define CPU_CFLAGS variable (overriden by cmdline value) that
+#     overrides the one defined in arch.
+#   - can define CPU_LDFLAGS variable (overriden by cmdline value) that
+#     overrides the one defined in arch.
+#   - can define CPU_ASFLAGS variable (overriden by cmdline value) that
+#     overrides the one defined in arch.
+#   - may override any previously defined variable
+#
+
+# ARCH =
+# CROSS =
+# MACHINE_CFLAGS =
+# MACHINE_LDFLAGS =
+# MACHINE_ASFLAGS =
+# CPU_CFLAGS =
+# CPU_LDFLAGS =
+# CPU_ASFLAGS =
+
+MACHINE_CFLAGS = -march=core-avx-i
+CPUFLAGS = SSE SSE2 SSE3 SSSE3 SSE4_1 SSE4_2 AES PCLMULQDQ AVX RDRAND FSGSBASE F16C
diff --git a/mk/machine/native/rte.vars.mk b/mk/machine/native/rte.vars.mk
new file mode 100644 (file)
index 0000000..5f4c7df
--- /dev/null
@@ -0,0 +1,111 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+#
+# machine:
+#
+#   - can define ARCH variable (overriden by cmdline value)
+#   - can define CROSS variable (overriden by cmdline value)
+#   - define MACHINE_CFLAGS variable (overriden by cmdline value)
+#   - define MACHINE_LDFLAGS variable (overriden by cmdline value)
+#   - define MACHINE_ASFLAGS variable (overriden by cmdline value)
+#   - can define CPU_CFLAGS variable (overriden by cmdline value) that
+#     overrides the one defined in arch.
+#   - can define CPU_LDFLAGS variable (overriden by cmdline value) that
+#     overrides the one defined in arch.
+#   - can define CPU_ASFLAGS variable (overriden by cmdline value) that
+#     overrides the one defined in arch.
+#   - may override any previously defined variable
+#
+
+# ARCH =
+# CROSS =
+# MACHINE_CFLAGS =
+# MACHINE_LDFLAGS =
+# MACHINE_ASFLAGS =
+# CPU_CFLAGS =
+# CPU_LDFLAGS =
+# CPU_ASFLAGS =
+
+MACHINE_CFLAGS = -march=native
+AUTO_CPUFLAGS = $(shell cat /proc/cpuinfo | grep flags -m 1)
+
+# adding flags to CPUFLAGS
+
+ifneq ($(filter $(AUTO_CPUFLAGS),sse),)
+ CPUFLAGS += SSE
+endif
+
+ifneq ($(filter $(AUTO_CPUFLAGS),sse2),)
+ CPUFLAGS += SSE2
+endif
+
+ifneq ($(filter $(AUTO_CPUFLAGS),sse3),)
+ CPUFLAGS += SSE3
+endif
+
+ifneq ($(filter $(AUTO_CPUFLAGS),ssse3),)
+ CPUFLAGS += SSSE3
+endif
+
+ifneq ($(filter $(AUTO_CPUFLAGS),sse4_1),)
+ CPUFLAGS += SSE4_1
+endif
+
+ifneq ($(filter $(AUTO_CPUFLAGS),sse4_2),)
+ CPUFLAGS += SSE4_2
+endif
+
+ifneq ($(filter $(AUTO_CPUFLAGS),aes),)
+ CPUFLAGS += AES
+endif
+
+ifneq ($(filter $(AUTO_CPUFLAGS),pclmulqdq),)
+ CPUFLAGS += PCLMULQDQ
+endif
+
+ifneq ($(filter $(AUTO_CPUFLAGS),avx),)
+ CPUFLAGS += AVX
+endif
+
+ifneq ($(filter $(AUTO_CPUFLAGS),rdrnd),)
+ CPUFLAGS += RDRAND
+endif
+
+ifneq ($(filter $(AUTO_CPUFLAGS),fsgsbase),)
+ CPUFLAGS += FSGSBASE
+endif
+
+ifneq ($(filter $(AUTO_CPUFLAGS),f16c),)
+ CPUFLAGS += F16C
+endif
\ No newline at end of file
diff --git a/mk/machine/nhm/rte.vars.mk b/mk/machine/nhm/rte.vars.mk
new file mode 100644 (file)
index 0000000..9291d28
--- /dev/null
@@ -0,0 +1,61 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+#
+# machine:
+#
+#   - can define ARCH variable (overriden by cmdline value)
+#   - can define CROSS variable (overriden by cmdline value)
+#   - define MACHINE_CFLAGS variable (overriden by cmdline value)
+#   - define MACHINE_LDFLAGS variable (overriden by cmdline value)
+#   - define MACHINE_ASFLAGS variable (overriden by cmdline value)
+#   - can define CPU_CFLAGS variable (overriden by cmdline value) that
+#     overrides the one defined in arch.
+#   - can define CPU_LDFLAGS variable (overriden by cmdline value) that
+#     overrides the one defined in arch.
+#   - can define CPU_ASFLAGS variable (overriden by cmdline value) that
+#     overrides the one defined in arch.
+#   - may override any previously defined variable
+#
+
+# ARCH =
+# CROSS =
+# MACHINE_CFLAGS =
+# MACHINE_LDFLAGS =
+# MACHINE_ASFLAGS =
+# CPU_CFLAGS =
+# CPU_LDFLAGS =
+# CPU_ASFLAGS =
+
+MACHINE_CFLAGS = -march=corei7
+CPUFLAGS = SSE SSE2 SSE3 SSSE3 SSE4_1 SSE4_2
diff --git a/mk/machine/snb/rte.vars.mk b/mk/machine/snb/rte.vars.mk
new file mode 100644 (file)
index 0000000..63f3e6b
--- /dev/null
@@ -0,0 +1,61 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+#
+# machine:
+#
+#   - can define ARCH variable (overriden by cmdline value)
+#   - can define CROSS variable (overriden by cmdline value)
+#   - define MACHINE_CFLAGS variable (overriden by cmdline value)
+#   - define MACHINE_LDFLAGS variable (overriden by cmdline value)
+#   - define MACHINE_ASFLAGS variable (overriden by cmdline value)
+#   - can define CPU_CFLAGS variable (overriden by cmdline value) that
+#     overrides the one defined in arch.
+#   - can define CPU_LDFLAGS variable (overriden by cmdline value) that
+#     overrides the one defined in arch.
+#   - can define CPU_ASFLAGS variable (overriden by cmdline value) that
+#     overrides the one defined in arch.
+#   - may override any previously defined variable
+#
+
+# ARCH =
+# CROSS =
+# MACHINE_CFLAGS =
+# MACHINE_LDFLAGS =
+# MACHINE_ASFLAGS =
+# CPU_CFLAGS =
+# CPU_LDFLAGS =
+# CPU_ASFLAGS =
+
+MACHINE_CFLAGS = -march=corei7-avx
+CPUFLAGS = SSE SSE2 SSE3 SSSE3 SSE4_1 SSE4_2 AES PCLMULQDQ AVX
diff --git a/mk/machine/wsm/rte.vars.mk b/mk/machine/wsm/rte.vars.mk
new file mode 100644 (file)
index 0000000..98176c4
--- /dev/null
@@ -0,0 +1,61 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+#
+# machine:
+#
+#   - can define ARCH variable (overriden by cmdline value)
+#   - can define CROSS variable (overriden by cmdline value)
+#   - define MACHINE_CFLAGS variable (overriden by cmdline value)
+#   - define MACHINE_LDFLAGS variable (overriden by cmdline value)
+#   - define MACHINE_ASFLAGS variable (overriden by cmdline value)
+#   - can define CPU_CFLAGS variable (overriden by cmdline value) that
+#     overrides the one defined in arch.
+#   - can define CPU_LDFLAGS variable (overriden by cmdline value) that
+#     overrides the one defined in arch.
+#   - can define CPU_ASFLAGS variable (overriden by cmdline value) that
+#     overrides the one defined in arch.
+#   - may override any previously defined variable
+#
+
+# ARCH =
+# CROSS =
+# MACHINE_CFLAGS =
+# MACHINE_LDFLAGS =
+# MACHINE_ASFLAGS =
+# CPU_CFLAGS =
+# CPU_LDFLAGS =
+# CPU_ASFLAGS =
+
+MACHINE_CFLAGS = -march=corei7 -maes -mpclmul
+CPUFLAGS = SSE SSE2 SSE3 SSSE3 SSE4_1 SSE4_2 AES PCLMULQDQ
diff --git a/mk/rte.app.mk b/mk/rte.app.mk
new file mode 100644 (file)
index 0000000..019e7c3
--- /dev/null
@@ -0,0 +1,236 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+include $(RTE_SDK)/mk/internal/rte.compile-pre.mk
+include $(RTE_SDK)/mk/internal/rte.install-pre.mk
+include $(RTE_SDK)/mk/internal/rte.clean-pre.mk
+include $(RTE_SDK)/mk/internal/rte.build-pre.mk
+include $(RTE_SDK)/mk/internal/rte.depdirs-pre.mk
+
+# VPATH contains at least SRCDIR
+VPATH += $(SRCDIR)
+
+_BUILD = $(APP)
+_INSTALL = $(INSTALL-FILES-y) $(SYMLINK-FILES-y)
+_INSTALL += $(RTE_OUTPUT)/app/$(APP) $(RTE_OUTPUT)/app/$(APP).map
+POSTINSTALL += target-appinstall
+_CLEAN = doclean
+POSTCLEAN += target-appclean
+
+ifeq ($(NO_LDSCRIPT),)
+LDSCRIPT = $(RTE_LDSCRIPT)
+endif
+
+# default path for libs
+LDLIBS += -L$(RTE_SDK_BIN)/lib
+
+#
+# Include libraries depending on config if NO_AUTOLIBS is not set
+# Order is important: from higher level to lower level
+#
+ifeq ($(NO_AUTOLIBS),)
+
+ifeq ($(CONFIG_RTE_LIBRTE_IGB_PMD),y)
+LDLIBS += -lrte_pmd_igb
+endif
+
+ifeq ($(CONFIG_RTE_LIBRTE_IXGBE_PMD),y)
+LDLIBS += -lrte_pmd_ixgbe
+endif
+
+ifeq ($(CONFIG_RTE_LIBRTE_MBUF),y)
+LDLIBS += -lrte_mbuf
+endif
+
+ifeq ($(CONFIG_RTE_LIBRTE_CMDLINE),y)
+LDLIBS += -lrte_cmdline
+endif
+
+ifeq ($(CONFIG_RTE_LIBRTE_TIMER),y)
+LDLIBS += -lrte_timer
+endif
+
+ifeq ($(CONFIG_RTE_LIBRTE_HASH),y)
+LDLIBS += -lrte_hash
+endif
+
+ifeq ($(CONFIG_RTE_LIBRTE_LPM),y)
+LDLIBS += -lrte_lpm
+endif
+
+LDLIBS += --start-group
+
+ifeq ($(CONFIG_RTE_LIBRTE_ETHER),y)
+LDLIBS += -lethdev
+endif
+
+ifeq ($(CONFIG_RTE_LIBRTE_MALLOC),y)
+LDLIBS += -lrte_malloc
+endif
+
+ifeq ($(CONFIG_RTE_LIBRTE_MEMPOOL),y)
+LDLIBS += -lrte_mempool
+endif
+
+ifeq ($(CONFIG_RTE_LIBRTE_RING),y)
+LDLIBS += -lrte_ring
+endif
+
+ifeq ($(CONFIG_RTE_LIBC),y)
+LDLIBS += -lc
+endif
+
+ifeq ($(CONFIG_RTE_LIBGLOSS),y)
+LDLIBS += -lgloss
+endif
+
+ifeq ($(CONFIG_RTE_LIBRTE_EAL),y)
+LDLIBS += -lrte_eal
+endif
+
+LDLIBS += $(EXECENV_LDLIBS)
+
+LDLIBS += --end-group
+
+endif # ifeq ($(NO_AUTOLIBS),)
+
+LDLIBS += $(CPU_LDLIBS)
+
+.PHONY: all
+all: install
+
+.PHONY: install
+install: build _postinstall
+
+_postinstall: build
+
+.PHONY: build
+build: _postbuild
+
+exe2cmd = $(strip $(call dotfile,$(patsubst %,%.cmd,$(1))))
+
+ifeq ($(LINK_USING_CC),1)
+comma := ,
+LDLIBS := $(addprefix -Wl$(comma),$(LDLIBS))
+LDFLAGS := $(addprefix -Wl$(comma),$(LDFLAGS))
+EXTRA_LDFLAGS := $(addprefix -Wl$(comma),$(EXTRA_LDFLAGS))
+O_TO_EXE = $(CC) $(CFLAGS) $(LDFLAGS_$(@)) \
+       -Wl,-Map=$(@).map,--cref -o $@ $(OBJS-y) $(LDFLAGS) $(EXTRA_LDFLAGS) $(LDLIBS)
+else
+O_TO_EXE = $(LD) $(LDFLAGS) $(LDFLAGS_$(@)) $(EXTRA_LDFLAGS) \
+       -Map=$(@).map --cref -o $@ $(OBJS-y) $(LDLIBS)
+endif
+O_TO_EXE_STR = $(subst ','\'',$(O_TO_EXE)) #'# fix syntax highlight
+O_TO_EXE_DISP = $(if $(V),"$(O_TO_EXE_STR)","  LD $(@)")
+O_TO_EXE_CMD = "cmd_$@ = $(O_TO_EXE_STR)"
+O_TO_EXE_DO = @set -e; \
+       echo $(O_TO_EXE_DISP); \
+       $(O_TO_EXE) && \
+       echo $(O_TO_EXE_CMD) > $(call exe2cmd,$(@))
+
+-include .$(APP).cmd
+
+# path where libraries are retrieved
+LDLIBS_PATH := $(subst -Wl$(comma)-L,,$(filter -Wl$(comma)-L%,$(LDLIBS)))
+LDLIBS_PATH += $(subst -L,,$(filter -L%,$(LDLIBS)))
+
+# list of .a files that are linked to this application
+LDLIBS_NAMES := $(patsubst -l%,lib%.a,$(filter -l%,$(LDLIBS)))
+LDLIBS_NAMES += $(patsubst -Wl$(comma)-l%,lib%.a,$(filter -Wl$(comma)-l%,$(LDLIBS)))
+
+# list of found libraries files (useful for deps). If not found, the
+# library is silently ignored and dep won't be checked
+LDLIBS_FILES := $(wildcard $(foreach dir,$(LDLIBS_PATH),\
+       $(addprefix $(dir)/,$(LDLIBS_NAMES))))
+
+#
+# Compile executable file if needed
+#
+$(APP): $(OBJS-y) $(LDLIBS_FILES) $(DEP_$(APP)) $(LDSCRIPT) FORCE
+       @[ -d $(dir $@) ] || mkdir -p $(dir $@)
+       $(if $(D),\
+               @echo -n "$< -> $@ " ; \
+               echo -n "file_missing=$(call boolean,$(file_missing)) " ; \
+               echo -n "cmdline_changed=$(call boolean,$(call cmdline_changed,$(O_TO_EXE_STR))) " ; \
+               echo -n "depfile_missing=$(call boolean,$(depfile_missing)) " ; \
+               echo "depfile_newer=$(call boolean,$(depfile_newer)) ")
+       $(if $(or \
+               $(file_missing),\
+               $(call cmdline_changed,$(O_TO_EXE_STR)),\
+               $(depfile_missing),\
+               $(depfile_newer)),\
+               $(O_TO_EXE_DO))
+
+#
+# install app in $(RTE_OUTPUT)/app
+#
+$(RTE_OUTPUT)/app/$(APP): $(APP)
+       @echo "  INSTALL-APP $(APP)"
+       @[ -d $(RTE_OUTPUT)/app ] || mkdir -p $(RTE_OUTPUT)/app
+       $(Q)cp -f $(APP) $(RTE_OUTPUT)/app
+
+#
+# install app map file in $(RTE_OUTPUT)/app
+#
+$(RTE_OUTPUT)/app/$(APP).map: $(APP)
+       @echo "  INSTALL-MAP $(APP).map"
+       @[ -d $(RTE_OUTPUT)/app ] || mkdir -p $(RTE_OUTPUT)/app
+       $(Q)cp -f $(APP).map $(RTE_OUTPUT)/app
+
+#
+# Clean all generated files
+#
+.PHONY: clean
+clean: _postclean
+       $(Q)rm -f $(_BUILD_TARGETS) $(_INSTALL_TARGETS) $(_CLEAN_TARGETS)
+
+.PHONY: doclean
+doclean:
+       $(Q)rm -rf $(APP) $(OBJS-all) $(DEPS-all) $(DEPSTMP-all) \
+         $(CMDS-all) $(INSTALL-FILES-all) .$(APP).cmd
+
+
+include $(RTE_SDK)/mk/internal/rte.compile-post.mk
+include $(RTE_SDK)/mk/internal/rte.install-post.mk
+include $(RTE_SDK)/mk/internal/rte.clean-post.mk
+include $(RTE_SDK)/mk/internal/rte.build-post.mk
+include $(RTE_SDK)/mk/internal/rte.depdirs-post.mk
+
+ifneq ($(wildcard $(RTE_SDK)/mk/target/$(RTE_TARGET)/rte.app.mk),)
+include $(RTE_SDK)/mk/target/$(RTE_TARGET)/rte.app.mk
+else
+include $(RTE_SDK)/mk/target/generic/rte.app.mk
+endif
+
+.PHONY: FORCE
+FORCE:
diff --git a/mk/rte.doc.mk b/mk/rte.doc.mk
new file mode 100644 (file)
index 0000000..b57504a
--- /dev/null
@@ -0,0 +1,127 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+DEFAULT_DPI ?= 300
+
+ifeq ($(BASEDOCDIR),)
+$(error "must be called from RTE root Makefile")
+endif
+ifeq ($(DOCDIR),)
+$(error "must be called from RTE root Makefile")
+endif
+
+VPATH = $(abspath $(BASEDOCDIR)/$(DOCDIR))
+
+pngfiles = $(patsubst %.svg,%.png,$(SVG))
+pdfimgfiles = $(patsubst %.svg,%.pdf,$(SVG))
+htmlfiles = $(patsubst %.rst,%.html,$(RST))
+pdffiles = $(patsubst %.rst,%.pdf,$(RST))
+
+.PHONY: all doc clean
+
+compare = $(strip $(subst $(1),,$(2)) $(subst $(2),,$(1)))
+dirname = $(patsubst %/,%,$(dir $1))
+
+# windows only: this is needed for native programs that do not handle
+# unix-like paths on win32
+ifdef COMSPEC
+winpath = "$(shell cygpath --windows $(abspath $(1)))"
+else
+winpath = $(1)
+endif
+
+all doc: $(pngfiles) $(htmlfiles) $(pdffiles) $(DIRS)
+       @true
+
+htmldoc: $(pngfiles) $(htmlfiles) $(DIRS)
+       @true
+
+pdfdoc: $(pngfiles) $(pdffiles) $(DIRS)
+       @true
+
+doxydoc: $(pdfimgfiles) $(DIRS)
+       @true
+
+.PHONY: $(DIRS)
+$(DIRS):
+       @[ -d $(CURDIR)/$@ ] || mkdir -p $(CURDIR)/$@
+       $(Q)$(MAKE) DOCDIR=$(DOCDIR)/$@ BASEDOCDIR=$(BASEDOCDIR)/.. \
+               -f $(RTE_SDK)/doc/$(DOCDIR)/$@/Makefile -C $(CURDIR)/$@ $(MAKECMDGOALS)
+
+%.png: %.svg
+       @echo "  INKSCAPE $(@)"
+       $(Q)inkscape -d $(DEFAULT_DPI) -D -b ffffff -y 1.0 -e $(call winpath,$(@)) $(call winpath,$(<))
+
+%.pdf: %.svg
+       @echo "  INKSCAPE $(@)"
+       $(Q)inkscape -d $(DEFAULT_DPI) -D -b ffffff -y 1.0 -A $(call winpath,$(@)) $(call winpath,$(<))
+
+.SECONDEXPANSION:
+$(foreach f,$(RST),$(eval DEP_$(f:%.rst=%.html) = $(DEP_$(f))))
+%.html: %.rst $$(DEP_$$@)
+       @echo "  RST2HTML $(@)"
+       $(Q)mkdir -p `dirname $(@)` ; \
+       python $(BASEDOCDIR)/gen/gen-common.py html $(BASEDOCDIR) > $(BASEDOCDIR)/gen/rte.rst ; \
+       python $(BASEDOCDIR)/html/rst2html-highlight.py --link-stylesheet \
+               --stylesheet-path=$(BASEDOCDIR)/html/rte.css \
+               --strip-comments< $(<) > $(@) ; \
+
+# there is a bug in rst2pdf (issue 311): replacement of DSTDIR is not
+# what we expect: we should not have to add doc/
+ifdef COMSPEC
+WORKAROUND_PATH=$(BASEDOCDIR)
+else
+WORKAROUND_PATH=$(BASEDOCDIR)/doc
+endif
+
+.SECONDEXPANSION:
+$(foreach f,$(RST),$(eval DEP_$(f:%.rst=%.pdf) = $(DEP_$(f))))
+%.pdf: %.rst $$(DEP_$$@)
+       @echo "  RST2PDF $(@)"
+       $(Q)mkdir -p `dirname $(@)` ; \
+       python $(BASEDOCDIR)/gen/gen-common.py pdf $(BASEDOCDIR) > $(BASEDOCDIR)/gen/rte.rst ; \
+       rst2pdf -s $(BASEDOCDIR)/pdf/rte-stylesheet.json \
+               --default-dpi=300 < $(<) > $(@)
+
+CLEANDIRS = $(addsuffix _clean,$(DIRS))
+
+docclean clean: $(CLEANDIRS)
+       @rm -f $(htmlfiles) $(pdffiles) $(pngfiles) $(pdfimgfiles) $(BASEDOCDIR)/gen/rte.rst
+
+%_clean:
+       @if [ -f $(RTE_SDK)/doc/$(DOCDIR)/$*/Makefile -a -d $(CURDIR)/$* ]; then \
+               $(MAKE) DOCDIR=$(DOCDIR)/$* BASEDOCDIR=$(BASEDOCDIR)/.. \
+               -f $(RTE_SDK)/doc/$(DOCDIR)/$*/Makefile -C $(CURDIR)/$* clean ; \
+       fi
+
+.NOTPARALLEL:
diff --git a/mk/rte.extapp.mk b/mk/rte.extapp.mk
new file mode 100644 (file)
index 0000000..f6eb6d7
--- /dev/null
@@ -0,0 +1,56 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+MAKEFLAGS += --no-print-directory
+
+# we must create the output directory first, then re-invoke the same
+# Makefile from within that directory
+ifeq ($(NOT_FIRST_CALL),)
+
+NOT_FIRST_CALL = 1
+export NOT_FIRST_CALL
+
+all:
+       $(Q)mkdir -p $(RTE_OUTPUT)
+       $(Q)$(MAKE) -C $(RTE_OUTPUT) -f $(RTE_EXTMK) \
+               S=$(RTE_SRCDIR) O=$(RTE_OUTPUT) SRCDIR=$(RTE_SRCDIR)
+
+%::
+       $(Q)mkdir -p $(RTE_OUTPUT)
+       $(Q)$(MAKE) -C $(RTE_OUTPUT) -f $(RTE_EXTMK) $@ \
+               S=$(RTE_SRCDIR) O=$(RTE_OUTPUT) SRCDIR=$(RTE_SRCDIR)
+else
+include $(RTE_SDK)/mk/rte.app.mk
+endif
+
+include $(RTE_SDK)/mk/internal/rte.exthelp-post.mk
diff --git a/mk/rte.extlib.mk b/mk/rte.extlib.mk
new file mode 100644 (file)
index 0000000..af72d35
--- /dev/null
@@ -0,0 +1,56 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+MAKEFLAGS += --no-print-directory
+
+# we must create the output directory first, then re-invoke the same
+# Makefile from within that directory
+ifeq ($(NOT_FIRST_CALL),)
+
+NOT_FIRST_CALL = 1
+export NOT_FIRST_CALL
+
+all:
+       $(Q)mkdir -p $(RTE_OUTPUT)
+       $(Q)$(MAKE) -C $(RTE_OUTPUT) -f $(RTE_EXTMK) \
+               S=$(RTE_SRCDIR) O=$(RTE_OUTPUT) SRCDIR=$(RTE_SRCDIR)
+
+%::
+       $(Q)mkdir -p $(RTE_OUTPUT)
+       $(Q)$(MAKE) -C $(RTE_OUTPUT) -f $(RTE_EXTMK) $@ \
+               S=$(RTE_SRCDIR) O=$(RTE_OUTPUT) SRCDIR=$(RTE_SRCDIR)
+else
+include $(RTE_SDK)/mk/rte.lib.mk
+endif
+
+include $(RTE_SDK)/mk/internal/rte.exthelp-post.mk
diff --git a/mk/rte.extobj.mk b/mk/rte.extobj.mk
new file mode 100644 (file)
index 0000000..96f9dbb
--- /dev/null
@@ -0,0 +1,56 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+MAKEFLAGS += --no-print-directory
+
+# we must create the output directory first, then re-invoke the same
+# Makefile from within that directory
+ifeq ($(NOT_FIRST_CALL),)
+
+NOT_FIRST_CALL = 1
+export NOT_FIRST_CALL
+
+all:
+       $(Q)mkdir -p $(RTE_OUTPUT)
+       $(Q)$(MAKE) -C $(RTE_OUTPUT) -f $(RTE_EXTMK) \
+               S=$(RTE_SRCDIR) O=$(RTE_OUTPUT) SRCDIR=$(RTE_SRCDIR)
+
+%::
+       $(Q)mkdir -p $(RTE_OUTPUT)
+       $(Q)$(MAKE) -C $(RTE_OUTPUT) -f $(RTE_EXTMK) $@ \
+               S=$(RTE_SRCDIR) O=$(RTE_OUTPUT) SRCDIR=$(RTE_SRCDIR)
+else
+include $(RTE_SDK)/mk/rte.obj.mk
+endif
+
+include $(RTE_SDK)/mk/internal/rte.exthelp-post.mk
diff --git a/mk/rte.extvars.mk b/mk/rte.extvars.mk
new file mode 100644 (file)
index 0000000..b7e5c2c
--- /dev/null
@@ -0,0 +1,83 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+#
+# directory where sources are located
+#
+ifdef S
+ifeq ("$(origin S)", "command line")
+RTE_SRCDIR := $(abspath $(S))
+endif
+endif
+RTE_SRCDIR  ?= $(CURDIR)
+export RTE_SRCDIR
+
+#
+# Makefile to call once $(RTE_OUTPUT) is created
+#
+ifdef M
+ifeq ("$(origin M)", "command line")
+RTE_EXTMK := $(abspath $(M))
+endif
+endif
+RTE_EXTMK ?= $(RTE_SRCDIR)/Makefile
+export RTE_EXTMK
+
+RTE_SDK_BIN := $(RTE_SDK)/$(RTE_TARGET)
+
+#
+# Output files will go in a separate directory: default output is
+# $(RTE_SRCDIR)/build
+# Output dir can be given on the command line using "O="
+#
+ifdef O
+ifeq ("$(origin O)", "command line")
+RTE_OUTPUT := $(abspath $(O))
+endif
+endif
+RTE_OUTPUT ?= $(RTE_SRCDIR)/build
+export RTE_OUTPUT
+
+# if we are building an external application, include SDK
+# configuration and include project configuration if any
+include $(RTE_SDK_BIN)/.config
+ifneq ($(wildcard $(RTE_OUTPUT)/.config),)
+  include $(RTE_OUTPUT)/.config
+endif
+# remove double-quotes from config names
+RTE_ARCH := $(CONFIG_RTE_ARCH:"%"=%)
+RTE_MACHINE := $(CONFIG_RTE_MACHINE:"%"=%)
+RTE_EXEC_ENV := $(CONFIG_RTE_EXEC_ENV:"%"=%)
+RTE_TOOLCHAIN := $(CONFIG_RTE_TOOLCHAIN:"%"=%)
+
+
diff --git a/mk/rte.gnuconfigure.mk b/mk/rte.gnuconfigure.mk
new file mode 100644 (file)
index 0000000..f031be3
--- /dev/null
@@ -0,0 +1,76 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+include $(RTE_SDK)/mk/internal/rte.build-pre.mk
+include $(RTE_SDK)/mk/internal/rte.install-pre.mk
+include $(RTE_SDK)/mk/internal/rte.clean-pre.mk
+include $(RTE_SDK)/mk/internal/rte.depdirs-pre.mk
+
+# VPATH contains at least SRCDIR
+VPATH += $(SRCDIR)
+_BUILD = configure
+_INSTALL = $(INSTALL-FILES-y) $(SYMLINK-FILES-y)
+_CLEAN = doclean
+
+.PHONY: all
+all: install
+
+.PHONY: install
+install: build _postinstall
+
+_postinstall: build
+
+.PHONY: build
+build: _postbuild
+
+configure:
+       $(Q)cd $(CONFIGURE_PATH) ; \
+       ./configure --prefix $(CONFIGURE_PREFIX) $(CONFIGURE_ARGS) ; \
+       make ; \
+       make install
+
+.PHONY: clean
+clean: _postclean
+
+.PHONY: doclean
+doclean:
+       $(Q)cd $(CONFIGURE_PATH) ; make clean
+       $(Q)rm -f $(_INSTALL_TARGETS) $(_CLEAN_TARGETS)
+
+include $(RTE_SDK)/mk/internal/rte.build-post.mk
+include $(RTE_SDK)/mk/internal/rte.install-post.mk
+include $(RTE_SDK)/mk/internal/rte.clean-post.mk
+include $(RTE_SDK)/mk/internal/rte.depdirs-post.mk
+
+.PHONY: FORCE
+FORCE:
diff --git a/mk/rte.hostapp.mk b/mk/rte.hostapp.mk
new file mode 100644 (file)
index 0000000..15e1478
--- /dev/null
@@ -0,0 +1,125 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+# tell rte.compile-pre.mk to use HOSTCC instead of CC
+USE_HOST := 1
+include $(RTE_SDK)/mk/internal/rte.compile-pre.mk
+include $(RTE_SDK)/mk/internal/rte.install-pre.mk
+include $(RTE_SDK)/mk/internal/rte.clean-pre.mk
+include $(RTE_SDK)/mk/internal/rte.build-pre.mk
+include $(RTE_SDK)/mk/internal/rte.depdirs-pre.mk
+
+# VPATH contains at least SRCDIR
+VPATH += $(SRCDIR)
+
+_BUILD = $(HOSTAPP)
+_INSTALL = $(INSTALL-FILES-y) $(SYMLINK-FILES-y) $(RTE_OUTPUT)/hostapp/$(HOSTAPP)
+_CLEAN = doclean
+
+.PHONY: all
+all: install
+
+.PHONY: install
+install: build _postinstall
+
+_postinstall: build
+
+.PHONY: build
+build: _postbuild
+
+exe2cmd = $(strip $(call dotfile,$(patsubst %,%.cmd,$(1))))
+
+O_TO_EXE = $(HOSTCC) $(HOST_LDFLAGS) $(LDFLAGS_$(@)) \
+       $(EXTRA_HOST_LDFLAGS) -o $@ $(OBJS-y) $(LDLIBS)
+O_TO_EXE_STR = $(subst ','\'',$(O_TO_EXE)) #'# fix syntax highlight
+O_TO_EXE_DISP = $(if $(V),"$(O_TO_EXE_STR)","  HOSTLD $(@)")
+O_TO_EXE_CMD = "cmd_$@ = $(O_TO_EXE_STR)"
+O_TO_EXE_DO = @set -e; \
+       echo $(O_TO_EXE_DISP); \
+       $(O_TO_EXE) && \
+       echo $(O_TO_EXE_CMD) > $(call exe2cmd,$(@))
+
+-include .$(HOSTAPP).cmd
+
+# list of .a files that are linked to this application
+LDLIBS_FILES := $(wildcard \
+       $(addprefix $(RTE_OUTPUT)/lib/, \
+       $(patsubst -l%,lib%.a,$(filter -l%,$(LDLIBS)))))
+
+#
+# Compile executable file if needed
+#
+$(HOSTAPP): $(OBJS-y) $(LDLIBS_FILES) FORCE
+       @[ -d $(dir $@) ] || mkdir -p $(dir $@)
+       $(if $(D),\
+               @echo -n "$@ -> $< " ; \
+               echo -n "file_missing=$(call boolean,$(file_missing)) " ; \
+               echo -n "cmdline_changed=$(call boolean,$(call cmdline_changed,$(O_TO_EXE_STR))) " ; \
+               echo -n "depfile_missing=$(call boolean,$(depfile_missing)) " ; \
+               echo "depfile_newer=$(call boolean,$(depfile_newer)) ")
+       $(if $(or \
+               $(file_missing),\
+               $(call cmdline_changed,$(O_TO_EXE_STR)),\
+               $(depfile_missing),\
+               $(depfile_newer)),\
+               $(O_TO_EXE_DO))
+
+#
+# install app in $(RTE_OUTPUT)/hostapp
+#
+$(RTE_OUTPUT)/hostapp/$(HOSTAPP): $(HOSTAPP)
+       @echo "  INSTALL-HOSTAPP $(HOSTAPP)"
+       @[ -d $(RTE_OUTPUT)/hostapp ] || mkdir -p $(RTE_OUTPUT)/hostapp
+       $(Q)cp -f $(HOSTAPP) $(RTE_OUTPUT)/hostapp
+
+#
+# Clean all generated files
+#
+.PHONY: clean
+clean: _postclean
+       $(Q)rm -f $(_BUILD_TARGETS) $(_INSTALL_TARGETS) $(_CLEAN_TARGETS)
+
+.PHONY: doclean
+doclean:
+       $(Q)rm -rf $(HOSTAPP) $(OBJS-all) $(DEPS-all) $(DEPSTMP-all) \
+         $(CMDS-all) $(INSTALL-FILES-all) .$(HOSTAPP).cmd
+
+
+include $(RTE_SDK)/mk/internal/rte.compile-post.mk
+include $(RTE_SDK)/mk/internal/rte.install-post.mk
+include $(RTE_SDK)/mk/internal/rte.clean-post.mk
+include $(RTE_SDK)/mk/internal/rte.build-post.mk
+include $(RTE_SDK)/mk/internal/rte.depdirs-post.mk
+
+.PHONY: FORCE
+FORCE:
diff --git a/mk/rte.hostlib.mk b/mk/rte.hostlib.mk
new file mode 100644 (file)
index 0000000..fcaade1
--- /dev/null
@@ -0,0 +1,118 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+# tell rte.compile-pre.mk to use HOSTCC instead of CC
+USE_HOST := 1
+include $(RTE_SDK)/mk/internal/rte.compile-pre.mk
+include $(RTE_SDK)/mk/internal/rte.install-pre.mk
+include $(RTE_SDK)/mk/internal/rte.clean-pre.mk
+include $(RTE_SDK)/mk/internal/rte.build-pre.mk
+include $(RTE_SDK)/mk/internal/rte.depdirs-pre.mk
+
+# VPATH contains at least SRCDIR
+VPATH += $(SRCDIR)
+
+_BUILD = $(HOSTLIB)
+_INSTALL = $(INSTALL-FILES-y) $(SYMLINK-FILES-y) $(RTE_OUTPUT)/hostlib/$(HOSTLIB)
+_CLEAN = doclean
+
+.PHONY: all
+all: install
+
+.PHONY: install
+install: build _postinstall
+
+_postinstall: build
+
+.PHONY: build
+build: _postbuild
+
+exe2cmd = $(strip $(call dotfile,$(patsubst %,%.cmd,$(1))))
+
+O_TO_A = $(AR) crus $(HOSTLIB) $(OBJS-y)
+O_TO_A_STR = $(subst ','\'',$(O_TO_A)) #'# fix syntax highlight
+O_TO_A_DISP = $(if $(V),"$(O_TO_A_STR)","  HOSTAR $(@)")
+O_TO_A_CMD = "cmd_$@ = $(O_TO_A_STR)"
+O_TO_A_DO = @set -e; \
+       echo $(O_TO_A_DISP); \
+       $(O_TO_A) && \
+       echo $(O_TO_A_CMD) > $(call exe2cmd,$(@))
+
+-include .$(HOSTLIB).cmd
+
+#
+# Archive objects in .a file if needed
+#
+$(HOSTLIB): $(OBJS-y) FORCE
+       @[ -d $(dir $@) ] || mkdir -p $(dir $@)
+       $(if $(D),\
+               @echo -n "$@ -> $< " ; \
+               echo -n "file_missing=$(call boolean,$(file_missing)) " ; \
+               echo -n "cmdline_changed=$(call boolean,$(call cmdline_changed,$(O_TO_A_STR))) " ; \
+               echo -n "depfile_missing=$(call boolean,$(depfile_missing)) " ; \
+               echo "depfile_newer=$(call boolean,$(depfile_newer)) ")
+       $(if $(or \
+               $(file_missing),\
+               $(call cmdline_changed,$(O_TO_A_STR)),\
+               $(depfile_missing),\
+               $(depfile_newer)),\
+               $(O_TO_A_DO))
+
+#
+# install lib in $(RTE_OUTPUT)/hostlib
+#
+$(RTE_OUTPUT)/hostlib/$(HOSTLIB): $(HOSTLIB)
+       @echo "  INSTALL-HOSTLIB $(HOSTLIB)"
+       @[ -d $(RTE_OUTPUT)/hostlib ] || mkdir -p $(RTE_OUTPUT)/hostlib
+       $(Q)cp -f $(HOSTLIB) $(RTE_OUTPUT)/hostlib
+
+#
+# Clean all generated files
+#
+.PHONY: clean
+clean: _postclean
+
+.PHONY: doclean
+doclean:
+       $(Q)rm -rf $(HOSTLIB) $(OBJS-all) $(DEPS-all) $(DEPSTMP-all) \
+         $(CMDS-all) $(INSTALL-FILES-all)
+       $(Q)rm -f $(_BUILD_TARGETS) $(_INSTALL_TARGETS) $(_CLEAN_TARGETS)
+
+include $(RTE_SDK)/mk/internal/rte.compile-post.mk
+include $(RTE_SDK)/mk/internal/rte.install-post.mk
+include $(RTE_SDK)/mk/internal/rte.clean-post.mk
+include $(RTE_SDK)/mk/internal/rte.build-post.mk
+include $(RTE_SDK)/mk/internal/rte.depdirs-post.mk
+
+.PHONY: FORCE
+FORCE:
diff --git a/mk/rte.install.mk b/mk/rte.install.mk
new file mode 100644 (file)
index 0000000..9087aaf
--- /dev/null
@@ -0,0 +1,60 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+# install-only makefile (no build target)
+
+include $(RTE_SDK)/mk/internal/rte.install-pre.mk
+include $(RTE_SDK)/mk/internal/rte.clean-pre.mk
+include $(RTE_SDK)/mk/internal/rte.depdirs-pre.mk
+
+# VPATH contains at least SRCDIR
+VPATH += $(SRCDIR)
+
+_INSTALL = $(INSTALL-FILES-y) $(SYMLINK-FILES-y)
+_CLEAN = doclean
+
+.PHONY: all
+all: _postinstall
+       @true
+
+.PHONY: clean
+clean: _postclean
+
+.PHONY: doclean
+doclean:
+       @rm -rf $(INSTALL-FILES-all)
+       @rm -f $(_INSTALL_TARGETS) $(_CLEAN_TARGETS)
+
+include $(RTE_SDK)/mk/internal/rte.install-post.mk
+include $(RTE_SDK)/mk/internal/rte.clean-post.mk
+include $(RTE_SDK)/mk/internal/rte.depdirs-post.mk
diff --git a/mk/rte.lib.mk b/mk/rte.lib.mk
new file mode 100644 (file)
index 0000000..d3737fe
--- /dev/null
@@ -0,0 +1,116 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+include $(RTE_SDK)/mk/internal/rte.compile-pre.mk
+include $(RTE_SDK)/mk/internal/rte.install-pre.mk
+include $(RTE_SDK)/mk/internal/rte.clean-pre.mk
+include $(RTE_SDK)/mk/internal/rte.build-pre.mk
+include $(RTE_SDK)/mk/internal/rte.depdirs-pre.mk
+
+# VPATH contains at least SRCDIR
+VPATH += $(SRCDIR)
+
+_BUILD = $(LIB)
+_INSTALL = $(INSTALL-FILES-y) $(SYMLINK-FILES-y) $(RTE_OUTPUT)/lib/$(LIB)
+_CLEAN = doclean
+
+.PHONY: all
+all: install
+
+.PHONY: install
+install: build _postinstall
+
+_postinstall: build
+
+.PHONY: build
+build: _postbuild
+
+exe2cmd = $(strip $(call dotfile,$(patsubst %,%.cmd,$(1))))
+
+O_TO_A = $(AR) crus $(LIB) $(OBJS-y)
+O_TO_A_STR = $(subst ','\'',$(O_TO_A)) #'# fix syntax highlight
+O_TO_A_DISP = $(if $(V),"$(O_TO_A_STR)","  AR $(@)")
+O_TO_A_CMD = "cmd_$@ = $(O_TO_A_STR)"
+O_TO_A_DO = @set -e; \
+       echo $(O_TO_A_DISP); \
+       $(O_TO_A) && \
+       echo $(O_TO_A_CMD) > $(call exe2cmd,$(@))
+
+-include .$(LIB).cmd
+
+#
+# Archive objects in .a file if needed
+#
+$(LIB): $(OBJS-y) $(DEP_$(LIB)) FORCE
+       @[ -d $(dir $@) ] || mkdir -p $(dir $@)
+       $(if $(D),\
+               @echo -n "$< -> $@ " ; \
+               echo -n "file_missing=$(call boolean,$(file_missing)) " ; \
+               echo -n "cmdline_changed=$(call boolean,$(call cmdline_changed,$(O_TO_A_STR))) " ; \
+               echo -n "depfile_missing=$(call boolean,$(depfile_missing)) " ; \
+               echo "depfile_newer=$(call boolean,$(depfile_newer)) ")
+       $(if $(or \
+               $(file_missing),\
+               $(call cmdline_changed,$(O_TO_A_STR)),\
+               $(depfile_missing),\
+               $(depfile_newer)),\
+               $(O_TO_A_DO))
+
+#
+# install lib in $(RTE_OUTPUT)/lib
+#
+$(RTE_OUTPUT)/lib/$(LIB): $(LIB)
+       @echo "  INSTALL-LIB $(LIB)"
+       @[ -d $(RTE_OUTPUT)/lib ] || mkdir -p $(RTE_OUTPUT)/lib
+       $(Q)cp -f $(LIB) $(RTE_OUTPUT)/lib
+
+#
+# Clean all generated files
+#
+.PHONY: clean
+clean: _postclean
+
+.PHONY: doclean
+doclean:
+       $(Q)rm -rf $(LIB) $(OBJS-all) $(DEPS-all) $(DEPSTMP-all) \
+         $(CMDS-all) $(INSTALL-FILES-all)
+       $(Q)rm -f $(_BUILD_TARGETS) $(_INSTALL_TARGETS) $(_CLEAN_TARGETS)
+
+include $(RTE_SDK)/mk/internal/rte.compile-post.mk
+include $(RTE_SDK)/mk/internal/rte.install-post.mk
+include $(RTE_SDK)/mk/internal/rte.clean-post.mk
+include $(RTE_SDK)/mk/internal/rte.build-post.mk
+include $(RTE_SDK)/mk/internal/rte.depdirs-post.mk
+
+.PHONY: FORCE
+FORCE:
diff --git a/mk/rte.module.mk b/mk/rte.module.mk
new file mode 100644 (file)
index 0000000..3c95fae
--- /dev/null
@@ -0,0 +1,117 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+##### if sourced from kernel Kbuild system
+ifneq ($(KERNELRELEASE),)
+override EXTRA_CFLAGS = $(MODULE_CFLAGS) $(EXTRA_KERNEL_CFLAGS)
+obj-m          += $(MODULE).o
+ifneq ($(MODULE),$(notdir $(SRCS-y:%.c=%)))
+$(MODULE)-objs += $(notdir $(SRCS-y:%.c=%.o))
+endif
+
+##### if launched from rte build system
+else
+
+include $(RTE_SDK)/mk/internal/rte.install-pre.mk
+include $(RTE_SDK)/mk/internal/rte.clean-pre.mk
+include $(RTE_SDK)/mk/internal/rte.build-pre.mk
+include $(RTE_SDK)/mk/internal/rte.depdirs-pre.mk
+
+# VPATH contains at least SRCDIR
+VPATH += $(SRCDIR)
+
+_BUILD = $(MODULE).ko
+_INSTALL = $(INSTALL-FILES-y) $(SYMLINK-FILES-y) \
+       $(RTE_OUTPUT)/kmod/$(MODULE).ko
+_CLEAN = doclean
+
+SRCS_LINKS = $(addsuffix _link,$(SRCS-y))
+
+compare = $(strip $(subst $(1),,$(2)) $(subst $(2),,$(1)))
+
+.PHONY: all
+all: install
+
+.PHONY: install
+install: build _postinstall
+
+_postinstall: build
+
+.PHONY: build
+build: _postbuild
+
+# Link all sources in build directory
+%_link: FORCE
+       $(if $(call compare,$(notdir $*),$*),\
+       @if [ ! -f $(notdir $(*)) ]; then ln -nfs $(*) . ; fi,\
+       @if [ ! -f $(notdir $(*)) ]; then ln -nfs $(SRCDIR)/$(*) . ; fi)
+
+# build module
+$(MODULE).ko: $(SRCS_LINKS)
+       @if [ ! -f $(notdir Makefile) ]; then ln -nfs $(SRCDIR)/Makefile . ; fi
+       @$(MAKE) -C $(RTE_KERNELDIR) M=$(CURDIR) O=$(RTE_KERNELDIR)
+
+# install module in $(RTE_OUTPUT)/kmod
+$(RTE_OUTPUT)/kmod/$(MODULE).ko: $(MODULE).ko
+       @echo INSTALL-MODULE $(MODULE).ko
+       @[ -d $(RTE_OUTPUT)/kmod ] || mkdir -p $(RTE_OUTPUT)/kmod
+       @cp -f $(MODULE).ko $(RTE_OUTPUT)/kmod
+
+# install module
+modules_install:
+       @$(MAKE) -C $(RTE_KERNELDIR) M=$(CURDIR) O=$(RTE_KERNELDIR) \
+               modules_install
+
+.PHONY: clean
+clean: _postclean
+
+# do a make clean and remove links
+.PHONY: doclean
+doclean:
+       @if [ ! -f $(notdir Makefile) ]; then ln -nfs $(SRCDIR)/Makefile . ; fi
+       $(Q)$(MAKE) -C $(RTE_KERNELDIR) M=$(CURDIR) O=$(RTE_KERNELDIR) clean
+       @$(foreach FILE,$(SRCS-y) $(SRCS-n) $(SRCS-),\
+               if [ -h $(notdir $(FILE)) ]; then rm -f $(notdir $(FILE)) ; fi ;)
+       @if [ -h $(notdir Makefile) ]; then rm -f $(notdir Makefile) ; fi
+       @rm -f $(_BUILD_TARGETS) $(_INSTALL_TARGETS) $(_CLEAN_TARGETS) \
+               $(INSTALL-FILES-all)
+
+include $(RTE_SDK)/mk/internal/rte.install-post.mk
+include $(RTE_SDK)/mk/internal/rte.clean-post.mk
+include $(RTE_SDK)/mk/internal/rte.build-post.mk
+include $(RTE_SDK)/mk/internal/rte.depdirs-post.mk
+
+.PHONY: FORCE
+FORCE:
+
+endif
diff --git a/mk/rte.obj.mk b/mk/rte.obj.mk
new file mode 100644 (file)
index 0000000..6005b39
--- /dev/null
@@ -0,0 +1,114 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+include $(RTE_SDK)/mk/internal/rte.compile-pre.mk
+include $(RTE_SDK)/mk/internal/rte.install-pre.mk
+include $(RTE_SDK)/mk/internal/rte.clean-pre.mk
+include $(RTE_SDK)/mk/internal/rte.build-pre.mk
+include $(RTE_SDK)/mk/internal/rte.depdirs-pre.mk
+
+# VPATH contains at least SRCDIR
+VPATH += $(SRCDIR)
+
+ifneq ($(OBJ),)
+_BUILD = $(OBJ)
+else
+_BUILD = $(OBJS-y)
+endif
+_INSTALL = $(INSTALL-FILES-y) $(SYMLINK-FILES-y)
+_CLEAN = doclean
+
+.PHONY: all
+all: install
+
+.PHONY: install
+install: build _postinstall
+
+_postinstall: build
+
+.PHONY: build
+build: _postbuild
+
+ifneq ($(OBJ),)
+exe2cmd = $(strip $(call dotfile,$(patsubst %,%.cmd,$(1))))
+
+O_TO_O = $(LD) -r -o $(OBJ) $(OBJS-y)
+O_TO_O_STR = $(subst ','\'',$(O_TO_O)) #'# fix syntax highlight
+O_TO_O_DISP =  $(if $(V),"$(O_TO_O_STR)","  LD $(@)")
+O_TO_O_CMD = "cmd_$@ = $(O_TO_O_STR)"
+O_TO_O_DO = @set -e; \
+       echo $(O_TO_O_DISP); \
+       $(O_TO_O) && \
+       echo $(O_TO_O_CMD) > $(call exe2cmd,$(@))
+
+-include .$(OBJ).cmd
+
+#
+# Archive objects in .a file if needed
+#
+$(OBJ): $(OBJS-y) FORCE
+       @[ -d $(dir $@) ] || mkdir -p $(dir $@)
+       $(if $(D),\
+               @echo -n "$< -> $@ " ; \
+               echo -n "file_missing=$(call boolean,$(file_missing)) " ; \
+               echo -n "cmdline_changed=$(call boolean,$(call cmdline_changed,$(O_TO_O_STR))) " ; \
+               echo -n "depfile_missing=$(call boolean,$(depfile_missing)) " ; \
+               echo "depfile_newer=$(call boolean,$(depfile_newer)) ")
+       $(if $(or \
+               $(file_missing),\
+               $(call cmdline_changed,$(O_TO_O_STR)),\
+               $(depfile_missing),\
+               $(depfile_newer)),\
+               $(O_TO_O_DO))
+endif
+
+#
+# Clean all generated files
+#
+.PHONY: clean
+clean: _postclean
+
+.PHONY: doclean
+doclean:
+       @rm -rf $(OBJ) $(OBJS-all) $(DEPS-all) $(DEPSTMP-all) \
+         $(CMDS-all) $(INSTALL-FILES-all)
+       @rm -f $(_BUILD_TARGETS) $(_INSTALL_TARGETS) $(_CLEAN_TARGETS)
+
+include $(RTE_SDK)/mk/internal/rte.compile-post.mk
+include $(RTE_SDK)/mk/internal/rte.install-post.mk
+include $(RTE_SDK)/mk/internal/rte.clean-post.mk
+include $(RTE_SDK)/mk/internal/rte.build-post.mk
+include $(RTE_SDK)/mk/internal/rte.depdirs-post.mk
+
+.PHONY: FORCE
+FORCE:
diff --git a/mk/rte.sdkbuild.mk b/mk/rte.sdkbuild.mk
new file mode 100644 (file)
index 0000000..0a56063
--- /dev/null
@@ -0,0 +1,102 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+#
+# include rte.vars.mk if config file exists
+#
+ifeq (,$(wildcard $(RTE_OUTPUT)/.config))
+  $(error "need a make config first")
+else
+  include $(RTE_SDK)/mk/rte.vars.mk
+endif
+
+#
+# include .depdirs and define rules to order priorities between build
+# of directories.
+#
+-include $(RTE_OUTPUT)/.depdirs
+
+define depdirs_rule
+$(1): $(sort $(LOCAL_DEPDIRS-$(1)))
+endef
+
+$(foreach d,$(ROOTDIRS-y),$(eval $(call depdirs_rule,$(d))))
+
+#
+# build and clean targets
+#
+
+CLEANDIRS = $(addsuffix _clean,$(ROOTDIRS-y) $(ROOTDIRS-n) $(ROOTDIRS-))
+
+.PHONY: build
+build: $(ROOTDIRS-y)
+       @echo Build complete
+
+.PHONY: clean
+clean: $(CLEANDIRS)
+       @rm -rf $(RTE_OUTPUT)/include $(RTE_OUTPUT)/app \
+               $(RTE_OUTPUT)/hostapp $(RTE_OUTPUT)/lib \
+               $(RTE_OUTPUT)/hostlib $(RTE_OUTPUT)/kmod
+       @[ -d $(RTE_OUTPUT)/include ] || mkdir -p $(RTE_OUTPUT)/include
+       @$(RTE_SDK)/scripts/gen-config-h.sh $(RTE_OUTPUT)/.config \
+               > $(RTE_OUTPUT)/include/rte_config.h
+       $(Q)$(MAKE) -f $(RTE_SDK)/Makefile gcovclean
+       @echo Clean complete
+
+.SECONDEXPANSION:
+.PHONY: $(ROOTDIRS-y)
+$(ROOTDIRS-y):
+       @[ -d $(BUILDDIR)/$@ ] || mkdir -p $(BUILDDIR)/$@
+       @echo "== Build $@"
+       $(Q)$(MAKE) S=$@ -f $(RTE_SRCDIR)/$@/Makefile -C $(BUILDDIR)/$@ all
+
+%_clean:
+       @echo "== Clean $*"
+       $(Q)if [ -f $(RTE_SRCDIR)/$*/Makefile -a -d $(BUILDDIR)/$* ]; then \
+               $(MAKE) S=$* -f $(RTE_SRCDIR)/$*/Makefile -C $(BUILDDIR)/$* clean ; \
+       fi
+
+RTE_MAKE_SUBTARGET ?= all
+
+%_sub: $(addsuffix _sub,$(FULL_DEPDIRS-$(*)))
+       @echo $(addsuffix _sub,$(FULL_DEPDIRS-$(*)))
+       @[ -d $(BUILDDIR)/$* ] || mkdir -p $(BUILDDIR)/$*
+       @echo "== Build $*"
+       $(Q)$(MAKE) S=$* -f $(RTE_SRCDIR)/$*/Makefile -C $(BUILDDIR)/$* \
+               $(RTE_MAKE_SUBTARGET)
+
+.PHONY: all
+all: build
+
+.PHONY: FORCE
+FORCE:
diff --git a/mk/rte.sdkconfig.mk b/mk/rte.sdkconfig.mk
new file mode 100644 (file)
index 0000000..ed81c47
--- /dev/null
@@ -0,0 +1,109 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+INSTALL_CONFIGS := $(filter-out %~,\
+       $(patsubst $(RTE_SRCDIR)/config/defconfig_%,%,\
+       $(wildcard $(RTE_SRCDIR)/config/defconfig_*)))
+INSTALL_TARGETS := $(addsuffix _install,$(INSTALL_CONFIGS))
+
+.PHONY: config
+ifeq ($(RTE_CONFIG_TEMPLATE),)
+config:
+       @echo -n "No template specified. Use T=template " ; \
+       echo "among the following list:" ; \
+       for t in $(INSTALL_CONFIGS); do \
+               echo "  $$t" ; \
+       done
+else
+config: $(RTE_OUTPUT)/include/rte_config.h $(RTE_OUTPUT)/Makefile
+       $(Q)$(MAKE) depdirs
+       @echo "Configuration done"
+endif
+
+ifdef NODOTCONF
+$(RTE_OUTPUT)/.config: ;
+else
+$(RTE_OUTPUT)/.config: $(RTE_CONFIG_TEMPLATE) FORCE
+       @[ -d $(RTE_OUTPUT) ] || mkdir -p $(RTE_OUTPUT)
+       $(Q)if [ "$(RTE_CONFIG_TEMPLATE)" != "" -a -f "$(RTE_CONFIG_TEMPLATE)" ]; then \
+               if ! cmp -s $(RTE_CONFIG_TEMPLATE) $(RTE_OUTPUT)/.config; then \
+                       cp $(RTE_CONFIG_TEMPLATE) $(RTE_OUTPUT)/.config ; \
+               fi ; \
+       else \
+               echo -n "No template specified. Use T=template " ; \
+               echo "among the following list:" ; \
+               for t in $(INSTALL_CONFIGS); do \
+                       echo "  $$t" ; \
+               done ; \
+       fi
+endif
+
+# generate a Makefile for this build directory
+# use a relative path so it will continue to work even if we move the directory
+SDK_RELPATH=$(shell $(RTE_SDK)/scripts/relpath.sh $(abspath $(RTE_SRCDIR)) \
+                               $(abspath $(RTE_OUTPUT)))
+OUTPUT_RELPATH=$(shell $(RTE_SDK)/scripts/relpath.sh $(abspath $(RTE_OUTPUT)) \
+                               $(abspath $(RTE_SRCDIR)))
+$(RTE_OUTPUT)/Makefile:
+       @[ -d $(RTE_OUTPUT) ] || mkdir -p $(RTE_OUTPUT)
+       $(Q)$(RTE_SDK)/scripts/gen-build-mk.sh $(SDK_RELPATH) $(OUTPUT_RELPATH) \
+               > $(RTE_OUTPUT)/Makefile
+
+# clean installed files, and generate a new config header file
+# if NODOTCONF variable is defined, don't try to rebuild .config
+$(RTE_OUTPUT)/include/rte_config.h: $(RTE_OUTPUT)/.config
+       $(Q)rm -rf $(RTE_OUTPUT)/include $(RTE_OUTPUT)/app \
+               $(RTE_OUTPUT)/hostapp $(RTE_OUTPUT)/lib \
+               $(RTE_OUTPUT)/hostlib
+       @[ -d $(RTE_OUTPUT)/include ] || mkdir -p $(RTE_OUTPUT)/include
+       $(Q)$(RTE_SDK)/scripts/gen-config-h.sh $(RTE_OUTPUT)/.config \
+               > $(RTE_OUTPUT)/include/rte_config.h
+
+# generate the rte_config.h
+.PHONY: headerconfig
+headerconfig: $(RTE_OUTPUT)/include/rte_config.h
+       @true
+
+# check that .config is present, and if yes, check that rte_config.h
+# is up to date
+.PHONY: checkconfig
+checkconfig:
+       @if [ ! -f $(RTE_OUTPUT)/.config ]; then \
+               echo "No .config in build directory"; \
+               exit 1; \
+       fi
+       $(Q)$(MAKE) -f $(RTE_SDK)/mk/rte.sdkconfig.mk \
+               headerconfig NODOTCONF=1
+
+.PHONY: FORCE
+FORCE:
diff --git a/mk/rte.sdkdepdirs.mk b/mk/rte.sdkdepdirs.mk
new file mode 100644 (file)
index 0000000..bfda0b3
--- /dev/null
@@ -0,0 +1,65 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+ifeq (,$(wildcard $(RTE_OUTPUT)/.config))
+  $(error "need a make config first")
+endif
+ifeq (,$(wildcard $(RTE_OUTPUT)/Makefile))
+  $(error "need a make config first")
+endif
+
+# use a "for" in a shell to process dependencies: we don't want this
+# task to be run in parallel.
+.PHONY: depdirs
+depdirs:
+       @rm -f $(RTE_OUTPUT)/.depdirs ; \
+       for d in $(ROOTDIRS-y); do \
+               if [ -f $(RTE_SRCDIR)/$$d/Makefile ]; then \
+                       [ -d $(BUILDDIR)/$$d ] || mkdir -p $(BUILDDIR)/$$d ; \
+                       $(MAKE) S=$$d -f $(RTE_SRCDIR)/$$d/Makefile depdirs \
+                               >> $(RTE_OUTPUT)/.depdirs ; \
+               fi ; \
+       done
+
+.PHONY: depgraph
+depgraph:
+       @echo "digraph unix {" ; \
+       echo "    size=\"6,6\";" ; \
+       echo "    node [color=lightblue2, style=filled];" ; \
+       for d in $(ROOTDIRS-y); do \
+               echo "    \"root\" -> \"$$d\"" ; \
+               if [ -f $(RTE_SRCDIR)/$$d/Makefile ]; then \
+                       $(MAKE) S=$$d -f $(RTE_SRCDIR)/$$d/Makefile depgraph ; \
+               fi ; \
+       done ; \
+       echo "}"
diff --git a/mk/rte.sdkdoc.mk b/mk/rte.sdkdoc.mk
new file mode 100644 (file)
index 0000000..8d7a296
--- /dev/null
@@ -0,0 +1,73 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+ifdef O
+ifeq ("$(origin O)", "command line")
+$(error "Cannot use O= with doc target")
+endif
+endif
+
+ifdef T
+ifeq ("$(origin T)", "command line")
+$(error "Cannot use T= with doc target")
+endif
+endif
+
+.PHONY: doc
+doc:
+       $(Q)$(MAKE) -C $(RTE_SDK)/doc/images $@ BASEDOCDIR=.. DOCDIR=images
+       $(Q)$(MAKE) -f $(RTE_SDK)/doc/rst/Makefile -C $(RTE_SDK)/doc/pdf pdfdoc BASEDOCDIR=.. DOCDIR=rst
+       $(Q)$(MAKE) -f $(RTE_SDK)/mk/rte.sdkdoc.mk doxydoc
+
+.PHONY: pdfdoc
+pdfdoc:
+       $(Q)$(MAKE) -C $(RTE_SDK)/doc/images $@ BASEDOCDIR=.. DOCDIR=images
+       $(Q)$(MAKE) -f $(RTE_SDK)/doc/rst/Makefile -C $(RTE_SDK)/doc/pdf $@ BASEDOCDIR=.. DOCDIR=rst
+
+.PHONY: doxydoc
+doxydoc:
+       $(Q)$(MAKE) -C $(RTE_SDK)/doc/images $@ BASEDOCDIR=.. DOCDIR=images
+       $(Q)mkdir -p $(RTE_SDK)/doc/latex
+       $(Q)mkdir -p $(RTE_SDK)/doc/pdf/api
+       $(Q)cat $(RTE_SDK)/doc/gen/doxygen_pdf/Doxyfile | doxygen -
+       $(Q)mv $(RTE_SDK)/doc/images/*.pdf $(RTE_SDK)/doc/latex/
+       $(Q)sed -i s/darkgray/headercolour/g $(RTE_SDK)/doc/latex/doxygen.sty
+       $(Q)cp $(RTE_SDK)/doc/gen/doxygen_pdf/Makefile_doxygen $(RTE_SDK)/doc/latex/Makefile
+       $(Q)$(MAKE) -C $(RTE_SDK)/doc/latex
+       $(Q)cp $(RTE_SDK)/doc/latex/refman.pdf $(RTE_SDK)/doc/pdf/api/api.pdf
+
+.PHONY: docclean
+docclean:
+       $(Q)$(MAKE) -C $(RTE_SDK)/doc/images $@ BASEDOCDIR=.. DOCDIR=images
+       $(Q)$(MAKE) -f $(RTE_SDK)/doc/rst/Makefile -C $(RTE_SDK)/doc/pdf $@ BASEDOCDIR=.. DOCDIR=rst
+       $(Q)rm -rf $(RTE_SDK)/doc/pdf/api $(RTE_SDK)/doc/latex
diff --git a/mk/rte.sdkgcov.mk b/mk/rte.sdkgcov.mk
new file mode 100644 (file)
index 0000000..7ad1e74
--- /dev/null
@@ -0,0 +1,69 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+ifdef T
+ifeq ("$(origin T)", "command line")
+$(error "Cannot use T= with gcov target")
+endif
+endif
+
+ifeq (,$(wildcard $(RTE_OUTPUT)/.config))
+  $(error "need a make config first")
+else
+  include $(RTE_SDK)/mk/rte.vars.mk
+endif
+ifeq (,$(wildcard $(RTE_OUTPUT)/Makefile))
+  $(error "need a make config first")
+endif
+
+INPUTDIR  = $(RTE_OUTPUT)
+OUTPUTDIR =  $(RTE_OUTPUT)/gcov
+
+.PHONY: gcovclean
+gcovclean:
+	$(Q)find $(INPUTDIR)/build \( -name "*.gcno" -o -name "*.gcda" \) -exec rm {} \;
+       $(Q)rm -rf $(OUTPUTDIR)
+
+.PHONY: gcov
+gcov:
+       $(Q)for APP in test ; do \
+               mkdir -p $(OUTPUTDIR)/$$APP ; cd $(OUTPUTDIR)/$$APP ; \
+               for FIC in `strings $(RTE_OUTPUT)/app/$$APP | grep gcda | sed s,gcda,o,` ; do \
+                       SUBDIR=`basename $$FIC`;\
+                       mkdir $$SUBDIR ;\
+                       cd $$SUBDIR ;\
+                       $(GCOV) $(RTE_OUTPUT)/app/$$APP -o $$FIC > gcov.log; \
+                       cd - >/dev/null;\
+               done ; \
+               cd - >/dev/null; \
+       done
diff --git a/mk/rte.sdkinstall.mk b/mk/rte.sdkinstall.mk
new file mode 100644 (file)
index 0000000..59e3416
--- /dev/null
@@ -0,0 +1,76 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+ifdef O
+ifeq ("$(origin O)", "command line")
+$(error "Cannot use O= with install target")
+endif
+endif
+
+# Targets to install can be specified in command line. It can be a
+# target name or a name containing wildcards "*". Example:
+# x86_64-default-*-gcc
+ifndef T
+T=*
+endif
+
+#
+# install: build sdk for all supported targets
+#
+INSTALL_CONFIGS := $(patsubst $(RTE_SRCDIR)/config/defconfig_%,%,\
+       $(wildcard $(RTE_SRCDIR)/config/defconfig_$(T)))
+INSTALL_TARGETS := $(addsuffix _install,\
+       $(filter-out %~,$(INSTALL_CONFIGS)))
+
+.PHONY: install
+install: $(INSTALL_TARGETS)
+
+%_install:
+       @echo ================== Installing $*
+       $(Q)$(MAKE) config T=$* O=$*
+       $(Q)$(MAKE) all O=$*
+
+#
+# uninstall: remove all built sdk
+#
+UNINSTALL_TARGETS := $(addsuffix _uninstall,\
+       $(filter-out %~,$(INSTALL_CONFIGS)))
+
+.PHONY: uninstall
+uninstall: $(UNINSTALL_TARGETS)
+
+%_uninstall:
+       @echo ================== Uninstalling $*
+       $(Q)rm -rf $*
+
+
diff --git a/mk/rte.sdkroot.mk b/mk/rte.sdkroot.mk
new file mode 100644 (file)
index 0000000..5b87b68
--- /dev/null
@@ -0,0 +1,158 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+MAKEFLAGS += --no-print-directory
+
+# define Q to '@' or not. $(Q) is used to prefix all shell commands to
+# be executed silently.
+Q=@
+ifdef V
+ifeq ("$(origin V)", "command line")
+Q=
+endif
+endif
+export Q
+
+ifeq ($(RTE_SDK),)
+$(error RTE_SDK is not defined)
+endif
+
+RTE_SRCDIR = $(CURDIR)
+export RTE_SRCDIR
+
+BUILDING_RTE_SDK := 1
+export BUILDING_RTE_SDK
+
+#
+# We can specify the configuration template when doing the "make
+# config". For instance: make config T=i686-default-baremetal-gcc
+#
+RTE_CONFIG_TEMPLATE :=
+ifdef T
+ifeq ("$(origin T)", "command line")
+RTE_CONFIG_TEMPLATE := $(RTE_SRCDIR)/config/defconfig_$(T)
+endif
+endif
+export RTE_CONFIG_TEMPLATE
+
+#
+# Default output is $(RTE_SRCDIR)/build
+# output files will go in a separate directory
+#
+ifdef O
+ifeq ("$(origin O)", "command line")
+RTE_OUTPUT := $(abspath $(O))
+endif
+endif
+RTE_OUTPUT ?= $(RTE_SRCDIR)/build
+export RTE_OUTPUT
+
+# the directory where intermediate build files are stored, like *.o,
+# *.d, *.cmd, ...
+BUILDDIR = $(RTE_OUTPUT)/build
+export BUILDDIR
+
+export ROOTDIRS-y ROOTDIRS- ROOTDIRS-n
+
+.PHONY: default
+default: all
+
+.PHONY: config
+config:
+       $(Q)$(MAKE) -f $(RTE_SDK)/mk/rte.sdkconfig.mk config
+
+.PHONY: test
+test:
+       $(Q)$(MAKE) -f $(RTE_SDK)/mk/rte.sdktest.mk test
+
+.PHONY: fast_test ring_test mempool_test
+fast_test ring_test mempool_test:
+       $(Q)$(MAKE) -f $(RTE_SDK)/mk/rte.sdktest.mk $@
+
+.PHONY: testall
+testall:
+       $(Q)$(MAKE) -f $(RTE_SDK)/mk/rte.sdktestall.mk testall
+
+.PHONY: testimport
+testimport:
+       $(Q)$(MAKE) -f $(RTE_SDK)/mk/rte.sdktestall.mk testimport
+
+.PHONY: install
+install:
+       $(Q)$(MAKE) -f $(RTE_SDK)/mk/rte.sdkinstall.mk install
+
+.PHONY: uninstall
+uninstall:
+       $(Q)$(MAKE) -f $(RTE_SDK)/mk/rte.sdkinstall.mk uninstall
+
+.PHONY: doc
+doc:
+       $(Q)$(MAKE) -f $(RTE_SDK)/mk/rte.sdkdoc.mk doc
+
+.PHONY: pdfdoc
+pdfdoc:
+       $(Q)$(MAKE) -f $(RTE_SDK)/mk/rte.sdkdoc.mk pdfdoc
+
+.PHONY: doxydoc
+doxydoc:
+       $(Q)$(MAKE) -f $(RTE_SDK)/mk/rte.sdkdoc.mk doxydoc
+
+.PHONY: docclean
+docclean:
+       $(Q)$(MAKE) -f $(RTE_SDK)/mk/rte.sdkdoc.mk docclean
+
+.PHONY: depdirs
+depdirs:
+       $(Q)$(MAKE) -f $(RTE_SDK)/mk/rte.sdkdepdirs.mk depdirs
+
+.PHONY: depgraph
+depgraph:
+       $(Q)$(MAKE) -f $(RTE_SDK)/mk/rte.sdkdepdirs.mk depgraph
+
+.PHONY: gcovclean
+gcovclean:
+       $(Q)$(MAKE) -f $(RTE_SDK)/mk/rte.sdkgcov.mk gcovclean
+
+.PHONY: gcov
+gcov:
+       $(Q)$(MAKE) -f $(RTE_SDK)/mk/rte.sdkgcov.mk gcov
+
+.PHONY: help
+help:
+       @sed -e '1,/.*==================================.*/ d' \
+               doc/rst/developers_reference/sdk_mkhelp.rst
+
+# all other build targets
+%:
+       $(Q)$(MAKE) -f $(RTE_SDK)/mk/rte.sdkconfig.mk checkconfig
+       $(Q)$(MAKE) -f $(RTE_SDK)/mk/rte.sdkbuild.mk $@
diff --git a/mk/rte.sdktest.mk b/mk/rte.sdktest.mk
new file mode 100644 (file)
index 0000000..22ccbe3
--- /dev/null
@@ -0,0 +1,66 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+ifeq (,$(wildcard $(RTE_OUTPUT)/.config))
+  $(error "need a make config first")
+else
+  include $(RTE_SDK)/mk/rte.vars.mk
+endif
+ifeq (,$(wildcard $(RTE_OUTPUT)/Makefile))
+  $(error "need a make config first")
+endif
+
+DATE := $(shell date '+%Y%m%d-%H%M')
+AUTOTEST_DIR := $(RTE_OUTPUT)/autotest-$(DATE)
+
+DIR := $(shell basename $(RTE_OUTPUT))
+
+#
+# test: launch auto-tests, very simple for now.
+#
+.PHONY: test fast_test
+
+fast_test: BLACKLIST=-Ring,Mempool
+ring_test: WHITELIST=Ring
+mempool_test: WHITELIST=Mempool
+test fast_test ring_test mempool_test:
+       @mkdir -p $(AUTOTEST_DIR) ; \
+       cd $(AUTOTEST_DIR) ; \
+       if [ -f $(RTE_OUTPUT)/app/test ]; then \
+               python $(RTE_SDK)/app/test/autotest.py \
+                       $(RTE_OUTPUT)/app/test \
+                       $(DIR) $(RTE_TARGET) \
+                       $(BLACKLIST) $(WHITELIST); \
+       else \
+               echo "No test found, please do a 'make build' first, or specify O=" ; \
+       fi
diff --git a/mk/rte.sdktestall.mk b/mk/rte.sdktestall.mk
new file mode 100644 (file)
index 0000000..10f10d2
--- /dev/null
@@ -0,0 +1,65 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+ifdef O
+ifeq ("$(origin O)", "command line")
+$(error "Cannot use O= with testall target")
+endif
+endif
+
+# Targets to test can be specified in command line. It can be a
+# target name or a name containing wildcards "*". Example:
+# x86_64-default-*-gcc
+ifndef T
+T=*
+endif
+
+#
+# testall: launch test for all supported targets
+#
+TESTALL_CONFIGS := $(patsubst $(RTE_SRCDIR)/config/defconfig_%,%,\
+       $(wildcard $(RTE_SRCDIR)/config/defconfig_$(T)))
+TESTALL_TARGETS := $(addsuffix _testall,\
+       $(filter-out %~,$(TESTALL_CONFIGS)))
+.PHONY: testall
+testall: $(TESTALL_TARGETS)
+
+%_testall:
+       @echo ================== Test $*
+       $(Q)$(MAKE) test O=$*
+
+#
+# import autotests in documentation
+#
+testimport:
+       $(Q)$(RTE_SDK)/scripts/import_autotest.sh $(TESTALL_CONFIGS)
diff --git a/mk/rte.subdir.mk b/mk/rte.subdir.mk
new file mode 100644 (file)
index 0000000..f0ae3fb
--- /dev/null
@@ -0,0 +1,114 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+#
+# .mk to build subdirectories
+#
+
+include $(RTE_SDK)/mk/internal/rte.install-pre.mk
+include $(RTE_SDK)/mk/internal/rte.clean-pre.mk
+include $(RTE_SDK)/mk/internal/rte.build-pre.mk
+
+CLEANDIRS = $(addsuffix _clean,$(DIRS-y) $(DIRS-n) $(DIRS-))
+
+VPATH += $(SRCDIR)
+_BUILD = $(DIRS-y)
+_INSTALL = $(INSTALL-FILES-y) $(SYMLINK-FILES-y)
+_CLEAN = $(CLEANDIRS)
+
+.PHONY: all
+all: install
+
+.PHONY: install
+install: build _postinstall
+
+_postinstall: build
+
+.PHONY: build
+build: _postbuild
+
+.SECONDEXPANSION:
+.PHONY: $(DIRS-y)
+$(DIRS-y):
+       @[ -d $(CURDIR)/$@ ] || mkdir -p $(CURDIR)/$@
+       @echo "== Build $S/$@"
+       @$(MAKE) S=$S/$@ -f $(SRCDIR)/$@/Makefile -C $(CURDIR)/$@ all
+
+.PHONY: clean
+clean: _postclean
+
+%_clean:
+       @echo "== Clean $S/$*"
+       @if [ -f $(SRCDIR)/$*/Makefile -a -d $(CURDIR)/$* ]; then \
+               $(MAKE) S=$S/$* -f $(SRCDIR)/$*/Makefile -C $(CURDIR)/$* clean ; \
+       fi
+       @rm -f $(_BUILD_TARGETS) $(_INSTALL_TARGETS) $(_CLEAN_TARGETS)
+
+#
+# include .depdirs and define rules to order priorities between build
+# of directories.
+#
+include $(RTE_OUTPUT)/.depdirs
+
+define depdirs_rule
+$(1): $(sort $(patsubst $(S)/%,%,$(LOCAL_DEPDIRS-$(S)/$(1))))
+endef
+
+$(foreach d,$(DIRS-y),$(eval $(call depdirs_rule,$(d))))
+
+
+# use a "for" in a shell to process dependencies: we don't want this
+# task to be run in parallel.
+.PHONY: depdirs
+depdirs:
+       @for d in $(DIRS-y); do \
+               if [ -f $(SRCDIR)/$$d/Makefile ]; then \
+                       $(MAKE) S=$S/$$d -f $(SRCDIR)/$$d/Makefile depdirs ; \
+               fi ; \
+       done
+
+.PHONY: depgraph
+depgraph:
+       @for d in $(DIRS-y); do \
+               echo "    \"$(S)\" -> \"$(S)/$$d\"" ; \
+               if [ -f $(SRCDIR)/$$d/Makefile ]; then \
+                       $(MAKE) S=$S/$$d -f $(SRCDIR)/$$d/Makefile depgraph ; \
+               fi ; \
+       done
+
+include $(RTE_SDK)/mk/internal/rte.install-post.mk
+include $(RTE_SDK)/mk/internal/rte.clean-post.mk
+include $(RTE_SDK)/mk/internal/rte.build-post.mk
+
+.PHONY: FORCE
+FORCE:
diff --git a/mk/rte.vars.mk b/mk/rte.vars.mk
new file mode 100644 (file)
index 0000000..c56ee4e
--- /dev/null
@@ -0,0 +1,125 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+#
+# To be included at the beginning of all RTE user Makefiles. This
+# .mk will define the RTE environment variables by including the
+# config file of SDK. It also includes the config file from external
+# application if any.
+#
+
+ifeq ($(RTE_SDK),)
+$(error RTE_SDK is not defined)
+endif
+ifeq ($(wildcard $(RTE_SDK)),)
+$(error RTE_SDK variable points to an invalid location)
+endif
+
+# define Q to '@' or not. $(Q) is used to prefix all shell commands to
+# be executed silently.
+Q=@
+ifdef V
+ifeq ("$(origin V)", "command line")
+Q=
+endif
+endif
+export Q
+
+# if we are building SDK, only includes SDK configuration
+ifneq ($(BUILDING_RTE_SDK),)
+  include $(RTE_OUTPUT)/.config
+  # remove double-quotes from config names
+  RTE_ARCH := $(CONFIG_RTE_ARCH:"%"=%)
+  RTE_MACHINE := $(CONFIG_RTE_MACHINE:"%"=%)
+  RTE_EXEC_ENV := $(CONFIG_RTE_EXEC_ENV:"%"=%)
+  RTE_TOOLCHAIN := $(CONFIG_RTE_TOOLCHAIN:"%"=%)
+  RTE_TARGET := $(RTE_ARCH)-$(RTE_MACHINE)-$(RTE_EXEC_ENV)-$(RTE_TOOLCHAIN)
+  RTE_SDK_BIN := $(RTE_OUTPUT)
+endif
+
+# RTE_TARGET is deduced from config when we are building the SDK.
+# Else, when building an external app, RTE_TARGET must be specified
+# by the user.
+ifeq ($(RTE_TARGET),)
+$(error RTE_TARGET is not defined)
+endif
+
+ifeq ($(BUILDING_RTE_SDK),)
+# if we are building an external app/lib, include rte.extvars.mk that will
+# define RTE_OUTPUT, RTE_SRCDIR, RTE_EXTMK, RTE_SDK_BIN, (etc ...)
+include $(RTE_SDK)/mk/rte.extvars.mk
+endif
+
+ifeq ($(RTE_ARCH),)
+$(error RTE_ARCH is not defined)
+endif
+
+ifeq ($(RTE_MACHINE),)
+$(error RTE_MACHINE is not defined)
+endif
+
+ifeq ($(RTE_EXEC_ENV),)
+$(error RTE_EXEC_ENV is not defined)
+endif
+
+ifeq ($(RTE_TOOLCHAIN),)
+$(error RTE_TOOLCHAIN is not defined)
+endif
+
+# can be overridden by make command line or exported environment variable
+RTE_KERNELDIR ?= /lib/modules/$(shell uname -r)/build
+
+export RTE_TARGET
+export RTE_ARCH
+export RTE_MACHINE
+export RTE_EXEC_ENV
+export RTE_TOOLCHAIN
+
+# SRCDIR is the current source directory
+ifdef S
+SRCDIR := $(abspath $(RTE_SRCDIR)/$(S))
+else
+SRCDIR := $(RTE_SRCDIR)
+endif
+
+# helper: return y if option is set to y, else return an empty string
+testopt = $(if $(strip $(subst y,,$(1)) $(subst $(1),,y)),,y)
+
+# helper: return an empty string if option is set, else return y
+not = $(if $(strip $(subst y,,$(1)) $(subst $(1),,y)),,y)
+
+ifneq ($(wildcard $(RTE_SDK)/mk/target/$(RTE_TARGET)/rte.vars.mk),)
+include $(RTE_SDK)/mk/target/$(RTE_TARGET)/rte.vars.mk
+else
+include $(RTE_SDK)/mk/target/generic/rte.vars.mk
+endif
diff --git a/mk/target/generic/rte.app.mk b/mk/target/generic/rte.app.mk
new file mode 100644 (file)
index 0000000..e1f3b66
--- /dev/null
@@ -0,0 +1,43 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+#
+# define Makefile targets that are specific to an environment.
+#
+include $(RTE_SDK)/mk/exec-env/$(RTE_EXEC_ENV)/rte.app.mk
+
+.PHONY: exec-env-appinstall
+target-appinstall: exec-env-appinstall
+
+.PHONY: exec-env-appclean
+target-appclean: exec-env-appclean
diff --git a/mk/target/generic/rte.vars.mk b/mk/target/generic/rte.vars.mk
new file mode 100644 (file)
index 0000000..343c5a4
--- /dev/null
@@ -0,0 +1,150 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+#
+# This .mk is the generic target rte.vars.mk ; it includes .mk for
+# the specified machine, architecture, toolchain (compiler) and
+# executive environment.
+#
+
+#
+# machine:
+#
+#   - can define ARCH variable (overridden by cmdline value)
+#   - can define CROSS variable (overridden by cmdline value)
+#   - define MACHINE_CFLAGS variable (overridden by cmdline value)
+#   - define MACHINE_LDFLAGS variable (overridden by cmdline value)
+#   - define MACHINE_ASFLAGS variable (overridden by cmdline value)
+#   - can define CPU_CFLAGS variable (overridden by cmdline value) that
+#     overrides the one defined in arch.
+#   - can define CPU_LDFLAGS variable (overridden by cmdline value) that
+#     overrides the one defined in arch.
+#   - can define CPU_ASFLAGS variable (overridden by cmdline value) that
+#     overrides the one defined in arch.
+#
+# examples for RTE_MACHINE: default, pc, bensley, tylesburg, ...
+#
+include $(RTE_SDK)/mk/machine/$(RTE_MACHINE)/rte.vars.mk
+
+#
+# arch:
+#
+#   - define ARCH variable (overridden by cmdline or by previous
+#     optional define in machine .mk)
+#   - define CROSS variable (overridden by cmdline or previous define
+#     in machine .mk)
+#   - define CPU_CFLAGS variable (overridden by cmdline or previous
+#     define in machine .mk)
+#   - define CPU_LDFLAGS variable (overridden by cmdline or previous
+#     define in machine .mk)
+#   - define CPU_ASFLAGS variable (overridden by cmdline or previous
+#     define in machine .mk)
+#   - may override any previously defined variable
+#
+# examples for RTE_ARCH: i686, x86_64
+#
+include $(RTE_SDK)/mk/arch/$(RTE_ARCH)/rte.vars.mk
+
+#
+# toolchain:
+#
+#   - define CC, LD, AR, AS, ...
+#   - define TOOLCHAIN_CFLAGS variable (overridden by cmdline value)
+#   - define TOOLCHAIN_LDFLAGS variable (overridden by cmdline value)
+#   - define TOOLCHAIN_ASFLAGS variable (overridden by cmdline value)
+#   - may override any previously defined variable
+#
+# examples for RTE_TOOLCHAIN: gcc, icc
+#
+include $(RTE_SDK)/mk/toolchain/$(RTE_TOOLCHAIN)/rte.vars.mk
+
+#
+# exec-env:
+#
+#   - define EXECENV_CFLAGS variable (overridden by cmdline)
+#   - define EXECENV_LDFLAGS variable (overridden by cmdline)
+#   - define EXECENV_ASFLAGS variable (overridden by cmdline)
+#   - may override any previously defined variable
+#
+# examples for RTE_EXEC_ENV: linuxapp, baremetal
+#
+include $(RTE_SDK)/mk/exec-env/$(RTE_EXEC_ENV)/rte.vars.mk
+
+# Don't set CFLAGS/LDFLAGS flags for kernel module, all flags are
+# provided by Kbuild framework.
+ifeq ($(KERNELRELEASE),)
+
+# merge all CFLAGS
+CFLAGS := $(CPU_CFLAGS) $(EXECENV_CFLAGS) $(TOOLCHAIN_CFLAGS) $(MACHINE_CFLAGS)
+CFLAGS += $(TARGET_CFLAGS)
+
+# merge all LDFLAGS
+LDFLAGS := $(CPU_LDFLAGS) $(EXECENV_LDFLAGS) $(TOOLCHAIN_LDFLAGS) $(MACHINE_LDFLAGS)
+LDFLAGS += $(TARGET_LDFLAGS)
+
+# merge all ASFLAGS
+ASFLAGS := $(CPU_ASFLAGS) $(EXECENV_ASFLAGS) $(TOOLCHAIN_ASFLAGS) $(MACHINE_ASFLAGS)
+ASFLAGS += $(TARGET_ASFLAGS)
+
+# add default include and lib paths
+CFLAGS += -I$(RTE_OUTPUT)/include
+LDFLAGS += -L$(RTE_OUTPUT)/lib
+
+# always include rte_config.h: the one in $(RTE_OUTPUT)/include is
+# the configuration of SDK when $(BUILDING_RTE_SDK) is true, or the
+# configuration of the application if $(BUILDING_RTE_SDK) is not
+# defined.
+ifeq ($(BUILDING_RTE_SDK),1)
+# building sdk
+CFLAGS += -include $(RTE_OUTPUT)/include/rte_config.h
+ifeq ($(CONFIG_RTE_INSECURE_FUNCTION_WARNING),y)
+CFLAGS += -include $(RTE_OUTPUT)/include/rte_warnings.h
+endif
+else
+# if we are building an external application, include SDK's lib and
+# includes too
+CFLAGS += -I$(RTE_SDK_BIN)/include
+ifneq ($(wildcard $(RTE_OUTPUT)/include/rte_config.h),)
+CFLAGS += -include $(RTE_OUTPUT)/include/rte_config.h
+endif
+CFLAGS += -include $(RTE_SDK_BIN)/include/rte_config.h
+ifeq ($(CONFIG_RTE_INSECURE_FUNCTION_WARNING),y)
+CFLAGS += -include $(RTE_SDK_BIN)/include/rte_warnings.h
+endif
+LDFLAGS += -L$(RTE_SDK_BIN)/lib
+endif
+
+export CFLAGS
+export LDFLAGS
+
+endif
diff --git a/mk/toolchain/gcc/rte.toolchain-compat.mk b/mk/toolchain/gcc/rte.toolchain-compat.mk
new file mode 100644 (file)
index 0000000..4e65122
--- /dev/null
@@ -0,0 +1,93 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+#
+# CPUID-related options
+#
+# This was added to support compiler versions which might not support all the
+# flags we need
+#
+
+#find out GCC version
+
+GCC_MAJOR_VERSION = $(shell gcc -dumpversion | cut -f1 -d.)
+
+# if GCC is not 4.x
+ifneq ($(GCC_MAJOR_VERSION),4)
+       MACHINE_CFLAGS =
+$(warning You are not using GCC 4.x. This is neither supported, nor tested.)
+
+
+else
+       GCC_MINOR_VERSION = $(shell gcc -dumpversion | cut -f2 -d.)
+
+# GCC graceful degradation
+# GCC 4.2.x - added support for generic target
+# GCC 4.3.x - added support for core2, ssse3, sse4.1, sse4.2
+# GCC 4.4.x - added support for avx, aes, pclmul
+# GCC 4.5.x - added support for atom
+# GCC 4.6.x - added support for corei7, corei7-avx
+# GCC 4.7.x - added support for fsgsbase, rdrnd, f16c, core-avx-i, core-avx2
+
+       ifeq ($(shell test $(GCC_MINOR_VERSION) -lt 7 && echo 1), 1)
+               MACHINE_CFLAGS := $(patsubst -march=core-avx-i,-march=corei7-avx,$(MACHINE_CFLAGS))
+               MACHINE_CFLAGS := $(patsubst -march=core-avx2,-march=corei7-avx,$(MACHINE_CFLAGS))
+       endif
+       ifeq ($(shell test $(GCC_MINOR_VERSION) -lt 6 && echo 1), 1)
+               MACHINE_CFLAGS := $(patsubst -march=corei7-avx,-march=core2 -maes -mpclmul -mavx,$(MACHINE_CFLAGS))
+               MACHINE_CFLAGS := $(patsubst -march=corei7,-march=core2 -maes -mpclmul,$(MACHINE_CFLAGS))
+       endif
+       ifeq ($(shell test $(GCC_MINOR_VERSION) -lt 5 && echo 1), 1)
+               MACHINE_CFLAGS := $(patsubst -march=atom,-march=core2 -mssse3,$(MACHINE_CFLAGS))
+       endif
+       ifeq ($(shell test $(GCC_MINOR_VERSION) -lt 4 && echo 1), 1)
+               MACHINE_CFLAGS := $(filter-out -mavx -mpclmul -maes,$(MACHINE_CFLAGS))
+       endif
+       ifeq ($(shell test $(GCC_MINOR_VERSION) -lt 3 && echo 1), 1)
+               MACHINE_CFLAGS := $(filter-out -msse% -mssse%,$(MACHINE_CFLAGS))
+               MACHINE_CFLAGS := $(patsubst -march=core2,-march=generic,$(MACHINE_CFLAGS))
+               MACHINE_CFLAGS += -msse3
+       endif
+       ifeq ($(shell test $(GCC_MINOR_VERSION) -lt 2 && echo 1), 1)
+               MACHINE_CFLAGS := $(filter-out -march% -mtune% -msse%,$(MACHINE_CFLAGS))
+       endif
+endif
+MACHINE_CFLAGS += $(addprefix -DRTE_MACHINE_CPUFLAG_,$(CPUFLAGS))
+
+# To strip whitespace
+comma:= ,
+empty:=
+space:= $(empty) $(empty)
+CPUFLAGSTMP1 := $(addprefix RTE_CPUFLAG_,$(CPUFLAGS))
+CPUFLAGSTMP2 := $(subst $(space),$(comma),$(CPUFLAGSTMP1))
+MACHINE_CFLAGS += -DRTE_COMPILE_TIME_CPUFLAGS=$(CPUFLAGSTMP2)
diff --git a/mk/toolchain/gcc/rte.vars.mk b/mk/toolchain/gcc/rte.vars.mk
new file mode 100644 (file)
index 0000000..d640515
--- /dev/null
@@ -0,0 +1,87 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+#
+# toolchain:
+#
+#   - define CC, LD, AR, AS, ... (overridden by cmdline value)
+#   - define TOOLCHAIN_CFLAGS variable (overridden by cmdline value)
+#   - define TOOLCHAIN_LDFLAGS variable (overridden by cmdline value)
+#   - define TOOLCHAIN_ASFLAGS variable (overridden by cmdline value)
+#
+# examples for RTE_TOOLCHAIN: gcc, icc
+#
+
+CC        = $(CROSS)gcc
+CPP       = $(CROSS)cpp
+# for now, we use nasm rather than the GNU assembler (as).
+# AS      = $(CROSS)as
+AS        = nasm
+AR        = $(CROSS)ar
+LD        = $(CROSS)ld
+OBJCOPY   = $(CROSS)objcopy
+OBJDUMP   = $(CROSS)objdump
+STRIP     = $(CROSS)strip
+READELF   = $(CROSS)readelf
+GCOV      = $(CROSS)gcov
+
+HOSTCC    = gcc
+HOSTAS    = as
+
+TOOLCHAIN_ASFLAGS =
+TOOLCHAIN_CFLAGS =
+TOOLCHAIN_LDFLAGS =
+
+ifeq ($(CONFIG_RTE_LIBRTE_GCOV),y)
+TOOLCHAIN_CFLAGS += --coverage
+TOOLCHAIN_LDFLAGS += --coverage
+ifeq (,$(findstring -O0,$(EXTRA_CFLAGS)))
+  $(warning "EXTRA_CFLAGS doesn't contains -O0, coverage will be inaccurate with optimizations enabled")
+endif
+endif
+
+WERROR_FLAGS := -W -Wall -Werror -Wstrict-prototypes -Wmissing-prototypes
+WERROR_FLAGS += -Wmissing-declarations -Wold-style-definition -Wpointer-arith
+WERROR_FLAGS += -Wcast-align -Wnested-externs -Wcast-qual
+WERROR_FLAGS += -Wformat-nonliteral -Wformat-security
+
+ifeq ($(CONFIG_RTE_EXEC_ENV),"linuxapp")
+# These trigger warnings in newlib, so can't be used for baremetal
+WERROR_FLAGS += -Wundef -Wwrite-strings
+endif
+
+# process cpu flags
+include $(RTE_SDK)/mk/toolchain/$(RTE_TOOLCHAIN)/rte.toolchain-compat.mk
+
+export CC AS AR LD OBJCOPY OBJDUMP STRIP READELF
+export TOOLCHAIN_CFLAGS TOOLCHAIN_LDFLAGS TOOLCHAIN_ASFLAGS
diff --git a/mk/toolchain/icc/rte.toolchain-compat.mk b/mk/toolchain/icc/rte.toolchain-compat.mk
new file mode 100644 (file)
index 0000000..5540f86
--- /dev/null
@@ -0,0 +1,82 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+#
+# CPUID-related options
+#
+# This was added to support compiler versions which might not support all the
+# flags we need
+#
+
+# find out ICC version
+
+ICC_MAJOR_VERSION = $(shell icc -dumpversion | cut -f1 -d.)
+
+ifneq ($(ICC_MAJOR_VERSION),12)
+       MACHINE_CFLAGS = -xSSE3
+$(warning You are not using ICC 12.x. This is neither supported, nor tested.)
+
+else
+# proceed to adjust compiler flags
+
+       ICC_MINOR_VERSION = $(shell icc -dumpversion | cut -f2 -d.)
+
+# replace GCC flags with ICC flags
+       ifeq ($(shell test $(ICC_MINOR_VERSION) -lt 2 && echo 1), 1)
+               # Atom
+               MACHINE_CFLAGS := $(patsubst -march=atom,-xSSSE3_ATOM -march=atom,$(MACHINE_CFLAGS))
+               # nehalem/westmere
+               MACHINE_CFLAGS := $(patsubst -march=corei7,-xSSE4.2 -march=corei7,$(MACHINE_CFLAGS))
+               # sandy bridge
+               MACHINE_CFLAGS := $(patsubst -march=corei7-avx,-xAVX,$(MACHINE_CFLAGS))
+               # ivy bridge
+               MACHINE_CFLAGS := $(patsubst -march=core-avx-i,-xCORE-AVX-I,$(MACHINE_CFLAGS))
+               # remove westmere flags
+               MACHINE_CFLAGS := $(filter-out -mpclmul -maes,$(MACHINE_CFLAGS))
+       endif
+       ifeq ($(shell test $(ICC_MINOR_VERSION) -lt 1 && echo 1), 1)
+               # Atom
+               MACHINE_CFLAGS := $(patsubst -xSSSE3_ATOM,-xSSE3_ATOM,$(MACHINE_CFLAGS))
+               # remove march options
+               MACHINE_CFLAGS := $(patsubst -march=%,-xSSE3,$(MACHINE_CFLAGS))
+       endif
+endif
+MACHINE_CFLAGS += $(addprefix -DRTE_MACHINE_CPUFLAG_,$(CPUFLAGS))
+
+# Turn the space-separated CPUFLAGS list into a comma-separated one
+comma:= ,
+empty:=
+space:= $(empty) $(empty)
+CPUFLAGSTMP1 := $(addprefix RTE_CPUFLAG_,$(CPUFLAGS))
+CPUFLAGSTMP2 := $(subst $(space),$(comma),$(CPUFLAGSTMP1))
+MACHINE_CFLAGS += -DRTE_COMPILE_TIME_CPUFLAGS=$(CPUFLAGSTMP2)
diff --git a/mk/toolchain/icc/rte.vars.mk b/mk/toolchain/icc/rte.vars.mk
new file mode 100644 (file)
index 0000000..5eca8ac
--- /dev/null
@@ -0,0 +1,98 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+#
+# toolchain:
+#
+#   - define CC, LD, AR, AS, ... (overridden by cmdline value)
+#   - define TOOLCHAIN_CFLAGS variable (overridden by cmdline value)
+#   - define TOOLCHAIN_LDFLAGS variable (overridden by cmdline value)
+#   - define TOOLCHAIN_ASFLAGS variable (overridden by cmdline value)
+#
+# examples for RTE_TOOLCHAIN: gcc, icc
+#
+
+# Warning: we do not use the CROSS environment variable, as icc is
+# mainly an x86->x86 compiler
+
+ifeq ($(KERNELRELEASE),)
+CC        = icc
+else
+CC        = gcc
+endif
+CPP       = cpp
+AS        = nasm
+AR        = ar
+LD        = ld
+OBJCOPY   = objcopy
+OBJDUMP   = objdump
+STRIP     = strip
+READELF   = readelf
+
+ifeq ($(KERNELRELEASE),)
+HOSTCC    = icc
+else
+HOSTCC    = gcc
+endif
+HOSTAS    = as
+
+TOOLCHAIN_CFLAGS =
+TOOLCHAIN_LDFLAGS =
+TOOLCHAIN_ASFLAGS =
+
+# Turn off some ICC warnings -
+#   Remark #271   : trailing comma is nonstandard
+#   Warning #1478 : function "<func_name>" (declared at line N of "<filename>")
+#                   was declared "deprecated"
+ifeq ($(CONFIG_RTE_EXEC_ENV),"linuxapp")
+WERROR_FLAGS := -Wall -Werror-all -w2 -diag-disable 271 -diag-warning 1478
+else
+
+# Turn off some ICC warnings -
+#   Remark #193   : zero used for undefined preprocessing identifier
+#                  (needed for newlib)
+#   Remark #271   : trailing comma is nonstandard
+#   Remark #1292  : attribute "warning" ignored ((warning ("the use of
+#                   `mktemp' is dangerous; use `mkstemp' instead"))));
+#                   (needed for newlib)
+#   Warning #1478 : function "<func_name>" (declared at line N of "<filename>")
+#                   was declared "deprecated"
+WERROR_FLAGS := -Wall -Werror-all -w2 -diag-disable 193,271,1292 \
+               -diag-warning 1478
+endif
+
+# process cpu flags
+include $(RTE_SDK)/mk/toolchain/$(RTE_TOOLCHAIN)/rte.toolchain-compat.mk
+
+export CC AS AR LD OBJCOPY OBJDUMP STRIP READELF
+export TOOLCHAIN_CFLAGS TOOLCHAIN_LDFLAGS TOOLCHAIN_ASFLAGS
diff --git a/scripts/Makefile b/scripts/Makefile
new file mode 100644 (file)
index 0000000..8557253
--- /dev/null
@@ -0,0 +1,38 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+DIRS-y += testhost
+
+include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/scripts/depdirs-rule.sh b/scripts/depdirs-rule.sh
new file mode 100755 (executable)
index 0000000..3b0ea56
--- /dev/null
@@ -0,0 +1,97 @@
+#!/bin/sh
+
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+#
+# This (obscure) shell script finds the smallest differing sub-paths
+# between path1 and path2, given as command-line arguments. The given
+# paths MUST be relative; the script is not designed to work with
+# absolute paths.
+#
+# The script will then generate Makefile code that can be saved in a
+# file and included in build system.
+#
+# For instance:
+#   depdirs-rule.sh a/b/c/d a/b/e/f
+# Will print:
+#   FULL_DEPDIRS-a/b/c/d += a/b/e/f
+#   LOCAL_DEPDIRS-a/b/c += a/b/e
+#
+# The script returns 0 except if invalid arguments are given.
+#
+
+if [ $# -ne 2 ]; then
+       echo "Bad arguments"
+       echo "Usage:"
+       echo "  $0 path1 path2"
+       exit 1
+fi
+
+left1=${1%%/*}
+right1=${1#*/}
+prev_right1=$1
+prev_left1=
+
+left2=${2%%/*}
+right2=${2#*/}
+prev_right2=$2
+prev_left2=
+
+while [ "${right1}" != "" -a "${right2}" != "" ]; do
+
+       if [ "$left1" != "$left2" ]; then
+               break
+       fi
+
+       prev_left1=$left1
+       left1=$left1/${right1%%/*}
+       prev_right1=$right1
+       right1=${prev_right1#*/}
+       if [ "$right1" = "$prev_right1" ]; then
+               right1=""
+       fi
+
+       prev_left2=$left2
+       left2=$left2/${right2%%/*}
+       prev_right2=$right2
+       right2=${prev_right2#*/}
+       if [ "$right2" = "$prev_right2" ]; then
+               right2=""
+       fi
+done
+
+echo FULL_DEPDIRS-$1 += $2
+echo LOCAL_DEPDIRS-$left1 += $left2
+
+exit 0
diff --git a/scripts/gen-build-mk.sh b/scripts/gen-build-mk.sh
new file mode 100755 (executable)
index 0000000..d773210
--- /dev/null
@@ -0,0 +1,55 @@
+#!/bin/sh
+
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+# Auto-generate a Makefile in build directory
+# Args:
+#   $1: path of project src root
+#   $2: path of build dir (can be relative to $1)
+
+echo "# Automatically generated by gen-build-mk.sh"
+echo
+echo "ifdef O"
+echo "ifeq (\"\$(origin O)\", \"command line\")"
+echo "\$(error \"Cannot specify O= as you are already in a build directory\")"
+echo "endif"
+echo "endif"
+echo
+echo "MAKEFLAGS += --no-print-directory"
+echo
+echo "all:"
+echo " @\$(MAKE) -C $1 O=$2"
+echo
+echo "%::"
+echo " @\$(MAKE) -C $1 O=$2 \$@"
diff --git a/scripts/gen-config-h.sh b/scripts/gen-config-h.sh
new file mode 100755 (executable)
index 0000000..4d15e6f
--- /dev/null
@@ -0,0 +1,41 @@
+#!/bin/sh
+
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+grep CONFIG_ $1                                                         \
+| grep -v '^#'                                                  \
+| sed 's,CONFIG_\(.*\)=y.*$,#define \1 1,'                      \
+| sed 's,CONFIG_\(.*\)=n.*$,#undef \1,'                                 \
+| sed 's,CONFIG_\(.*\)=\(.*\)$,#define \1 \2,'                  \
+| sed 's,\# CONFIG_\(.*\) is not set$,#undef \1,'
diff --git a/scripts/import_autotest.sh b/scripts/import_autotest.sh
new file mode 100755 (executable)
index 0000000..3b3767b
--- /dev/null
@@ -0,0 +1,87 @@
+#!/bin/sh
+
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+#
+# import autotests in documentation
+# called by rte.sdktestall.mk from RTE_SDK root directory
+# arguments are the list of targets
+#
+
+echo "This will overwrite current autotest results in doc/rst/test_report"
+echo "and in doc/images/ directories"
+echo -n "Are you sure ? [y/N] >"
+read ans
+if [ "$ans" != "y" -a "$ans" != "Y" ]; then
+       echo "Aborted"
+       exit 0
+fi
+
+rm doc/images/autotests/Makefile
+
+for t in $*; do
+       echo -------- $t
+       rm -rf doc/rst/test_report/autotests/$t
+
+       # no autotest dir, skip
+       if ! ls -d $t/autotest-*/*.rst 2> /dev/null > /dev/null; then
+               continue;
+       fi
+
+       for f in $t/autotest*/*.rst; do
+               if [ ! -f $f ]; then
+                       continue
+               fi
+               mkdir -p doc/rst/test_report/autotests/$t
+               cp $f doc/rst/test_report/autotests/$t
+       done
+       rm -rf doc/images/autotests/$t
+       for f in $t/autotest*/*.svg; do
+               if [ ! -f $f ]; then
+                       continue
+               fi
+               mkdir -p doc/images/autotests/$t
+               cp $f doc/images/autotests/$t
+               echo "SVG += `basename $f`" >> doc/images/autotests/$t/Makefile
+       done
+
+       if [ -f doc/images/autotests/$t/Makefile ]; then
+               echo >> doc/images/autotests/$t/Makefile
+               echo 'include $(RTE_SDK)/mk/rte.doc.mk' >> doc/images/autotests/$t/Makefile
+       fi
+
+       echo "DIRS += $t" >> doc/images/autotests/Makefile
+done
+
+echo 'include $(RTE_SDK)/mk/rte.doc.mk' >> doc/images/autotests/Makefile
diff --git a/scripts/relpath.sh b/scripts/relpath.sh
new file mode 100755 (executable)
index 0000000..9a3440b
--- /dev/null
@@ -0,0 +1,100 @@
+#!/bin/sh
+
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+#
+# print the path of $1 relative to the directory $2
+# $1 and $2 MUST be absolute paths
+#
+
+if [ $# -ne 2 ]; then
+       echo "Bad arguments"
+       echo "Usage:"
+       echo "  $0 path1 path2"
+       exit 1
+fi
+
+REL1=${1#/}
+REL2=${2#/}
+
+left1=${REL1%%/*}
+right1=${REL1#*/}
+prev_right1=$REL1
+prev_left1=
+
+left2=${REL2%%/*}
+right2=${REL2#*/}
+prev_right2=$REL2
+prev_left2=
+
+while [ "${right1}" != "" -a "${right2}" != "" ]; do
+
+       if [ "$left1" != "$left2" ]; then
+               break
+       fi
+
+       prev_left1=$left1
+       left1=$left1/${right1%%/*}
+       prev_right1=$right1
+       right1=${prev_right1#*/}
+       if [ "$right1" = "$prev_right1" ]; then
+               right1=""
+       fi
+
+       prev_left2=$left2
+       left2=$left2/${right2%%/*}
+       prev_right2=$right2
+       right2=${prev_right2#*/}
+       if [ "$right2" = "$prev_right2" ]; then
+               right2=""
+       fi
+done
+
+if [ "${left1}" != "${left2}" ]; then
+       right2=${prev_right2}
+       right1=${prev_right1}
+fi
+
+while [ "${right2}" != "" ]; do
+       prefix=${prefix}../
+       prev_right2=$right2
+       right2=${right2#*/}
+       if [ "$right2" = "$prev_right2" ]; then
+               right2=""
+       fi
+done
+
+echo ${prefix}${right1}
+
+exit 0
diff --git a/scripts/test-framework.sh b/scripts/test-framework.sh
new file mode 100755 (executable)
index 0000000..56cb457
--- /dev/null
@@ -0,0 +1,133 @@
+#!/bin/sh
+
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
+# script to check that dependencies are working in the framework
+# must be executed from root
+
+# do a first build
+make config T=x86_64-default-linuxapp-gcc O=deptest
+make -j8 O=deptest
+
+MOD_APP_TEST1=`stat deptest/app/test | grep Modify`
+MOD_APP_TEST_MEMPOOL1=`stat deptest/build/app/test/test_mempool.o | grep Modify`
+MOD_LIB_MEMPOOL1=`stat deptest/lib/librte_mempool.a | grep Modify`
+MOD_LIB_MBUF1=`stat deptest/lib/librte_mbuf.a | grep Modify`
+
+echo "----- touch mempool.c, and check that deps are updated"
+sleep 1
+touch lib/librte_mempool/rte_mempool.c
+make -j8 O=deptest
+
+MOD_APP_TEST2=`stat deptest/app/test | grep Modify`
+MOD_APP_TEST_MEMPOOL2=`stat deptest/build/app/test/test_mempool.o | grep Modify`
+MOD_LIB_MEMPOOL2=`stat deptest/lib/librte_mempool.a | grep Modify`
+MOD_LIB_MBUF2=`stat deptest/lib/librte_mbuf.a | grep Modify`
+
+if [ "${MOD_APP_TEST1}" = "${MOD_APP_TEST2}" ]; then
+       echo ${MOD_APP_TEST1} / ${MOD_APP_TEST2}
+       echo "Bad deps on deptest/app/test"
+       exit 1
+fi
+if [ "${MOD_APP_TEST_MEMPOOL1}" != "${MOD_APP_TEST_MEMPOOL2}" ]; then
+       echo "Bad deps on deptest/build/app/test/test_mempool.o"
+       exit 1
+fi
+if [ "${MOD_LIB_MEMPOOL1}" = "${MOD_LIB_MEMPOOL2}" ]; then
+       echo "Bad deps on deptest/lib/librte_mempool.a"
+       exit 1
+fi
+if [ "${MOD_LIB_MBUF1}" != "${MOD_LIB_MBUF2}" ]; then
+       echo "Bad deps on deptest/lib/librte_mbuf.a"
+       exit 1
+fi
+
+echo "----- touch mempool.h, and check that deps are updated"
+sleep 1
+touch lib/librte_mempool/rte_mempool.h
+make -j8 O=deptest
+
+MOD_APP_TEST3=`stat deptest/app/test | grep Modify`
+MOD_APP_TEST_MEMPOOL3=`stat deptest/build/app/test/test_mempool.o | grep Modify`
+MOD_LIB_MEMPOOL3=`stat deptest/lib/librte_mempool.a | grep Modify`
+MOD_LIB_MBUF3=`stat deptest/lib/librte_mbuf.a | grep Modify`
+
+if [ "${MOD_APP_TEST2}" = "${MOD_APP_TEST3}" ]; then
+       echo "Bad deps on deptest/app/test"
+       exit 1
+fi
+if [ "${MOD_APP_TEST_MEMPOOL2}" = "${MOD_APP_TEST_MEMPOOL3}" ]; then
+       echo "Bad deps on deptest/build/app/test/test_mempool.o"
+       exit 1
+fi
+if [ "${MOD_LIB_MEMPOOL2}" = "${MOD_LIB_MEMPOOL3}" ]; then
+       echo "Bad deps on deptest/lib/librte_mempool.a"
+       exit 1
+fi
+if [ "${MOD_LIB_MBUF2}" = "${MOD_LIB_MBUF3}" ]; then
+       echo "Bad deps on deptest/lib/librte_mbuf.a"
+       exit 1
+fi
+
+
+echo "----- change mempool.c's CFLAGS, and check that deps are updated"
+sleep 1
+make -j8 O=deptest CFLAGS_rte_mempool.o="-DDUMMY_TEST"
+
+MOD_APP_TEST4=`stat deptest/app/test | grep Modify`
+MOD_APP_TEST_MEMPOOL4=`stat deptest/build/app/test/test_mempool.o | grep Modify`
+MOD_LIB_MEMPOOL4=`stat deptest/lib/librte_mempool.a | grep Modify`
+MOD_LIB_MBUF4=`stat deptest/lib/librte_mbuf.a | grep Modify`
+
+if [ "${MOD_APP_TEST3}" = "${MOD_APP_TEST4}" ]; then
+       echo "Bad deps on deptest/app/test"
+       exit 1
+fi
+if [ "${MOD_APP_TEST_MEMPOOL3}" != "${MOD_APP_TEST_MEMPOOL4}" ]; then
+       echo "Bad deps on deptest/build/app/test/test_mempool.o"
+       exit 1
+fi
+if [ "${MOD_LIB_MEMPOOL3}" = "${MOD_LIB_MEMPOOL4}" ]; then
+       echo "Bad deps on deptest/lib/librte_mempool.a"
+       exit 1
+fi
+if [ "${MOD_LIB_MBUF3}" != "${MOD_LIB_MBUF4}" ]; then
+       echo "Bad deps on deptest/lib/librte_mbuf.a"
+       exit 1
+fi
+
+
+echo "----- Deps check ok"
+rm -rf deptest
+exit 0
diff --git a/scripts/testhost/Makefile b/scripts/testhost/Makefile
new file mode 100644 (file)
index 0000000..c2ac583
--- /dev/null
@@ -0,0 +1,50 @@
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
include $(RTE_SDK)/mk/rte.vars.mk

#
# host application name (despite being under "library name" conventions
# elsewhere, testhost is an app built with the host toolchain by the
# rte.hostapp.mk rules included below)
#
HOSTAPP = testhost

# headers live next to the sources
HOST_CFLAGS += -I$(SRCDIR)

# HOST_LDFLAGS +=

#
# all source are stored in SRCS-y
#
SRCS-y := testhost.c

include $(RTE_SDK)/mk/rte.hostapp.mk
diff --git a/scripts/testhost/testhost.c b/scripts/testhost/testhost.c
new file mode 100644 (file)
index 0000000..bf26822
--- /dev/null
@@ -0,0 +1,57 @@
+/*-
+ *   BSD LICENSE
+ * 
+ *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ * 
+ *   Redistribution and use in source and binary forms, with or without 
+ *   modification, are permitted provided that the following conditions 
+ *   are met:
+ * 
+ *     * Redistributions of source code must retain the above copyright 
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright 
+ *       notice, this list of conditions and the following disclaimer in 
+ *       the documentation and/or other materials provided with the 
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its 
+ *       contributors may be used to endorse or promote products derived 
+ *       from this software without specific prior written permission.
+ * 
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * 
+ *  version: DPDK.L.1.2.3-3
+ */
+
#include <stdio.h>

/* Small fixture type: two ints, used only for sizeof comparisons. */
struct toto {
	int x;
	int y;
};

/*
 * Host-side sanity program: prints the size of a designated-initializer
 * array next to the size of a plain fixed-length array of the same type.
 * Both arrays hold four elements, so the two printed values are equal.
 */
int main(int argc, char **argv)
{
	/* Array whose length is inferred from its four initializers. */
	struct toto t[] = {
		{ .x = 1, .y = 2 },
		{ .x = 1, .y = 2 },
		{ .x = 1, .y = 2 },
		{ .x = 1, .y = 2 },
	};
	/* Explicitly-sized, uninitialized counterpart. */
	struct toto u[4];

	printf("%zu %zu\n", sizeof(t), sizeof(u));

	return 0;
}
diff --git a/tools/setup.sh b/tools/setup.sh
new file mode 100755 (executable)
index 0000000..3726528
--- /dev/null
@@ -0,0 +1,420 @@
+#! /bin/bash
+
+#   BSD LICENSE
+# 
+#   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+#   All rights reserved.
+# 
+#   Redistribution and use in source and binary forms, with or without 
+#   modification, are permitted provided that the following conditions 
+#   are met:
+# 
+#     * Redistributions of source code must retain the above copyright 
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright 
+#       notice, this list of conditions and the following disclaimer in 
+#       the documentation and/or other materials provided with the 
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its 
+#       contributors may be used to endorse or promote products derived 
+#       from this software without specific prior written permission.
+# 
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# 
+#  version: DPDK.L.1.2.3-3
+
#
# Run with "source /path/to/setup.sh"
#

#
# Change to DPDK directory ( <this-script's-dir>/.. ), and export it as RTE_SDK
# so the make targets below can locate the SDK tree.  Both expansions are
# quoted so the script keeps working when its path contains whitespace.
#
cd "$(dirname "${BASH_SOURCE[0]}")/.."
export RTE_SDK=$PWD
echo "------------------------------------------------------------------------------"
echo " RTE_SDK exported as $RTE_SDK"
echo "------------------------------------------------------------------------------"

#
# Application EAL parameters for setting memory options (amount/channels/ranks).
#
EAL_PARAMS='-n 4'
+
+#
+# Sets QUIT variable so script will finish.
+#
# Record the chosen menu option in QUIT; any non-"0" (or empty) value
# terminates the main menu loop at the bottom of this script.
quit() { QUIT=$1; }
+
#
# Sets up environment variables for ICC.
# $1 - architecture argument for iccvars.sh ("intel64" or "ia32").
# Prefers iccvars.sh found on PATH, falls back to the default install
# location; on failure calls quit (no argument), which clears QUIT so
# setup_target skips the build.
#
setup_icc()
{
	DEFAULT_PATH=/opt/intel/bin/iccvars.sh
	param=$1
	# 'command -v' is the portable, builtin replacement for 'which'.
	shpath=$(command -v iccvars.sh 2> /dev/null)
	if [ $? -eq 0 ] ; then
		echo "Loading iccvars.sh from $shpath for $param"
		source "$shpath" "$param"
	elif [ -f "$DEFAULT_PATH" ] ; then
		echo "Loading iccvars.sh from $DEFAULT_PATH for $param"
		source "$DEFAULT_PATH" "$param"
	else
		echo "## ERROR: cannot find 'iccvars.sh' script to set up ICC."
		echo "##     To fix, please add the directory that contains"
		echo "##     iccvars.sh  to your 'PATH' environment variable."
		quit
	fi
}
+
+#
+# Sets RTE_TARGET and does a "make install".
+#
# Sets RTE_TARGET from the user's menu choice and builds it.
# $1 - index into the TARGETS[] array populated by step1_func.
setup_target()
{
	option=$1
	export RTE_TARGET=${TARGETS[option]}

	# Target names end in the compiler ("...-gcc" / "...-icc") and start
	# with the architecture ("x86_64-..." etc.).
	compiler=${RTE_TARGET##*-}
	if [ "$compiler" == "icc" ] ; then
		platform=${RTE_TARGET%%-*}
		if [ "$platform" == "x86_64" ] ; then
			setup_icc intel64
		else
			setup_icc ia32
		fi
	fi
	# setup_icc calls quit (clearing QUIT's "0") on failure, so only run
	# the build when ICC setup - if any - succeeded.
	if [ "$QUIT" == "0" ] ; then
		make install T=${RTE_TARGET}
	fi
	echo "------------------------------------------------------------------------------"
	echo " RTE_TARGET exported as $RTE_TARGET"
	echo "------------------------------------------------------------------------------"
}
+
+#
+# Uninstall all targets.
+#
# Delegate to the SDK top-level makefile to remove all installed targets.
uninstall_targets() { make uninstall; }
+
+#
+# Creates hugepage filesystem.
+#
# Ensure /mnt/huge exists and has a hugetlbfs mounted on it (idempotent:
# the mount is skipped when /proc/mounts already lists the path).
create_mnt_huge()
{
	echo "Creating /mnt/huge and mounting as hugetlbfs"
	sudo mkdir -p /mnt/huge

	if ! grep -s '/mnt/huge' /proc/mounts > /dev/null ; then
		sudo mount -t hugetlbfs nodev /mnt/huge
	fi
}
+
+#
+# Removes hugepage filesystem.
+#
# Unmount hugetlbfs from /mnt/huge (only if currently mounted) and then
# delete the mount-point directory if it is still present.
remove_mnt_huge()
{
	echo "Unmounting /mnt/huge and removing directory"
	if grep -s '/mnt/huge' /proc/mounts > /dev/null ; then
		sudo umount /mnt/huge
	fi

	if [ -d /mnt/huge ] ; then
		sudo rm -R /mnt/huge
	fi
}
+
+#
+# Unloads igb_uio.ko.
+#
# rmmod any currently loaded igb_uio module; a no-op when it is not loaded.
remove_igb_uio_module()
{
	echo "Unloading any existing DPDK UIO module"
	if /sbin/lsmod | grep -s igb_uio > /dev/null ; then
		sudo /sbin/rmmod igb_uio
	fi
}
+
+#
+# Loads new igb_uio.ko (and uio module if needed).
+#
load_igb_uio_module()
{
	# The module is built per-target; bail out (without killing the menu)
	# when the selected target was not built.  Paths are quoted so an SDK
	# checkout under a path containing spaces still works.
	if [ ! -f "$RTE_SDK/$RTE_TARGET/kmod/igb_uio.ko" ] ; then
		echo "## ERROR: Target does not have the DPDK UIO Kernel Module."
		echo "       To fix, please try to rebuild target."
		return
	fi

	remove_igb_uio_module

	# igb_uio needs the generic uio framework; load it first when it is
	# available as a standalone module.
	if ! /sbin/lsmod | grep -s uio > /dev/null ; then
		if [ -f "/lib/modules/$(uname -r)/kernel/drivers/uio/uio.ko" ] ; then
			echo "Loading uio module"
			sudo /sbin/modprobe uio
		fi
	fi

	# UIO may be compiled into kernel, so it may not be an error if it can't
	# be loaded.

	echo "Loading DPDK UIO module"
	if ! sudo /sbin/insmod "$RTE_SDK/$RTE_TARGET/kmod/igb_uio.ko" ; then
		echo "## ERROR: Could not load kmod/igb_uio.ko."
		quit
	fi
}
+
+#
+# Removes all reserved hugepages.
+#
clear_huge_pages()
{
	# Build a small script zeroing nr_hugepages on every NUMA node, then run
	# it under a single sudo: "sudo echo 0 > sysfs-file" would perform the
	# redirection as the unprivileged caller and fail.
	echo > .echo_tmp
	for d in /sys/devices/system/node/node? ; do
		echo "echo 0 > $d/hugepages/hugepages-2048kB/nr_hugepages" >> .echo_tmp
	done
	echo "Removing currently reserved hugepages"
	sudo sh .echo_tmp
	rm -f .echo_tmp
	# NOTE(review): .echo_tmp is a predictable name in the CWD; mktemp would
	# be safer if this script is ever run from a shared directory.

	remove_mnt_huge
}
+
+#
+# Creates hugepages.
+#
set_non_numa_pages()
{
	# Start from a clean state (also unmounts /mnt/huge).
	clear_huge_pages

	echo ""
	echo "  Input the number of 2MB pages"
	echo "  Example: to have 128MB of hugepages available, enter '64' to"
	echo "  reserve 64 * 2MB pages"
	echo -n "Number of pages: "
	# -r keeps backslashes in the user's input literal.
	read -r Pages

	# Sysfs write is staged into a script and run under one sudo (a plain
	# "sudo echo > file" would redirect as the unprivileged user).
	echo "echo $Pages > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages" > .echo_tmp

	echo "Reserving hugepages"
	sudo sh .echo_tmp
	rm -f .echo_tmp

	create_mnt_huge
}
+
+#
+# Creates hugepages on specific NUMA nodes.
+#
set_numa_pages()
{
	# Start from a clean state (also unmounts /mnt/huge).
	clear_huge_pages

	echo ""
	echo "  Input the number of 2MB pages for each node"
	echo "  Example: to have 128MB of hugepages available per node,"
	echo "  enter '64' to reserve 64 * 2MB pages on each node"

	# Stage one sysfs write per NUMA node, then run them under one sudo.
	echo > .echo_tmp
	for d in /sys/devices/system/node/node? ; do
		node=$(basename "$d")
		echo -n "Number of pages for $node: "
		# -r keeps backslashes in the user's input literal.
		read -r Pages
		echo "echo $Pages > $d/hugepages/hugepages-2048kB/nr_hugepages" >> .echo_tmp
	done
	echo "Reserving hugepages"
	sudo sh .echo_tmp
	rm -f .echo_tmp

	create_mnt_huge
}
+
+#
+# Run unit test application.
+#
run_test_app()
{
	echo ""
	echo "  Enter hex bitmask of cores to execute test app on"
	echo "  Example: to execute app on cores 0 to 7, enter 0xff"
	echo -n "bitmask: "
	# -r keeps backslashes in the user's input literal; the mask itself is
	# quoted when passed on so an empty/odd entry stays a single argument.
	read -r Bitmask
	echo "Launching app"
	# EAL_PARAMS is deliberately unquoted so it word-splits into separate
	# arguments ("-n 4").
	sudo ${RTE_TARGET}/app/test -c "$Bitmask" $EAL_PARAMS
}
+
+#
+# Run unit testpmd application.
+#
# Prompt for a core bitmask, then launch testpmd in interactive mode
# ("-- -i") for the currently selected RTE_TARGET.
run_testpmd_app()
{
	printf '\n'
	printf '%s\n' "  Enter hex bitmask of cores to execute testpmd app on"
	printf '%s\n' "  Example: to execute app on cores 0 to 7, enter 0xff"
	printf 'bitmask: '
	read Bitmask
	printf '%s\n' "Launching app"
	sudo ${RTE_TARGET}/app/testpmd -c $Bitmask $EAL_PARAMS -- -i
}
+
+#
+# Print hugepage information.
+#
# Dump the hugepage-related counters from /proc/meminfo.
grep_meminfo() { grep -i huge /proc/meminfo; }
+
+#
+# List all hugepage file references
+#
# Long-list the files currently present under the hugetlbfs mount.
ls_mnt_huge() { ls -lh /mnt/huge; }
+
+#
+# Options for building a target. Note that this step MUST be first as it sets
+# up TARGETS[] starting from 1, and this is accessed in setup_target using the
+# user entered option.
+#
# Build the step-1 menu: one entry per config/defconfig_* template.
# Fills TEXT[]/TARGETS[]/FUNC[] starting at index 1; setup_target later
# indexes TARGETS[] with the user's menu choice, so this step MUST run
# first.  Leaves CONFIG_NUM one past the last used index.
step1_func()
{
	TITLE="Select the DPDK environment to build"
	CONFIG_NUM=1
	for defcfg in config/defconfig_* ; do
		target=${defcfg#config/defconfig_}
		TEXT[$CONFIG_NUM]=$target
		TARGETS[$CONFIG_NUM]=$target
		FUNC[$CONFIG_NUM]=setup_target
		CONFIG_NUM=$((CONFIG_NUM + 1))
	done
}
+
+#
+# Options for setting up environment.
+#
# Step-2 menu: environment setup actions (UIO module, hugepage mappings).
# Each TEXT[i] label is dispatched to the matching FUNC[i] handler.
step2_func()
{
	TITLE='Setup linuxapp environment'

	TEXT[1]='Insert IGB UIO module'
	FUNC[1]=load_igb_uio_module
	TEXT[2]='Setup hugepage mappings for non-NUMA systems'
	FUNC[2]=set_non_numa_pages
	TEXT[3]='Setup hugepage mappings for NUMA systems'
	FUNC[3]=set_numa_pages
}
+
+#
+# Options for running applications.
+#
# Step-3 menu: run the unit-test and testpmd applications.  Labels use
# single quotes so "$RTE_TARGET" stays literal in the menu text.
step3_func()
{
	TITLE='Run test application for linuxapp environment'

	TEXT[1]='Run test application ($RTE_TARGET/app/test)'
	FUNC[1]=run_test_app
	TEXT[2]='Run testpmd application in interactive mode ($RTE_TARGET/app/testpmd)'
	FUNC[2]=run_testpmd_app
}
+
+#
+# Other options
+#
# Step-4 menu: informational tools (hugepage status listings).
step4_func()
{
	TITLE='Other tools'

	TEXT[1]='List hugepage info from /proc/meminfo'
	FUNC[1]=grep_meminfo
	TEXT[2]='List hugepage files in /mnt/huge'
	FUNC[2]=ls_mnt_huge
}
+
+#
+# Options for cleaning up the system
+#
# Step-5 menu: uninstall and system-cleanup actions.
step5_func()
{
	TITLE='Uninstall and system cleanup'

	TEXT[1]='Uninstall all targets'
	FUNC[1]=uninstall_targets
	TEXT[2]='Remove IGB UIO module'
	FUNC[2]=remove_igb_uio_module
	TEXT[3]='Remove hugepage mappings'
	FUNC[3]=clear_huge_pages
}
+
+STEPS[1]="step1_func"
+STEPS[2]="step2_func"
+STEPS[3]="step3_func"
+STEPS[4]="step4_func"
+STEPS[5]="step5_func"
+
+QUIT=0
+
+while [ "$QUIT" == "0" ]; do
+       OPTION_NUM=1
+
+       for s in $(seq ${#STEPS[@]}) ; do
+               ${STEPS[s]}
+
+               echo "----------------------------------------------------------"
+               echo " Step $s: ${TITLE}"
+               echo "----------------------------------------------------------"
+
+               for i in $(seq ${#TEXT[@]}) ; do
+                       echo "[$OPTION_NUM] ${TEXT[i]}"
+                       OPTIONS[$OPTION_NUM]=${FUNC[i]}
+                       let "OPTION_NUM+=1"
+               done
+
+               # Clear TEXT and FUNC arrays before next step
+               unset TEXT
+               unset FUNC
+
+               echo ""
+       done
+
+       echo "[$OPTION_NUM] Exit Script"
+       OPTIONS[$OPTION_NUM]="quit"
+       echo ""
+       echo -n "Option: "
+       read our_entry
+       echo ""
+       ${OPTIONS[our_entry]} ${our_entry}
+       echo
+       echo -n "Press enter to continue ..."; read
+done