X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fe1000%2Fe1000_ethdev.h;h=a667a1aefa4a9fbd3adccfc751fb4a91234c24b8;hb=4b90a3ff26c5a2def84e06cd67c3792e5d313c75;hp=c451faa556b042059317a2dc340b838244b2cf36;hpb=d15fcf76c8b76e12c4050609cd31927ee0864c5a;p=dpdk.git

diff --git a/drivers/net/e1000/e1000_ethdev.h b/drivers/net/e1000/e1000_ethdev.h
index c451faa556..a667a1aefa 100644
--- a/drivers/net/e1000/e1000_ethdev.h
+++ b/drivers/net/e1000/e1000_ethdev.h
@@ -1,7 +1,7 @@
 /*-
  * BSD LICENSE
  *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -108,6 +108,33 @@
 	ETH_RSS_IPV6_TCP_EX | \
 	ETH_RSS_IPV6_UDP_EX)
 
+/*
+ * Maximum number of Ring Descriptors.
+ *
+ * Since RDLEN/TDLEN should be multiple of 128 bytes, the number of ring
+ * descriptors should meet the following condition:
+ * (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
+ */
+#define E1000_MIN_RING_DESC	32
+#define E1000_MAX_RING_DESC	4096
+
+/*
+ * TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be
+ * multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary.
+ * This will also optimize cache line size effect.
+ * H/W supports up to cache line size 128.
+ */
+#define E1000_ALIGN	128
+
+#define IGB_RXD_ALIGN	(E1000_ALIGN / sizeof(union e1000_adv_rx_desc))
+#define IGB_TXD_ALIGN	(E1000_ALIGN / sizeof(union e1000_adv_tx_desc))
+
+#define EM_RXD_ALIGN	(E1000_ALIGN / sizeof(struct e1000_rx_desc))
+#define EM_TXD_ALIGN	(E1000_ALIGN / sizeof(struct e1000_data_desc))
+
+#define E1000_MISC_VEC_ID	RTE_INTR_VEC_ZERO_OFFSET
+#define E1000_RX_VEC_START	RTE_INTR_VEC_RXTX_OFFSET
+
 /* structure for interrupt relative data */
 struct e1000_interrupt {
 	uint32_t flags;
@@ -229,8 +256,12 @@ struct e1000_adapter {
 	struct e1000_vfta shadow_vfta;
 	struct e1000_vf_info *vfdata;
 	struct e1000_filter_info filter;
+	bool stopped;
 };
 
+#define E1000_DEV_PRIVATE(adapter) \
+	((struct e1000_adapter *)adapter)
+
 #define E1000_DEV_PRIVATE_TO_HW(adapter) \
 	(&((struct e1000_adapter *)adapter)->hw)
 
@@ -255,6 +286,7 @@ struct e1000_adapter {
 void eth_igb_tx_queue_release(void *txq);
 void eth_igb_rx_queue_release(void *rxq);
 void igb_dev_clear_queues(struct rte_eth_dev *dev);
+void igb_dev_free_queues(struct rte_eth_dev *dev);
 
 int eth_igb_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
 		uint16_t nb_rx_desc, unsigned int socket_id,
@@ -302,6 +334,12 @@ void igb_pf_mbx_process(struct rte_eth_dev *eth_dev);
 
 int igb_pf_host_configure(struct rte_eth_dev *eth_dev);
 
+void igb_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+	struct rte_eth_rxq_info *qinfo);
+
+void igb_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+	struct rte_eth_txq_info *qinfo);
+
 /*
  * RX/TX EM function prototypes
  */
@@ -309,6 +347,7 @@ void eth_em_tx_queue_release(void *txq);
 void eth_em_rx_queue_release(void *rxq);
 
 void em_dev_clear_queues(struct rte_eth_dev *dev);
+void em_dev_free_queues(struct rte_eth_dev *dev);
 
 int eth_em_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
 		uint16_t nb_rx_desc, unsigned int socket_id,
@@ -337,4 +376,12 @@ uint16_t eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 uint16_t eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		uint16_t nb_pkts);
 
+void em_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+	struct rte_eth_rxq_info *qinfo);
+
+void em_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+	struct rte_eth_txq_info *qinfo);
+
+void igb_pf_host_uninit(struct rte_eth_dev *dev);
+
 #endif /* _E1000_ETHDEV_H_ */
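
The descriptor-count constraint added in the first hunk can be illustrated with a short, self-contained sketch. RDLEN/TDLEN must remain a multiple of 128 bytes, and an IGB advanced RX/TX descriptor is 16 bytes, so IGB_RXD_ALIGN/IGB_TXD_ALIGN work out to 8 and valid ring sizes are multiples of 8 between E1000_MIN_RING_DESC (32) and E1000_MAX_RING_DESC (4096); EM_RXD_ALIGN/EM_TXD_ALIGN play the same role for the 16-byte legacy em descriptors. The helper below is illustrative only, not code from the driver: its name and error handling are made up here, and the 16-byte descriptor size is hard-coded so the sketch compiles without the hardware headers.

#include <stdbool.h>
#include <stdint.h>

/* Constants introduced by the hunk above. */
#define E1000_MIN_RING_DESC 32
#define E1000_MAX_RING_DESC 4096
#define E1000_ALIGN         128

/* union e1000_adv_rx_desc is 16 bytes, so this evaluates to 8. */
#define IGB_RXD_ALIGN (E1000_ALIGN / 16)

/*
 * Hypothetical helper: returns true when nb_desc keeps RDLEN a multiple
 * of 128 bytes and stays inside the supported range, i.e.
 * (nb_desc * 16) % 128 == 0 and 32 <= nb_desc <= 4096.
 */
static bool
igb_rx_desc_count_ok(uint16_t nb_desc)
{
	if (nb_desc % IGB_RXD_ALIGN != 0)
		return false;
	return nb_desc >= E1000_MIN_RING_DESC && nb_desc <= E1000_MAX_RING_DESC;
}

With these values, igb_rx_desc_count_ok(512) holds, while igb_rx_desc_count_ok(500) fails the alignment test because 500 * 16 bytes is not a multiple of 128.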