mbuf: generic support for TCP segmentation offload
diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
index d345885..2df3385 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
@@ -90,6 +90,12 @@
                 ETH_RSS_IPV6_UDP | \
                 ETH_RSS_IPV6_UDP_EX)
 
+/* Bit Mask to indicate what bits required for building TX context */
+#define IXGBE_TX_OFFLOAD_MASK ( \
+                PKT_TX_VLAN_PKT | \
+                PKT_TX_IP_CKSUM | \
+                PKT_TX_L4_MASK)
+
 static inline struct rte_mbuf *
 rte_rxmbuf_alloc(struct rte_mempool *mp)
 {
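The new IXGBE_TX_OFFLOAD_MASK lets the TX path test with a single AND which of a packet's generic ol_flags this PMD actually handles in hardware. A minimal standalone sketch of that filtering follows; the flag bit positions here are illustrative, not the real rte_mbuf.h values.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative bit positions only; the real values live in rte_mbuf.h. */
#define PKT_TX_VLAN_PKT  (1ULL << 0)
#define PKT_TX_IP_CKSUM  (1ULL << 1)
#define PKT_TX_L4_MASK   (3ULL << 2)   /* 2-bit field selecting the L4 checksum type */
#define PKT_TX_SOME_FLAG (1ULL << 10)  /* hypothetical flag this PMD ignores */

#define IXGBE_TX_OFFLOAD_MASK ( \
                PKT_TX_VLAN_PKT | \
                PKT_TX_IP_CKSUM | \
                PKT_TX_L4_MASK)

int main(void)
{
        uint64_t ol_flags = PKT_TX_IP_CKSUM | PKT_TX_SOME_FLAG;

        /* Non-zero only when the packet requests an offload this driver
         * implements; this is what gates the context-descriptor path. */
        uint64_t tx_ol_req = ol_flags & IXGBE_TX_OFFLOAD_MASK;

        printf("tx_ol_req = 0x%" PRIx64 "\n", tx_ol_req);
        return 0;
}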
@@ -374,7 +380,7 @@ ixgbe_set_xmit_ctx(struct igb_tx_queue* txq,
         if (ol_flags & PKT_TX_IP_CKSUM) {
                 type_tucmd_mlhl = IXGBE_ADVTXD_TUCMD_IPV4;
-                cmp_mask |= TX_MAC_LEN_CMP_MASK;
+                cmp_mask |= TX_MACIP_LEN_CMP_MASK;
         }
 
         /* Specify which HW CTX to upload. */
@@ -540,6 +546,13 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
         struct rte_mbuf *tx_pkt;
         struct rte_mbuf *m_seg;
         union ixgbe_vlan_macip vlan_macip_lens;
+        union {
+                uint16_t u16;
+                struct {
+                        uint16_t l3_len:9;
+                        uint16_t l2_len:7;
+                };
+        } l2_l3_len;
         uint64_t buf_dma_addr;
         uint32_t olinfo_status;
         uint32_t cmd_type_len;
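The anonymous-struct bitfield union above packs a 7-bit L2 header length and a 9-bit L3 header length into a single uint16_t, so the pair can be stored and compared as one scalar. A standalone sketch of the same packing (compile as C11 for anonymous struct members; note that bitfield layout is implementation-defined, which is tolerable here because the value is produced and consumed on the same host):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        union {
                uint16_t u16;
                struct {
                        uint16_t l3_len:9; /* IP header length, up to 511 */
                        uint16_t l2_len:7; /* L2 header length, up to 127 */
                };
        } l2_l3_len;

        l2_l3_len.l2_len = 14; /* untagged Ethernet header */
        l2_l3_len.l3_len = 20; /* IPv4 header without options */

        /* Both lengths now travel in one 16-bit word. */
        printf("packed l2/l3 lengths = 0x%04x\n", l2_l3_len.u16);
        return 0;
}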
@@ -565,25 +578,28 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                 ixgbe_xmit_cleanup(txq);
         }
 
+        rte_prefetch0(&txe->mbuf->pool);
+
         /* TX loop */
         for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
                 new_ctx = 0;
                 tx_pkt = *tx_pkts++;
                 pkt_len = tx_pkt->pkt_len;
 
-                RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
-
                 /*
                  * Determine how many (if any) context descriptors
                  * are needed for offload functionality.
                  */
                 ol_flags = tx_pkt->ol_flags;
-                vlan_macip_lens.f.vlan_tci = tx_pkt->vlan_tci;
-                vlan_macip_lens.f.l2_l3_len = tx_pkt->l2_l3_len;
 
                 /* If hardware offload required */
-                tx_ol_req = ol_flags & PKT_TX_OFFLOAD_MASK;
+                tx_ol_req = ol_flags & IXGBE_TX_OFFLOAD_MASK;
                 if (tx_ol_req) {
+                        l2_l3_len.l2_len = tx_pkt->l2_len;
+                        l2_l3_len.l3_len = tx_pkt->l3_len;
+                        vlan_macip_lens.f.vlan_tci = tx_pkt->vlan_tci;
+                        vlan_macip_lens.f.l2_l3_len = l2_l3_len.u16;
+
                         /* If new context need be built or reuse the exist ctx. */
                         ctx = what_advctx_update(txq, tx_ol_req,
                                 vlan_macip_lens.data);
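The prefetch change replaces RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf) with rte_prefetch0(&txe->mbuf->pool): rather than touching the start of the mbuf, it pulls in the cache line holding the pool pointer, which rte_pktmbuf_free_seg() dereferences when the transmitted buffer is returned to its mempool. A rough standalone sketch using GCC's __builtin_prefetch (the primitive rte_prefetch0() wraps); the struct layout below is invented for illustration:

#include <stddef.h>

struct mempool;                /* opaque, stands in for rte_mempool */
struct mbuf {
        char cacheline0[64];   /* invented padding: pool sits past the first line */
        struct mempool *pool;  /* field the free path reads first */
};

/* Equivalent in spirit to rte_prefetch0(&m->pool): a read prefetch with
 * high temporal locality, issued well before the pointer is needed. */
static inline void prefetch_pool_ptr(const struct mbuf *m)
{
        __builtin_prefetch(&m->pool, 0 /* read */, 3 /* keep in cache */);
}

int main(void)
{
        struct mbuf m = {0};
        prefetch_pool_ptr(&m); /* harmless hint; never faults */
        return 0;
}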
@@ -720,7 +736,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                         txd = &txr[tx_id];
                         txn = &sw_ring[txe->next_id];
-                        RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
+                        rte_prefetch0(&txn->mbuf->pool);
 
                         if (txe->mbuf != NULL) {
                                 rte_pktmbuf_free_seg(txe->mbuf);
@@ -749,6 +765,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                 do {
                         txd = &txr[tx_id];
                         txn = &sw_ring[txe->next_id];
+                        rte_prefetch0(&txn->mbuf->pool);
 
                         if (txe->mbuf != NULL)
                                 rte_pktmbuf_free_seg(txe->mbuf);
@@ -815,7 +832,7 @@ end_of_tx:
 static inline uint64_t
 rx_desc_hlen_type_rss_to_pkt_flags(uint32_t hl_tp_rs)
 {
-        uint16_t pkt_flags;
+        uint64_t pkt_flags;
         static uint64_t ip_pkt_types_map[16] = {
                 0, PKT_RX_IPV4_HDR, PKT_RX_IPV4_HDR_EXT, PKT_RX_IPV4_HDR_EXT,
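Widening pkt_flags from uint16_t to uint64_t matters because mbuf offload flags are now 64-bit values: any flag assigned a bit above 15 would be silently lost in a 16-bit variable. A small standard-C demonstration with a hypothetical high flag bit:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_RX_FLAG (1ULL << 40) /* hypothetical flag above bit 15 */

int main(void)
{
        uint64_t flags64 = EXAMPLE_RX_FLAG;
        uint16_t flags16 = (uint16_t)flags64; /* truncates to 0 */

        printf("64-bit flags: 0x%" PRIx64 ", after uint16_t cast: 0x%x\n",
               flags64, flags16);
        return 0;
}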
@@ -832,7 +849,7 @@ rx_desc_hlen_type_rss_to_pkt_flags(uint32_t hl_tp_rs)
         };
 
 #ifdef RTE_LIBRTE_IEEE1588
-        static uint32_t ip_pkt_etqf_map[8] = {
+        static uint64_t ip_pkt_etqf_map[8] = {
                 0, 0, 0, PKT_RX_IEEE1588_PTP,
                 0, 0, 0, 0,
         };
@@ -901,7 +918,7 @@ ixgbe_rx_scan_hw_ring(struct igb_rx_queue *rxq)
         struct igb_rx_entry *rxep;
         struct rte_mbuf *mb;
         uint16_t pkt_len;
-        uint16_t pkt_flags;
+        uint64_t pkt_flags;
         int s[LOOK_AHEAD], nb_dd;
         int i, j, nb_rx = 0;
@@ -1333,7 +1350,7 @@ ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
         uint16_t nb_rx;
         uint16_t nb_hold;
         uint16_t data_len;
-        uint16_t pkt_flags;
+        uint64_t pkt_flags;
 
         nb_rx = 0;
         nb_hold = 0;
@@ -1509,9 +1526,9 @@ ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                 first_seg->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
                 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
                 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
-                pkt_flags = (uint16_t)(pkt_flags |
+                pkt_flags = (pkt_flags |
                                 rx_desc_status_to_pkt_flags(staterr));
-                pkt_flags = (uint16_t)(pkt_flags |
+                pkt_flags = (pkt_flags |
                                 rx_desc_error_to_pkt_flags(staterr));
                 first_seg->ol_flags = pkt_flags;
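This hunk is the flip side of the type widening above: with pkt_flags held in a uint64_t end to end, the intermediate (uint16_t) casts would themselves truncate any flag bit above 15, so they are dropped rather than widened.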
@@ -2189,6 +2206,9 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
          */
         use_def_burst_func = check_rx_burst_bulk_alloc_preconditions(rxq);
 
+#ifdef RTE_IXGBE_INC_VECTOR
+        ixgbe_rxq_vec_setup(rxq);
+#endif
         /* Check if pre-conditions are satisfied, and no Scattered Rx */
         if (!use_def_burst_func && !dev->data->scattered_rx) {
 #ifdef RTE_LIBRTE_IXGBE_RX_ALLOW_BULK_ALLOC
@@ -2201,7 +2221,6 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
                 if (!ixgbe_rx_vec_condition_check(dev)) {
                         PMD_INIT_LOG(INFO, "Vector rx enabled, please make "
                                      "sure RX burst size no less than 32.");
-                        ixgbe_rxq_vec_setup(rxq);
                         dev->rx_pkt_burst = ixgbe_recv_pkts_vec;
                 }
 #endif
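Taken together with the previous hunk, this moves ixgbe_rxq_vec_setup() out of the conditional vector-RX selection path: the per-queue vector setup now always runs when RTE_IXGBE_INC_VECTOR is compiled in, while the later check only decides whether ixgbe_recv_pkts_vec is installed as the burst function.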
@@ -3119,6 +3138,7 @@ ixgbe_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
         struct ixgbe_hw *hw;
         enum rte_eth_nb_pools num_pools;
         uint32_t mrqc, vt_ctl, vlanctrl;
+        uint32_t vmolr = 0;
         int i;
 
         PMD_INIT_FUNC_TRACE();
@@ -3141,6 +3161,11 @@ ixgbe_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
         IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vt_ctl);
 
+        for (i = 0; i < (int)num_pools; i++) {
+                vmolr = ixgbe_convert_vm_rx_mask_to_val(cfg->rx_mode, vmolr);
+                IXGBE_WRITE_REG(hw, IXGBE_VMOLR(i), vmolr);
+        }
+
         /* VLNCTRL: enable vlan filtering and allow all vlan tags through */
         vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
         vlanctrl |= IXGBE_VLNCTRL_VFE ; /* enable vlan filters */
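The new loop programs one VMOLR (VM offload) register per VMDq pool, using ixgbe_convert_vm_rx_mask_to_val() (defined elsewhere in this PMD) to translate the configured rx_mode into register bits. A standalone sketch of the pattern; the helper logic, mask bits, and register model below are invented stand-ins, not the real ixgbe definitions:

#include <stdint.h>
#include <stdio.h>

#define NUM_POOLS 4

/* Invented rx_mode and VMOLR bits, for illustration only. */
#define RX_MODE_ACCEPT_UNTAG 0x1
#define RX_MODE_ACCEPT_BCAST 0x2
#define VMOLR_AUPE (1u << 24)
#define VMOLR_BAM  (1u << 27)

static uint32_t convert_rx_mask_to_val(uint16_t rx_mode, uint32_t vmolr)
{
        if (rx_mode & RX_MODE_ACCEPT_UNTAG)
                vmolr |= VMOLR_AUPE;
        if (rx_mode & RX_MODE_ACCEPT_BCAST)
                vmolr |= VMOLR_BAM;
        return vmolr;
}

int main(void)
{
        uint16_t rx_mode = RX_MODE_ACCEPT_UNTAG | RX_MODE_ACCEPT_BCAST;
        uint32_t vmolr = 0;
        int i;

        /* Same shape as the added loop: one register write per pool. */
        for (i = 0; i < NUM_POOLS; i++) {
                vmolr = convert_rx_mask_to_val(rx_mode, vmolr);
                printf("VMOLR(%d) <= 0x%08x\n", i, vmolr);
        }
        return 0;
}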