X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_pmd_e1000%2Fem_rxtx.c;h=70d398f8b307af81feb2f90a884b54e4e53bc3ba;hb=e7dc3c78ddcbace5d07ee8f9416da3ef15926612;hp=ba7e3a9da73fa3a49d278e924e337c5605a81e4b;hpb=7869536f3f8edace05043be6f322b835702b201c;p=dpdk.git

diff --git a/lib/librte_pmd_e1000/em_rxtx.c b/lib/librte_pmd_e1000/em_rxtx.c
index ba7e3a9da7..70d398f8b3 100644
--- a/lib/librte_pmd_e1000/em_rxtx.c
+++ b/lib/librte_pmd_e1000/em_rxtx.c
@@ -90,8 +90,7 @@ rte_rxmbuf_alloc(struct rte_mempool *mp)
 }
 
 #define RTE_MBUF_DATA_DMA_ADDR(mb) \
-	(uint64_t) ((mb)->buf_physaddr +   \
-	(uint64_t) ((char *)((mb)->data) - (char *)(mb)->buf_addr))
+	(uint64_t) ((mb)->buf_physaddr + (mb)->data_off)
 
 #define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
 	(uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
@@ -169,7 +168,7 @@ union em_vlan_macip {
  * Structure to check if new context need be built
  */
 struct em_ctx_info {
-	uint16_t flags;              /**< ol_flags related to context build. */
+	uint64_t flags;              /**< ol_flags related to context build. */
 	uint32_t cmp_mask;           /**< compare mask */
 	union em_vlan_macip hdrlen;  /**< L2 and L3 header lenghts */
 };
@@ -239,7 +238,7 @@ struct em_tx_queue {
 static inline void
 em_set_xmit_ctx(struct em_tx_queue* txq,
 		volatile struct e1000_context_desc *ctx_txd,
-		uint16_t flags,
+		uint64_t flags,
 		union em_vlan_macip hdrlen)
 {
 	uint32_t cmp_mask, cmd_len;
@@ -305,7 +304,7 @@ em_set_xmit_ctx(struct em_tx_queue* txq,
  * or create a new context descriptor.
  */
 static inline uint32_t
-what_ctx_update(struct em_tx_queue *txq, uint16_t flags,
+what_ctx_update(struct em_tx_queue *txq, uint64_t flags,
 		union em_vlan_macip hdrlen)
 {
 	/* If match with the current context */
@@ -340,8 +339,7 @@ em_xmit_cleanup(struct em_tx_queue *txq)
 	{
 		PMD_TX_FREE_LOG(DEBUG,
 				"TX descriptor %4u is not done"
-				"(port=%d queue=%d)",
-				desc_to_clean_to,
+				"(port=%d queue=%d)", desc_to_clean_to,
 				txq->port_id, txq->queue_id);
 		/* Failed to clean any descriptors, better luck next time */
 		return -(1);
@@ -357,9 +355,9 @@ em_xmit_cleanup(struct em_tx_queue *txq)
 
 	PMD_TX_FREE_LOG(DEBUG,
 			"Cleaning %4u TX descriptors: %4u to %4u "
-			"(port=%d queue=%d)",
-			nb_tx_to_clean, last_desc_cleaned, desc_to_clean_to,
-			txq->port_id, txq->queue_id);
+			"(port=%d queue=%d)", nb_tx_to_clean,
+			last_desc_cleaned, desc_to_clean_to, txq->port_id,
+			txq->queue_id);
 
 	/*
 	 * The last descriptor to clean is done, so that means all the
@@ -378,7 +376,7 @@ em_xmit_cleanup(struct em_tx_queue *txq)
 }
 
 static inline uint32_t
-tx_desc_cksum_flags_to_upper(uint16_t ol_flags)
+tx_desc_cksum_flags_to_upper(uint64_t ol_flags)
 {
 	static const uint32_t l4_olinfo[2] = {0, E1000_TXD_POPTS_TXSM << 8};
 	static const uint32_t l3_olinfo[2] = {0, E1000_TXD_POPTS_IXSM << 8};
@@ -404,12 +402,12 @@ eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 	uint32_t popts_spec;
 	uint32_t cmd_type_len;
 	uint16_t slen;
-	uint16_t ol_flags;
+	uint64_t ol_flags;
 	uint16_t tx_id;
 	uint16_t tx_last;
 	uint16_t nb_tx;
 	uint16_t nb_used;
-	uint16_t tx_ol_req;
+	uint64_t tx_ol_req;
 	uint32_t ctx;
 	uint32_t new_ctx;
 	union em_vlan_macip hdrlen;
@@ -439,8 +437,7 @@ eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		ol_flags = tx_pkt->ol_flags;
 
 		/* If hardware offload required */
-		tx_ol_req = (uint16_t)(ol_flags & (PKT_TX_IP_CKSUM |
-							PKT_TX_L4_MASK));
+		tx_ol_req = (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK));
 		if (tx_ol_req) {
 			hdrlen.f.vlan_tci = tx_pkt->vlan_tci;
 			hdrlen.f.l2_len = tx_pkt->l2_len;
@@ -474,12 +471,12 @@ eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
 
 		PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
-			   " tx_first=%u tx_last=%u\n",
-			   (unsigned) txq->port_id,
-			   (unsigned) txq->queue_id,
-			   (unsigned) tx_pkt->pkt_len,
-			   (unsigned) tx_id,
-			   (unsigned) tx_last);
+			   " tx_first=%u tx_last=%u",
+			   (unsigned) txq->port_id,
+			   (unsigned) txq->queue_id,
+			   (unsigned) tx_pkt->pkt_len,
+			   (unsigned) tx_id,
+			   (unsigned) tx_last);
 
 		/*
 		 * Make sure there are enough TX descriptors available to
@@ -487,8 +484,7 @@ eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		 * nb_used better be less than or equal to txq->tx_rs_thresh
 		 */
 		while (unlikely (nb_used > txq->nb_tx_free)) {
-			PMD_TX_FREE_LOG(DEBUG,
-					"Not enough free TX descriptors "
+			PMD_TX_FREE_LOG(DEBUG, "Not enough free TX descriptors "
 					"nb_used=%4u nb_free=%4u "
 					"(port=%d queue=%d)",
 					nb_used, txq->nb_tx_free,
@@ -611,8 +607,8 @@ eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		/* Set RS bit only on threshold packets' last descriptor */
 		if (txq->nb_tx_used >= txq->tx_rs_thresh) {
 			PMD_TX_FREE_LOG(DEBUG,
-					"Setting RS bit on TXD id="
-					"%4u (port=%d queue=%d)",
+					"Setting RS bit on TXD id=%4u "
+					"(port=%d queue=%d)",
 					tx_last, txq->port_id, txq->queue_id);
 
 			cmd_type_len |= E1000_TXD_CMD_RS;
@@ -643,22 +639,21 @@ end_of_tx:
  *
  **********************************************************************/
 
-static inline uint16_t
+static inline uint64_t
 rx_desc_status_to_pkt_flags(uint32_t rx_status)
 {
-	uint16_t pkt_flags;
+	uint64_t pkt_flags;
 
 	/* Check if VLAN present */
-	pkt_flags = (uint16_t)((rx_status & E1000_RXD_STAT_VP) ?
-						PKT_RX_VLAN_PKT : 0);
+	pkt_flags = ((rx_status & E1000_RXD_STAT_VP) ? PKT_RX_VLAN_PKT : 0);
 
 	return pkt_flags;
 }
 
-static inline uint16_t
+static inline uint64_t
 rx_desc_error_to_pkt_flags(uint32_t rx_error)
 {
-	uint16_t pkt_flags = 0;
+	uint64_t pkt_flags = 0;
 
 	if (rx_error & E1000_RXD_ERR_IPE)
 		pkt_flags |= PKT_RX_IP_CKSUM_BAD;
@@ -734,18 +729,18 @@ eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		 * to happen by sending specific "back-pressure" flow control
 		 * frames to its peer(s).
 		 */
-		PMD_RX_LOG(DEBUG, "\nport_id=%u queue_id=%u rx_id=%u "
-			   "status=0x%x pkt_len=%u\n",
-			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
-			   (unsigned) rx_id, (unsigned) status,
-			   (unsigned) rte_le_to_cpu_16(rxd.length));
+		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
+			   "status=0x%x pkt_len=%u",
+			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
+			   (unsigned) rx_id, (unsigned) status,
+			   (unsigned) rte_le_to_cpu_16(rxd.length));
 
 		nmb = rte_rxmbuf_alloc(rxq->mb_pool);
 		if (nmb == NULL) {
 			PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
-				   "queue_id=%u\n",
-				   (unsigned) rxq->port_id,
-				   (unsigned) rxq->queue_id);
+				   "queue_id=%u",
+				   (unsigned) rxq->port_id,
+				   (unsigned) rxq->queue_id);
 			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
 			break;
 		}
@@ -793,8 +788,8 @@ eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		 */
 		pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.length) -
 				rxq->crc_len);
-		rxm->data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
-		rte_packet_prefetch(rxm->data);
+		rxm->data_off = RTE_PKTMBUF_HEADROOM;
+		rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
 		rxm->nb_segs = 1;
 		rxm->next = NULL;
 		rxm->pkt_len = pkt_len;
@@ -802,8 +797,8 @@ eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		rxm->port = rxq->port_id;
 
 		rxm->ol_flags = rx_desc_status_to_pkt_flags(status);
-		rxm->ol_flags = (uint16_t)(rxm->ol_flags |
-				rx_desc_error_to_pkt_flags(rxd.errors));
+		rxm->ol_flags = rxm->ol_flags |
+				rx_desc_error_to_pkt_flags(rxd.errors);
 
 		/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
 		rxm->vlan_tci = rte_le_to_cpu_16(rxd.special);
@@ -828,10 +823,10 @@ eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
 	if (nb_hold > rxq->rx_free_thresh) {
 		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
-			"nb_hold=%u nb_rx=%u\n",
-			(unsigned) rxq->port_id, (unsigned) rxq->queue_id,
-			(unsigned) rx_id, (unsigned) nb_hold,
-			(unsigned) nb_rx);
+			"nb_hold=%u nb_rx=%u",
+			(unsigned) rxq->port_id, (unsigned) rxq->queue_id,
+			(unsigned) rx_id, (unsigned) nb_hold,
+			(unsigned) nb_rx);
 		rx_id = (uint16_t) ((rx_id == 0) ?
 			(rxq->nb_rx_desc - 1) : (rx_id - 1));
 		E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
@@ -914,17 +909,17 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		 * to happen by sending specific "back-pressure" flow control
 		 * frames to its peer(s).
 		 */
-		PMD_RX_LOG(DEBUG, "\nport_id=%u queue_id=%u rx_id=%u "
-			   "status=0x%x data_len=%u\n",
-			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
-			   (unsigned) rx_id, (unsigned) status,
-			   (unsigned) rte_le_to_cpu_16(rxd.length));
+		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
+			   "status=0x%x data_len=%u",
+			   (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
+			   (unsigned) rx_id, (unsigned) status,
+			   (unsigned) rte_le_to_cpu_16(rxd.length));
 
 		nmb = rte_rxmbuf_alloc(rxq->mb_pool);
 		if (nmb == NULL) {
 			PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
-				   "queue_id=%u\n", (unsigned) rxq->port_id,
-				   (unsigned) rxq->queue_id);
+				   "queue_id=%u", (unsigned) rxq->port_id,
+				   (unsigned) rxq->queue_id);
 			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
 			break;
 		}
@@ -963,7 +958,7 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		 */
 		data_len = rte_le_to_cpu_16(rxd.length);
 		rxm->data_len = data_len;
-		rxm->data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
+		rxm->data_off = RTE_PKTMBUF_HEADROOM;
 
 		/*
 		 * If this is the first buffer of the received packet,
@@ -1028,14 +1023,15 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		first_seg->port = rxq->port_id;
 
 		first_seg->ol_flags = rx_desc_status_to_pkt_flags(status);
-		first_seg->ol_flags = (uint16_t)(first_seg->ol_flags |
-					rx_desc_error_to_pkt_flags(rxd.errors));
+		first_seg->ol_flags = first_seg->ol_flags |
+					rx_desc_error_to_pkt_flags(rxd.errors);
 
 		/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
 		rxm->vlan_tci = rte_le_to_cpu_16(rxd.special);
 
 		/* Prefetch data of first segment, if configured to do so. */
-		rte_packet_prefetch(first_seg->data);
+		rte_packet_prefetch((char *)first_seg->buf_addr +
+				first_seg->data_off);
 
 		/*
 		 * Store the mbuf address into the next entry of the array
@@ -1072,10 +1068,10 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
 	if (nb_hold > rxq->rx_free_thresh) {
 		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
-			"nb_hold=%u nb_rx=%u\n",
-			(unsigned) rxq->port_id, (unsigned) rxq->queue_id,
-			(unsigned) rx_id, (unsigned) nb_hold,
-			(unsigned) nb_rx);
+			"nb_hold=%u nb_rx=%u",
+			(unsigned) rxq->port_id, (unsigned) rxq->queue_id,
+			(unsigned) rx_id, (unsigned) nb_hold,
+			(unsigned) nb_rx);
 		rx_id = (uint16_t) ((rx_id == 0) ?
 			(rxq->nb_rx_desc - 1) : (rx_id - 1));
 		E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
@@ -1233,18 +1229,21 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,
 			DEFAULT_TX_RS_THRESH);
 
 	if (tx_free_thresh >= (nb_desc - 3)) {
-		RTE_LOG(ERR, PMD, "tx_free_thresh must be less than the "
-			"number of TX descriptors minus 3. (tx_free_thresh=%u "
-			"port=%d queue=%d)\n", (unsigned int)tx_free_thresh,
-			(int)dev->data->port_id, (int)queue_idx);
+		PMD_INIT_LOG(ERR, "tx_free_thresh must be less than the "
+			     "number of TX descriptors minus 3. "
+			     "(tx_free_thresh=%u port=%d queue=%d)",
+			     (unsigned int)tx_free_thresh,
+			     (int)dev->data->port_id, (int)queue_idx);
 		return -(EINVAL);
 	}
 	if (tx_rs_thresh > tx_free_thresh) {
-		RTE_LOG(ERR, PMD, "tx_rs_thresh must be less than or equal to "
-			"tx_free_thresh. (tx_free_thresh=%u tx_rs_thresh=%u "
-			"port=%d queue=%d)\n", (unsigned int)tx_free_thresh,
-			(unsigned int)tx_rs_thresh, (int)dev->data->port_id,
-			(int)queue_idx);
+		PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or equal to "
+			     "tx_free_thresh. (tx_free_thresh=%u "
+			     "tx_rs_thresh=%u port=%d queue=%d)",
+			     (unsigned int)tx_free_thresh,
+			     (unsigned int)tx_rs_thresh,
+			     (int)dev->data->port_id,
+			     (int)queue_idx);
 		return -(EINVAL);
 	}
 
@@ -1255,10 +1254,10 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,
 	 * accumulates WTHRESH descriptors.
 	 */
 	if (tx_conf->tx_thresh.wthresh != 0 && tx_rs_thresh != 1) {
-		RTE_LOG(ERR, PMD, "TX WTHRESH must be set to 0 if "
-			"tx_rs_thresh is greater than 1. (tx_rs_thresh=%u "
-			"port=%d queue=%d)\n", (unsigned int)tx_rs_thresh,
-			(int)dev->data->port_id, (int)queue_idx);
+		PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if "
+			     "tx_rs_thresh is greater than 1. (tx_rs_thresh=%u "
+			     "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
+			     (int)dev->data->port_id, (int)queue_idx);
 		return -(EINVAL);
 	}
 
@@ -1308,8 +1307,8 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,
 #endif
 	txq->tx_ring = (struct e1000_data_desc *) tz->addr;
 
-	PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n",
-		txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
+	PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
+		     txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
 
 	em_reset_tx_queue(txq);
 
@@ -1388,7 +1387,8 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
 	 * EM devices don't support drop_en functionality
 	 */
 	if (rx_conf->rx_drop_en) {
-		RTE_LOG(ERR, PMD, "drop_en functionality not supported by device\n");
+		PMD_INIT_LOG(ERR, "drop_en functionality not supported by "
+			     "device");
 		return (-EINVAL);
 	}
 
@@ -1437,8 +1437,8 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
 #endif
 	rxq->rx_ring = (struct e1000_rx_desc *) rz->addr;
 
-	PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n",
-		rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
+	PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
+		     rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
 
 	dev->data->rx_queues[queue_idx] = rxq;
 	em_reset_rx_queue(rxq);
@@ -1455,7 +1455,7 @@ eth_em_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	uint32_t desc = 0;
 
 	if (rx_queue_id >= dev->data->nb_rx_queues) {
-		PMD_RX_LOG(DEBUG,"Invalid RX queue_id=%d\n", rx_queue_id);
+		PMD_RX_LOG(DEBUG, "Invalid RX queue_id=%d", rx_queue_id);
 		return 0;
 	}
 
@@ -1601,7 +1601,7 @@ em_alloc_rx_queue_mbufs(struct em_rx_queue *rxq)
 
 		if (mbuf == NULL) {
 			PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
-				"queue_id=%hu\n", rxq->queue_id);
+				     "queue_id=%hu", rxq->queue_id);
 			return (-ENOMEM);
 		}
 
@@ -1730,6 +1730,8 @@ eth_em_rx_init(struct rte_eth_dev *dev)
 	 */
 	if (dev->data->dev_conf.rxmode.jumbo_frame ||
 			rctl_bsize < ETHER_MAX_LEN) {
+		if (!dev->data->scattered_rx)
+			PMD_INIT_LOG(DEBUG, "forcing scatter mode");
 		dev->rx_pkt_burst =
 			(eth_rx_burst_t)eth_em_recv_scattered_pkts;
 		dev->data->scattered_rx = 1;
@@ -1737,6 +1739,8 @@ eth_em_rx_init(struct rte_eth_dev *dev)
 	}
 
 	if (dev->data->dev_conf.rxmode.enable_scatter) {
+		if (!dev->data->scattered_rx)
+			PMD_INIT_LOG(DEBUG, "forcing scatter mode");
 		dev->rx_pkt_burst = eth_em_recv_scattered_pkts;
 		dev->data->scattered_rx = 1;
 	}