X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_pmd_e1000%2Fem_rxtx.c;h=ed3a6fcc808f1547b3a876ce64f8165ced57f387;hb=08b563ffb19d8baf59dd84200f25bc85031d18a7;hp=475471bf0eb1b2259e1023d5e94074129b2e40ae;hpb=3f6899edd74833f56bc26a4e2378c641e01d0a90;p=dpdk.git

diff --git a/lib/librte_pmd_e1000/em_rxtx.c b/lib/librte_pmd_e1000/em_rxtx.c
index 475471bf0e..ed3a6fcc80 100644
--- a/lib/librte_pmd_e1000/em_rxtx.c
+++ b/lib/librte_pmd_e1000/em_rxtx.c
@@ -1,13 +1,13 @@
 /*-
  *   BSD LICENSE
- * 
+ *
  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
  *   All rights reserved.
- * 
+ *
  *   Redistribution and use in source and binary forms, with or without
  *   modification, are permitted provided that the following conditions
  *   are met:
- * 
+ *
  *     * Redistributions of source code must retain the above copyright
  *       notice, this list of conditions and the following disclaimer.
  *     * Redistributions in binary form must reproduce the above copyright
@@ -17,7 +17,7 @@
  *     * Neither the name of Intel Corporation nor the names of its
  *       contributors may be used to endorse or promote products derived
  *       from this software without specific prior written permission.
- * 
+ *
  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@@ -85,13 +85,12 @@ rte_rxmbuf_alloc(struct rte_mempool *mp)
 	struct rte_mbuf *m;
 
 	m = __rte_mbuf_raw_alloc(mp);
-	__rte_mbuf_sanity_check_raw(m, RTE_MBUF_PKT, 0);
+	__rte_mbuf_sanity_check_raw(m, 0);
 	return (m);
 }
 
 #define RTE_MBUF_DATA_DMA_ADDR(mb)             \
-	(uint64_t) ((mb)->buf_physaddr +       \
-	(uint64_t) ((char *)((mb)->pkt.data) - (char *)(mb)->buf_addr))
+	(uint64_t) ((mb)->buf_physaddr + (mb)->data_off)
 
 #define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
 	(uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
@@ -144,13 +143,34 @@ enum {
 	EM_CTX_NUM  = 1, /**< CTX NUM */
 };
 
+/** Offload features */
+union em_vlan_macip {
+	uint32_t data;
+	struct {
+		uint16_t l3_len:9; /**< L3 (IP) Header Length. */
+		uint16_t l2_len:7; /**< L2 (MAC) Header Length. */
+		uint16_t vlan_tci;
+		/**< VLAN Tag Control Identifier (CPU order). */
+	} f;
+};
+
+/*
+ * Compare masks for em_vlan_macip.data;
+ * must be in sync with the em_vlan_macip.f layout.
+ */
+#define TX_VLAN_CMP_MASK	0xFFFF0000	/**< VLAN length - 16 bits. */
+#define TX_MAC_LEN_CMP_MASK	0x0000FE00	/**< MAC length - 7 bits. */
+#define TX_IP_LEN_CMP_MASK	0x000001FF	/**< IP length - 9 bits. */
+/** MAC+IP length. */
+#define TX_MACIP_LEN_CMP_MASK	(TX_MAC_LEN_CMP_MASK | TX_IP_LEN_CMP_MASK)
+
 /**
  * Structure to check if new context need be built
  */
 struct em_ctx_info {
 	uint16_t flags;               /**< ol_flags related to context build. */
 	uint32_t cmp_mask;            /**< compare mask */
-	union rte_vlan_macip hdrlen;  /**< L2 and L3 header lengths */
+	union em_vlan_macip hdrlen;   /**< L2 and L3 header lengths */
 };
 
 /**
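The union and masks added above replace the generic rte_vlan_macip that the mbuf rework removed from the library. The bit layout matters: the driver compares cached offload state against the requested one by masking the raw .data word, so the masks must stay in sync with the bitfields. Below is a minimal standalone sketch of that correspondence; the union and mask values are copied from the patch, while the main() harness is purely illustrative and assumes a little-endian host with low-to-high bitfield allocation (the same assumption the mask values encode):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    union em_vlan_macip {            /* copied from the patch */
    	uint32_t data;
    	struct {
    		uint16_t l3_len:9;   /* bits 0-8  */
    		uint16_t l2_len:7;   /* bits 9-15 */
    		uint16_t vlan_tci;   /* bits 16-31 */
    	} f;
    };

    #define TX_VLAN_CMP_MASK      0xFFFF0000
    #define TX_MACIP_LEN_CMP_MASK 0x0000FFFF

    int main(void)
    {
    	union em_vlan_macip h = { .data = 0 };

    	h.f.l2_len = 14;        /* Ethernet header */
    	h.f.l3_len = 20;        /* IPv4 header without options */
    	h.f.vlan_tci = 0x0123;

    	/* The length mask isolates exactly l2_len|l3_len and the VLAN
    	 * mask exactly vlan_tci, which is what the context-match check
    	 * in the driver relies on. */
    	assert((h.data & TX_MACIP_LEN_CMP_MASK) == ((14u << 9) | 20u));
    	assert((h.data & TX_VLAN_CMP_MASK) == ((uint32_t)0x0123 << 16));
    	printf("data=0x%08x\n", (unsigned)h.data);
    	return 0;
    }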
@@ -219,7 +239,7 @@ static inline void
 em_set_xmit_ctx(struct em_tx_queue* txq,
 		volatile struct e1000_context_desc *ctx_txd,
 		uint16_t flags,
-		union rte_vlan_macip hdrlen)
+		union em_vlan_macip hdrlen)
 {
 	uint32_t cmp_mask, cmd_len;
 	uint16_t ipcse, l2len;
@@ -285,7 +305,7 @@ em_set_xmit_ctx(struct em_tx_queue* txq,
  */
 static inline uint32_t
 what_ctx_update(struct em_tx_queue *txq, uint16_t flags,
-		union rte_vlan_macip hdrlen)
+		union em_vlan_macip hdrlen)
 {
 	/* If match with the current context */
 	if (likely (txq->ctx_cache.flags == flags &&
@@ -391,7 +411,7 @@ eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 	uint16_t tx_ol_req;
 	uint32_t ctx;
 	uint32_t new_ctx;
-	union rte_vlan_macip hdrlen;
+	union em_vlan_macip hdrlen;
 
 	txq = tx_queue;
 	sw_ring = txq->sw_ring;
@@ -421,7 +441,9 @@ eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		tx_ol_req = (uint16_t)(ol_flags & (PKT_TX_IP_CKSUM |
 							PKT_TX_L4_MASK));
 		if (tx_ol_req) {
-			hdrlen = tx_pkt->pkt.vlan_macip;
+			hdrlen.f.vlan_tci = tx_pkt->vlan_tci;
+			hdrlen.f.l2_len = tx_pkt->l2_len;
+			hdrlen.f.l3_len = tx_pkt->l3_len;
 			/* If new context to be built or reuse the exist ctx. */
 			ctx = what_ctx_update(txq, tx_ol_req, hdrlen);
 
@@ -434,7 +456,7 @@ eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		 * This will always be the number of segments + the number of
 		 * Context descriptors required to transmit the packet
 		 */
-		nb_used = (uint16_t)(tx_pkt->pkt.nb_segs + new_ctx);
+		nb_used = (uint16_t)(tx_pkt->nb_segs + new_ctx);
 
 		/*
 		 * The number of descriptors that must be allocated for a
@@ -454,7 +476,7 @@ eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			" tx_first=%u tx_last=%u\n",
 			(unsigned) txq->port_id,
 			(unsigned) txq->queue_id,
-			(unsigned) tx_pkt->pkt.pkt_len,
+			(unsigned) tx_pkt->pkt_len,
 			(unsigned) tx_id,
 			(unsigned) tx_last);
 
@@ -516,8 +538,7 @@ eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		/* Set VLAN Tag offload fields. */
 		if (ol_flags & PKT_TX_VLAN_PKT) {
 			cmd_type_len |= E1000_TXD_CMD_VLE;
-			popts_spec = tx_pkt->pkt.vlan_macip.f.vlan_tci <<
-				E1000_TXD_VLAN_SHIFT;
+			popts_spec = tx_pkt->vlan_tci << E1000_TXD_VLAN_SHIFT;
 		}
 
 		if (tx_ol_req) {
@@ -566,7 +587,7 @@ eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			/*
 			 * Set up Transmit Data Descriptor.
 			 */
-			slen = m_seg->pkt.data_len;
+			slen = m_seg->data_len;
 			buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
 
 			txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
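This is where the offset-based mbuf pays off on the TX path: the per-segment DMA address computed through RTE_MBUF_DATA_DMA_ADDR is now a plain addition (buf_physaddr + data_off) instead of a pointer difference. A reduced sketch of the segment walk above, using stand-in types rather than the driver's real rte_mbuf and e1000_data_desc:

    #include <stddef.h>
    #include <stdint.h>

    /* Reduced stand-ins for rte_mbuf and the TX data descriptor. */
    struct mbuf {
    	uint64_t buf_physaddr;  /* physical address of the data buffer */
    	uint16_t data_off;      /* replaces the old pkt.data pointer */
    	uint16_t data_len;
    	struct mbuf *next;
    };

    struct data_desc {
    	uint64_t buffer_addr;
    	uint16_t length;
    };

    /* One data descriptor per mbuf segment; the DMA address is now just
     * buf_physaddr + data_off, as in RTE_MBUF_DATA_DMA_ADDR above. */
    static size_t fill_descs(const struct mbuf *m, struct data_desc *txd)
    {
    	size_t n = 0;

    	for (; m != NULL; m = m->next, n++) {
    		txd[n].buffer_addr = m->buf_physaddr + m->data_off;
    		txd[n].length = m->data_len;
    	}
    	return n;
    }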
@@ -576,7 +597,7 @@ eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			txe->last_id = tx_last;
 			tx_id = txe->next_id;
 			txe = txn;
-			m_seg = m_seg->pkt.next;
+			m_seg = m_seg->next;
 		} while (m_seg != NULL);
 
 		/*
@@ -771,20 +792,20 @@ eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		 */
 		pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.length) -
 				rxq->crc_len);
-		rxm->pkt.data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
-		rte_packet_prefetch(rxm->pkt.data);
-		rxm->pkt.nb_segs = 1;
-		rxm->pkt.next = NULL;
-		rxm->pkt.pkt_len = pkt_len;
-		rxm->pkt.data_len = pkt_len;
-		rxm->pkt.in_port = rxq->port_id;
+		rxm->data_off = RTE_PKTMBUF_HEADROOM;
+		rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
+		rxm->nb_segs = 1;
+		rxm->next = NULL;
+		rxm->pkt_len = pkt_len;
+		rxm->data_len = pkt_len;
+		rxm->port = rxq->port_id;
 
 		rxm->ol_flags = rx_desc_status_to_pkt_flags(status);
 		rxm->ol_flags = (uint16_t)(rxm->ol_flags |
 				rx_desc_error_to_pkt_flags(rxd.errors));
 
 		/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
-		rxm->pkt.vlan_macip.f.vlan_tci = rte_le_to_cpu_16(rxd.special);
+		rxm->vlan_tci = rte_le_to_cpu_16(rxd.special);
 
 		/*
 		 * Store the mbuf address into the next entry of the array
@@ -940,8 +961,8 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		 * Set data length & data buffer address of mbuf.
 		 */
 		data_len = rte_le_to_cpu_16(rxd.length);
-		rxm->pkt.data_len = data_len;
-		rxm->pkt.data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
+		rxm->data_len = data_len;
+		rxm->data_off = RTE_PKTMBUF_HEADROOM;
 
 		/*
 		 * If this is the first buffer of the received packet,
@@ -953,12 +974,12 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		 */
 		if (first_seg == NULL) {
 			first_seg = rxm;
-			first_seg->pkt.pkt_len = data_len;
-			first_seg->pkt.nb_segs = 1;
+			first_seg->pkt_len = data_len;
+			first_seg->nb_segs = 1;
 		} else {
-			first_seg->pkt.pkt_len += data_len;
-			first_seg->pkt.nb_segs++;
-			last_seg->pkt.next = rxm;
+			first_seg->pkt_len += data_len;
+			first_seg->nb_segs++;
+			last_seg->next = rxm;
 		}
 
 		/*
@@ -981,18 +1002,18 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		 * mbuf, subtract the length of that CRC part from the
 		 * data length of the previous mbuf.
 		 */
-		rxm->pkt.next = NULL;
+		rxm->next = NULL;
 		if (unlikely(rxq->crc_len > 0)) {
-			first_seg->pkt.pkt_len -= ETHER_CRC_LEN;
+			first_seg->pkt_len -= ETHER_CRC_LEN;
 			if (data_len <= ETHER_CRC_LEN) {
 				rte_pktmbuf_free_seg(rxm);
-				first_seg->pkt.nb_segs--;
-				last_seg->pkt.data_len = (uint16_t)
-					(last_seg->pkt.data_len -
+				first_seg->nb_segs--;
+				last_seg->data_len = (uint16_t)
+					(last_seg->data_len -
 					 (ETHER_CRC_LEN - data_len));
-				last_seg->pkt.next = NULL;
+				last_seg->next = NULL;
 			} else
-				rxm->pkt.data_len =
+				rxm->data_len =
 					(uint16_t) (data_len - ETHER_CRC_LEN);
 		}
 
@@ -1003,17 +1024,18 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		 *      - IP checksum flag,
 		 *      - error flags.
 		 */
-		first_seg->pkt.in_port = rxq->port_id;
+		first_seg->port = rxq->port_id;
 		first_seg->ol_flags = rx_desc_status_to_pkt_flags(status);
 		first_seg->ol_flags = (uint16_t)(first_seg->ol_flags |
 					rx_desc_error_to_pkt_flags(rxd.errors));
 
 		/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
-		rxm->pkt.vlan_macip.f.vlan_tci = rte_le_to_cpu_16(rxd.special);
+		rxm->vlan_tci = rte_le_to_cpu_16(rxd.special);
 
 		/* Prefetch data of first segment, if configured to do so. */
-		rte_packet_prefetch(first_seg->pkt.data);
+		rte_packet_prefetch((char *)first_seg->buf_addr +
+			first_seg->data_off);
 
 		/*
 		 * Store the mbuf address into the next entry of the array
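The CRC trimming above has one subtle case: with hardware CRC stripping disabled, the 4-byte Ethernet CRC can straddle the last two segments, so the final mbuf may contain nothing but CRC bytes. Below is a standalone model of just that rule, with plain integers instead of mbufs; trim_crc() and the struct are hypothetical names for illustration:

    #include <assert.h>
    #include <stdint.h>

    #define ETHER_CRC_LEN 4

    struct tail_fixup {
    	int free_last_seg;       /* last segment held only CRC bytes */
    	uint16_t tail_data_len;  /* corrected length of the surviving tail */
    };

    static struct tail_fixup trim_crc(uint16_t prev_len, uint16_t last_len)
    {
    	struct tail_fixup fx = { 0, 0 };

    	if (last_len <= ETHER_CRC_LEN) {
    		/* The whole last segment is CRC: free it and trim the
    		 * leftover CRC bytes from the previous segment. */
    		fx.free_last_seg = 1;
    		fx.tail_data_len =
    			(uint16_t)(prev_len - (ETHER_CRC_LEN - last_len));
    	} else {
    		fx.tail_data_len = (uint16_t)(last_len - ETHER_CRC_LEN);
    	}
    	return fx;
    }

    int main(void)
    {
    	/* A 1500-byte segment followed by a 2-byte segment: those 2 bytes
    	 * are CRC, and 2 more CRC bytes sit at the end of the 1500. */
    	struct tail_fixup fx = trim_crc(1500, 2);

    	assert(fx.free_last_seg == 1 && fx.tail_data_len == 1498);
    	return 0;
    }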
@@ -1093,14 +1115,14 @@ ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
 	const struct rte_memzone *mz;
 	char z_name[RTE_MEMZONE_NAMESIZE];
 
-	rte_snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
+	snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
 		dev->driver->pci_drv.name, ring_name, dev->data->port_id,
 		queue_id);
 
 	if ((mz = rte_memzone_lookup(z_name)) != 0)
 		return (mz);
 
-#ifdef RTE_LIBRTE_XEN_DOM0 
+#ifdef RTE_LIBRTE_XEN_DOM0
 	return rte_memzone_reserve_bounded(z_name, ring_size,
 			socket_id, 0, CACHE_LINE_SIZE, RTE_PGSIZE_2M);
 #else
@@ -1275,15 +1297,13 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->pthresh = tx_conf->tx_thresh.pthresh;
 	txq->hthresh = tx_conf->tx_thresh.hthresh;
 	txq->wthresh = tx_conf->tx_thresh.wthresh;
-	if (txq->wthresh > 0 && hw->mac.type == e1000_82576)
-		txq->wthresh = 1;
 	txq->queue_id = queue_idx;
 	txq->port_id = dev->data->port_id;
 
 	txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(queue_idx));
 #ifndef RTE_LIBRTE_XEN_DOM0
 	txq->tx_ring_phys_addr = (uint64_t) tz->phys_addr;
-#else 
+#else
 	txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
 #endif
 	txq->tx_ring = (struct e1000_data_desc *) tz->addr;
@@ -1402,9 +1422,6 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq->pthresh = rx_conf->rx_thresh.pthresh;
 	rxq->hthresh = rx_conf->rx_thresh.hthresh;
 	rxq->wthresh = rx_conf->rx_thresh.wthresh;
-	if (rxq->wthresh > 0 && hw->mac.type == e1000_82576)
-		rxq->wthresh = 1;
-
 	rxq->rx_free_thresh = rx_conf->rx_free_thresh;
 	rxq->queue_id = queue_idx;
 	rxq->port_id = dev->data->port_id;
@@ -1413,11 +1430,11 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(queue_idx));
 	rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(queue_idx));
 
-#ifndef RTE_LIBRTE_XEN_DOM0 
+#ifndef RTE_LIBRTE_XEN_DOM0
 	rxq->rx_ring_phys_addr = (uint64_t) rz->phys_addr;
 #else
-	rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr); 
-#endif 
+	rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
+#endif
 	rxq->rx_ring = (struct e1000_rx_desc *) rz->addr;
 
 	PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n",
@@ -1429,7 +1446,7 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
 	return (0);
 }
 
-uint32_t 
+uint32_t
 eth_em_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 {
 #define EM_RXQ_SCAN_INTERVAL 4
@@ -1585,7 +1602,6 @@ em_alloc_rx_queue_mbufs(struct em_rx_queue *rxq)
 		if (mbuf == NULL) {
 			PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
 				"queue_id=%hu\n", rxq->queue_id);
-			em_rx_queue_release(rxq);
 			return (-ENOMEM);
 		}
 
@@ -1708,7 +1724,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
 		 * limit for packet length, jumbo frame of any size
 		 * can be accepted, thus we have to enable scattered
 		 * rx if jumbo frames are enabled (or if buffer size
-		 * is too small to accomodate non-jumbo packets)
+		 * is too small to accommodate non-jumbo packets)
 		 * to avoid splitting packets that don't fit into
 		 * one buffer.
 		 */
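Together with the hunk below, the choice of receive function reduces to a single predicate: use scattered RX whenever a frame may not fit into one buffer, or when the application requests it explicitly. A condensed model of that rule follows; it is not the driver's actual control flow, which is spread through eth_em_rx_init():

    #include <stdbool.h>
    #include <stdint.h>

    /* When must the em PMD install eth_em_recv_scattered_pkts?  Jumbo
     * frames (the hardware enforces no upper length limit, so any size
     * may arrive), a buffer too small for the configured maximum frame,
     * or an explicit enable_scatter request. */
    static bool
    need_scattered_rx(bool jumbo_frame, uint32_t max_rx_pkt_len,
    		uint32_t rx_buf_size, bool enable_scatter)
    {
    	if (jumbo_frame || max_rx_pkt_len > rx_buf_size)
    		return true;
    	return enable_scatter;
    }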
@@ -1720,6 +1736,11 @@
 		}
 	}
 
+	if (dev->data->dev_conf.rxmode.enable_scatter) {
+		dev->rx_pkt_burst = eth_em_recv_scattered_pkts;
+		dev->data->scattered_rx = 1;
+	}
+
 	/*
 	 * Setup the Checksum Register.
 	 * Receive Full-Packet Checksum Offload is mutually exclusive with RSS.
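For completeness, the new enable_scatter path can be exercised from an application roughly as follows; this is a sketch against the ethdev API of this period, with port setup and error handling omitted:

    #include <rte_ethdev.h>

    /* Request scattered RX explicitly so eth_em_rx_init() installs
     * eth_em_recv_scattered_pkts even when jumbo frames are off. */
    static const struct rte_eth_conf port_conf = {
    	.rxmode = {
    		.enable_scatter = 1,
    	},
    };

    /* ... rte_eth_dev_configure(port_id, 1, 1, &port_conf); */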