mbuf: remove the rte_pktmbuf structure
diff --git a/lib/librte_pmd_e1000/em_rxtx.c b/lib/librte_pmd_e1000/em_rxtx.c
index 0bebfe2..058e1bd 100644
--- a/lib/librte_pmd_e1000/em_rxtx.c
+++ b/lib/librte_pmd_e1000/em_rxtx.c
@@ -1,13 +1,13 @@
 /*-
  *   BSD LICENSE
- *
- *   Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
+ *
+ *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
  *   All rights reserved.
- *
+ *
  *   Redistribution and use in source and binary forms, with or without
  *   modification, are permitted provided that the following conditions
  *   are met:
- *
+ *
  *     * Redistributions of source code must retain the above copyright
  *       notice, this list of conditions and the following disclaimer.
  *     * Redistributions in binary form must reproduce the above copyright
@@ -17,7 +17,7 @@
  *     * Neither the name of Intel Corporation nor the names of its
  *       contributors may be used to endorse or promote products derived
  *       from this software without specific prior written permission.
- *
+ *
  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@@ -33,7 +33,6 @@
 #include <sys/queue.h>
 
-#include <endian.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
@@ -74,6 +73,7 @@
 #include "e1000_logs.h"
 #include "e1000/e1000_api.h"
 #include "e1000_ethdev.h"
+#include "e1000/e1000_osdep.h"
 
 #define E1000_TXD_VLAN_SHIFT	16
@@ -85,13 +85,13 @@ rte_rxmbuf_alloc(struct rte_mempool *mp)
 	struct rte_mbuf *m;
 
 	m = __rte_mbuf_raw_alloc(mp);
-	__rte_mbuf_sanity_check_raw(m, RTE_MBUF_PKT, 0);
+	__rte_mbuf_sanity_check_raw(m, 0);
 	return (m);
 }
 
 #define RTE_MBUF_DATA_DMA_ADDR(mb) \
 	(uint64_t) ((mb)->buf_physaddr + \
-		(uint64_t) ((char *)((mb)->pkt.data) - (char *)(mb)->buf_addr))
+		(uint64_t) ((char *)((mb)->data) - (char *)(mb)->buf_addr))
 
 #define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
 	(uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
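Note: the hunk above shows the pattern repeated throughout this file. With struct rte_pktmbuf removed, fields formerly reached through the mbuf's pkt member are now direct members of struct rte_mbuf. A minimal sketch against the post-change header (the helper name is made up for illustration):

	#include <rte_mbuf.h>

	/* Before this commit a segment length was read through the nested
	 * struct:   slen = m->pkt.data_len;
	 * After it, the same field sits directly on struct rte_mbuf: */
	static inline uint16_t
	seg_len(const struct rte_mbuf *m)
	{
		return m->data_len;	/* same data, one nesting level less */
	}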
@@ -421,7 +421,7 @@ eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		tx_ol_req = (uint16_t)(ol_flags & (PKT_TX_IP_CKSUM |
 						PKT_TX_L4_MASK));
 		if (tx_ol_req) {
-			hdrlen = tx_pkt->pkt.vlan_macip;
+			hdrlen = tx_pkt->vlan_macip;
 			/* If new context to be built or reuse the exist ctx. */
 			ctx = what_ctx_update(txq, tx_ol_req, hdrlen);
@@ -434,7 +434,7 @@ eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		 * This will always be the number of segments + the number of
 		 * Context descriptors required to transmit the packet
 		 */
-		nb_used = (uint16_t)(tx_pkt->pkt.nb_segs + new_ctx);
+		nb_used = (uint16_t)(tx_pkt->nb_segs + new_ctx);
 
 		/*
 		 * The number of descriptors that must be allocated for a
@@ -454,7 +454,7 @@ eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			   " tx_first=%u tx_last=%u\n",
 			   (unsigned) txq->port_id,
 			   (unsigned) txq->queue_id,
-			   (unsigned) tx_pkt->pkt.pkt_len,
+			   (unsigned) tx_pkt->pkt_len,
 			   (unsigned) tx_id,
 			   (unsigned) tx_last);
@@ -516,7 +516,7 @@ eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		/* Set VLAN Tag offload fields. */
 		if (ol_flags & PKT_TX_VLAN_PKT) {
 			cmd_type_len |= E1000_TXD_CMD_VLE;
-			popts_spec = tx_pkt->pkt.vlan_macip.f.vlan_tci <<
+			popts_spec = tx_pkt->vlan_macip.f.vlan_tci <<
 				E1000_TXD_VLAN_SHIFT;
 		}
@@ -566,7 +566,7 @@ eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			/*
 			 * Set up Transmit Data Descriptor.
 			 */
-			slen = m_seg->pkt.data_len;
+			slen = m_seg->data_len;
 			buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
 			txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
@@ -576,7 +576,7 @@ eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			txe->last_id = tx_last;
 			tx_id = txe->next_id;
 			txe = txn;
-			m_seg = m_seg->pkt.next;
+			m_seg = m_seg->next;
 		} while (m_seg != NULL);
 
 		/*
@@ -771,20 +771,20 @@ eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		 */
 		pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.length) -
 				rxq->crc_len);
-		rxm->pkt.data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
-		rte_packet_prefetch(rxm->pkt.data);
-		rxm->pkt.nb_segs = 1;
-		rxm->pkt.next = NULL;
-		rxm->pkt.pkt_len = pkt_len;
-		rxm->pkt.data_len = pkt_len;
-		rxm->pkt.in_port = rxq->port_id;
+		rxm->data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
+		rte_packet_prefetch(rxm->data);
+		rxm->nb_segs = 1;
+		rxm->next = NULL;
+		rxm->pkt_len = pkt_len;
+		rxm->data_len = pkt_len;
+		rxm->in_port = rxq->port_id;
 
 		rxm->ol_flags = rx_desc_status_to_pkt_flags(status);
 		rxm->ol_flags = (uint16_t)(rxm->ol_flags |
 				rx_desc_error_to_pkt_flags(rxd.errors));
 
 		/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
-		rxm->pkt.vlan_macip.f.vlan_tci = rte_le_to_cpu_16(rxd.special);
+		rxm->vlan_macip.f.vlan_tci = rte_le_to_cpu_16(rxd.special);
 
 		/*
 		 * Store the mbuf address into the next entry of the array
@@ -940,8 +940,8 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		 * Set data length & data buffer address of mbuf.
 		 */
 		data_len = rte_le_to_cpu_16(rxd.length);
-		rxm->pkt.data_len = data_len;
-		rxm->pkt.data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
+		rxm->data_len = data_len;
+		rxm->data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
 
 		/*
 		 * If this is the first buffer of the received packet,
@@ -953,12 +953,12 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		 */
 		if (first_seg == NULL) {
 			first_seg = rxm;
-			first_seg->pkt.pkt_len = data_len;
-			first_seg->pkt.nb_segs = 1;
+			first_seg->pkt_len = data_len;
+			first_seg->nb_segs = 1;
 		} else {
-			first_seg->pkt.pkt_len += data_len;
-			first_seg->pkt.nb_segs++;
-			last_seg->pkt.next = rxm;
+			first_seg->pkt_len += data_len;
+			first_seg->nb_segs++;
+			last_seg->next = rxm;
 		}
 
 		/*
@@ -981,18 +981,18 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		 * mbuf, subtract the length of that CRC part from the
 		 * data length of the previous mbuf.
 		 */
-		rxm->pkt.next = NULL;
+		rxm->next = NULL;
 		if (unlikely(rxq->crc_len > 0)) {
-			first_seg->pkt.pkt_len -= ETHER_CRC_LEN;
+			first_seg->pkt_len -= ETHER_CRC_LEN;
 			if (data_len <= ETHER_CRC_LEN) {
 				rte_pktmbuf_free_seg(rxm);
-				first_seg->pkt.nb_segs--;
-				last_seg->pkt.data_len = (uint16_t)
-					(last_seg->pkt.data_len -
+				first_seg->nb_segs--;
+				last_seg->data_len = (uint16_t)
+					(last_seg->data_len -
 					 (ETHER_CRC_LEN - data_len));
-				last_seg->pkt.next = NULL;
+				last_seg->next = NULL;
 			} else
-				rxm->pkt.data_len =
+				rxm->data_len =
 					(uint16_t) (data_len - ETHER_CRC_LEN);
 		}
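Note: a worked example of the CRC trimming above, with ETHER_CRC_LEN = 4. If the last descriptor delivered data_len = 2, those bytes are pure CRC: the last mbuf is freed and the previous segment gives up the remaining 4 - 2 = 2 CRC bytes. If data_len = 60, the CRC fits entirely in the last segment and only its own data_len shrinks by 4. A standalone sketch of that rule (helper name invented, not driver code):

	#include <stdint.h>

	#define ETHER_CRC_LEN 4

	/* How many CRC bytes spilled into the previous segment? */
	static uint16_t
	crc_spill_into_prev_seg(uint16_t last_data_len)
	{
		if (last_data_len <= ETHER_CRC_LEN)
			return (uint16_t)(ETHER_CRC_LEN - last_data_len);
		return 0;	/* CRC contained in the last segment */
	}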
@@ -1003,17 +1003,17 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		 *      - IP checksum flag,
 		 *      - error flags.
 		 */
-		first_seg->pkt.in_port = rxq->port_id;
+		first_seg->in_port = rxq->port_id;
 
 		first_seg->ol_flags = rx_desc_status_to_pkt_flags(status);
 		first_seg->ol_flags = (uint16_t)(first_seg->ol_flags |
 				rx_desc_error_to_pkt_flags(rxd.errors));
 
 		/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
-		rxm->pkt.vlan_macip.f.vlan_tci = rte_le_to_cpu_16(rxd.special);
+		rxm->vlan_macip.f.vlan_tci = rte_le_to_cpu_16(rxd.special);
 
 		/* Prefetch data of first segment, if configured to do so. */
-		rte_packet_prefetch(first_seg->pkt.data);
+		rte_packet_prefetch(first_seg->data);
 
 		/*
 		 * Store the mbuf address into the next entry of the array
@@ -1093,14 +1093,19 @@ ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
 	const struct rte_memzone *mz;
 	char z_name[RTE_MEMZONE_NAMESIZE];
 
-	rte_snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
+	snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
 		dev->driver->pci_drv.name, ring_name, dev->data->port_id,
 		queue_id);
 
 	if ((mz = rte_memzone_lookup(z_name)) != 0)
 		return (mz);
 
+#ifdef RTE_LIBRTE_XEN_DOM0
+	return rte_memzone_reserve_bounded(z_name, ring_size,
+			socket_id, 0, CACHE_LINE_SIZE, RTE_PGSIZE_2M);
+#else
 	return rte_memzone_reserve(z_name, ring_size, socket_id, 0);
+#endif
 }
 
 static void
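Note: the RTE_LIBRTE_XEN_DOM0 branch above reserves the ring with rte_memzone_reserve_bounded() so the zone never crosses a RTE_PGSIZE_2M boundary, presumably because under the Xen Dom0 model of this DPDK era memory is mapped in 2 MB chunks that are machine-contiguous only within one chunk; rte_mem_phy2mch() (used in the queue-setup hunks below) translates a guest-physical address to a machine address per chunk. A caller-side sketch of the same pattern (function and zone name are made up):

	#include <rte_memzone.h>
	#include <rte_memory.h>

	/* Reserve a DMA ring and return the address to program into the NIC. */
	static uint64_t
	ring_dma_addr_sketch(size_t ring_size, int socket_id)
	{
		const struct rte_memzone *mz;

	#ifdef RTE_LIBRTE_XEN_DOM0
		mz = rte_memzone_reserve_bounded("ring_sketch", ring_size,
				socket_id, 0, CACHE_LINE_SIZE, RTE_PGSIZE_2M);
		return (mz == NULL) ? 0 :
			rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
	#else
		mz = rte_memzone_reserve("ring_sketch", ring_size, socket_id, 0);
		return (mz == NULL) ? 0 : (uint64_t)mz->phys_addr;
	#endif
	}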
@@ -1270,13 +1275,15 @@ eth_em_tx_queue_setup(struct rte_eth_dev *dev,
 	txq->pthresh = tx_conf->tx_thresh.pthresh;
 	txq->hthresh = tx_conf->tx_thresh.hthresh;
 	txq->wthresh = tx_conf->tx_thresh.wthresh;
-	if (txq->wthresh > 0 && hw->mac.type == e1000_82576)
-		txq->wthresh = 1;
 	txq->queue_id = queue_idx;
 	txq->port_id = dev->data->port_id;
 
 	txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(queue_idx));
+#ifndef RTE_LIBRTE_XEN_DOM0
 	txq->tx_ring_phys_addr = (uint64_t) tz->phys_addr;
+#else
+	txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
+#endif
 	txq->tx_ring = (struct e1000_data_desc *) tz->addr;
 
 	PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n",
@@ -1393,9 +1400,6 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq->pthresh = rx_conf->rx_thresh.pthresh;
 	rxq->hthresh = rx_conf->rx_thresh.hthresh;
 	rxq->wthresh = rx_conf->rx_thresh.wthresh;
-	if (rxq->wthresh > 0 && hw->mac.type == e1000_82576)
-		rxq->wthresh = 1;
-
 	rxq->rx_free_thresh = rx_conf->rx_free_thresh;
 	rxq->queue_id = queue_idx;
 	rxq->port_id = dev->data->port_id;
@@ -1403,8 +1407,12 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
 		0 : ETHER_CRC_LEN);
 
 	rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(queue_idx));
-	rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(queue_idx));
+	rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(queue_idx));
+#ifndef RTE_LIBRTE_XEN_DOM0
 	rxq->rx_ring_phys_addr = (uint64_t) rz->phys_addr;
+#else
+	rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
+#endif
 	rxq->rx_ring = (struct e1000_rx_desc *) rz->addr;
 
 	PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n",
@@ -1416,7 +1424,7 @@ eth_em_rx_queue_setup(struct rte_eth_dev *dev,
 	return (0);
 }
 
-uint32_t 
+uint32_t
 eth_em_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 {
 #define EM_RXQ_SCAN_INTERVAL 4
@@ -1449,7 +1457,7 @@ eth_em_rx_descriptor_done(void *rx_queue, uint16_t offset)
 {
 	volatile struct e1000_rx_desc *rxdp;
 	struct em_rx_queue *rxq = rx_queue;
-	uint16_t desc;
+	uint32_t desc;
 
 	if (unlikely(offset >= rxq->nb_rx_desc))
 		return 0;
@@ -1572,7 +1580,6 @@ em_alloc_rx_queue_mbufs(struct em_rx_queue *rxq)
 		if (mbuf == NULL) {
 			PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
 				"queue_id=%hu\n", rxq->queue_id);
-			em_rx_queue_release(rxq);
 			return (-ENOMEM);
 		}
@@ -1695,7 +1702,7 @@ eth_em_rx_init(struct rte_eth_dev *dev)
 		 * limit for packet length, jumbo frame of any size
 		 * can be accepted, thus we have to enable scattered
 		 * rx if jumbo frames are enabled (or if buffer size
-		 * is too small to accomodate non-jumbo packets)
+		 * is too small to accommodate non-jumbo packets)
 		 * to avoid splitting packets that don't fit into
 		 * one buffer.
 		 */
@@ -1707,6 +1714,11 @@ eth_em_rx_init(struct rte_eth_dev *dev)
 		}
 	}
 
+	if (dev->data->dev_conf.rxmode.enable_scatter) {
+		dev->rx_pkt_burst = eth_em_recv_scattered_pkts;
+		dev->data->scattered_rx = 1;
+	}
+
 	/*
 	 * Setup the Checksum Register.
 	 * Receive Full-Packet Checksum Offload is mutually exclusive with RSS.
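Note: the new enable_scatter block above lets an application request the scattered RX path explicitly, independent of jumbo frames. An application-side sketch against the rte_eth_conf of this era (port and queue counts are invented for illustration):

	#include <rte_ethdev.h>

	/* Ask the PMD to install eth_em_recv_scattered_pkts at RX init. */
	static int
	configure_port_scattered(uint8_t port_id)
	{
		struct rte_eth_conf conf = {
			.rxmode = {
				.enable_scatter = 1,
			},
		};

		return rte_eth_dev_configure(port_id, 1 /* rx queues */,
				1 /* tx queues */, &conf);
	}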