if (header.len <= buf_size) {
/* pcap packet will fit in the mbuf, go ahead and copy */
- rte_memcpy(mbuf->pkt.data, packet, header.len);
- mbuf->pkt.data_len = (uint16_t)header.len;
- mbuf->pkt.pkt_len = mbuf->pkt.data_len;
- bufs[i] = mbuf;
+ rte_memcpy(rte_pktmbuf_mtod(mbuf, void *), packet,
+ header.len);
+ mbuf->data_len = (uint16_t)header.len;
+ mbuf->pkt_len = mbuf->data_len;
+ bufs[num_rx] = mbuf;
num_rx++;
} else {
/* pcap packet will not fit in the mbuf, so drop packet */
for (i = 0; i < nb_pkts; i++) {
mbuf = bufs[i];
calculate_timestamp(&header.ts);
- header.len = mbuf->pkt.data_len;
+ header.len = mbuf->data_len;
header.caplen = header.len;
- pcap_dump((u_char*) dumper_q->dumper, &header, mbuf->pkt.data);
+ pcap_dump((u_char *)dumper_q->dumper, &header,
+ rte_pktmbuf_mtod(mbuf, void *));
rte_pktmbuf_free(mbuf);
num_tx++;
}
for (i = 0; i < nb_pkts; i++) {
mbuf = bufs[i];
- ret = pcap_sendpacket(tx_queue->pcap, (u_char*) mbuf->pkt.data,
- mbuf->pkt.data_len);
+ ret = pcap_sendpacket(tx_queue->pcap,
+ rte_pktmbuf_mtod(mbuf, u_char *),
+ mbuf->data_len);
if (unlikely(ret != 0))
break;
num_tx++;
static int
-rte_pmd_init_internals(const unsigned nb_rx_queues,
+rte_pmd_init_internals(const char *name, const unsigned nb_rx_queues,
const unsigned nb_tx_queues,
const unsigned numa_node,
struct pmd_internals **internals,
/* now do all data allocation - for eth_dev structure, dummy pci driver
* and internal (private) data
*/
- data = rte_zmalloc_socket(NULL, sizeof(*data), 0, numa_node);
+ data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
if (data == NULL)
goto error;
- pci_dev = rte_zmalloc_socket(NULL, sizeof(*pci_dev), 0, numa_node);
+ pci_dev = rte_zmalloc_socket(name, sizeof(*pci_dev), 0, numa_node);
if (pci_dev == NULL)
goto error;
- *internals = rte_zmalloc_socket(NULL, sizeof(**internals), 0, numa_node);
+ *internals = rte_zmalloc_socket(name, sizeof(**internals), 0, numa_node);
if (*internals == NULL)
goto error;
/* reserve an ethdev entry */
- *eth_dev = rte_eth_dev_allocate();
+ *eth_dev = rte_eth_dev_allocate(name);
if (*eth_dev == NULL)
goto error;
}
static int
-rte_eth_from_pcaps_n_dumpers(pcap_t * const rx_queues[],
+rte_eth_from_pcaps_n_dumpers(const char *name, pcap_t * const rx_queues[],
const unsigned nb_rx_queues,
pcap_dumper_t * const tx_queues[],
const unsigned nb_tx_queues,
if (tx_queues == NULL && nb_tx_queues > 0)
return -1;
- if (rte_pmd_init_internals(nb_rx_queues, nb_tx_queues, numa_node,
+ if (rte_pmd_init_internals(name, nb_rx_queues, nb_tx_queues, numa_node,
&internals, ð_dev, kvlist) < 0)
return -1;
}
static int
-rte_eth_from_pcaps(pcap_t * const rx_queues[],
+rte_eth_from_pcaps(const char *name, pcap_t * const rx_queues[],
const unsigned nb_rx_queues,
pcap_t * const tx_queues[],
const unsigned nb_tx_queues,
if (tx_queues == NULL && nb_tx_queues > 0)
return -1;
- if (rte_pmd_init_internals(nb_rx_queues, nb_tx_queues, numa_node,
+ if (rte_pmd_init_internals(name, nb_rx_queues, nb_tx_queues, numa_node,
&internals, ð_dev, kvlist) < 0)
return -1;
if (ret < 0)
return -1;
- return rte_eth_from_pcaps(pcaps.pcaps, 1, pcaps.pcaps, 1,
+ return rte_eth_from_pcaps(name, pcaps.pcaps, 1, pcaps.pcaps, 1,
numa_node, kvlist);
}
return -1;
if (using_dumpers)
- return rte_eth_from_pcaps_n_dumpers(pcaps.pcaps, pcaps.num_of_rx,
+ return rte_eth_from_pcaps_n_dumpers(name, pcaps.pcaps, pcaps.num_of_rx,
dumpers.dumpers, dumpers.num_of_tx, numa_node, kvlist);
- return rte_eth_from_pcaps(pcaps.pcaps, pcaps.num_of_rx, dumpers.pcaps,
+ return rte_eth_from_pcaps(name, pcaps.pcaps, pcaps.num_of_rx, dumpers.pcaps,
dumpers.num_of_tx, numa_node, kvlist);
}