fds cannot be shared between processes, and each process needs to have
its own fds pointer.
Signed-off-by: Raslan Darawsheh <rasland@mellanox.com>
Reviewed-by: Ferruh Yigit <ferruh.yigit@intel.com>
pmd_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
struct rx_queue *rxq = queue;
pmd_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
struct rx_queue *rxq = queue;
+ struct pmd_process_private *process_private;
uint16_t num_rx;
unsigned long num_rx_bytes = 0;
uint32_t trigger = tap_trigger;
uint16_t num_rx;
unsigned long num_rx_bytes = 0;
uint32_t trigger = tap_trigger;
return 0;
if (trigger)
rxq->trigger_seen = trigger;
return 0;
if (trigger)
rxq->trigger_seen = trigger;
+ process_private = rte_eth_devices[rxq->in_port].process_private;
rte_compiler_barrier();
for (num_rx = 0; num_rx < nb_pkts; ) {
struct rte_mbuf *mbuf = rxq->pool;
rte_compiler_barrier();
for (num_rx = 0; num_rx < nb_pkts; ) {
struct rte_mbuf *mbuf = rxq->pool;
uint16_t data_off = rte_pktmbuf_headroom(mbuf);
int len;
uint16_t data_off = rte_pktmbuf_headroom(mbuf);
int len;
- len = readv(rxq->fd, *rxq->iovecs,
- 1 +
- (rxq->rxmode->offloads & DEV_RX_OFFLOAD_SCATTER ?
+ len = readv(process_private->rxq_fds[rxq->queue_id],
+ *rxq->iovecs,
+ 1 + (rxq->rxmode->offloads & DEV_RX_OFFLOAD_SCATTER ?
rxq->nb_rx_desc : 1));
if (len < (int)sizeof(struct tun_pi))
break;
rxq->nb_rx_desc : 1));
if (len < (int)sizeof(struct tun_pi))
break;
{
int i;
uint16_t l234_hlen;
{
int i;
uint16_t l234_hlen;
+ struct pmd_process_private *process_private;
+
+ process_private = rte_eth_devices[txq->out_port].process_private;
for (i = 0; i < num_mbufs; i++) {
struct rte_mbuf *mbuf = pmbufs[i];
for (i = 0; i < num_mbufs; i++) {
struct rte_mbuf *mbuf = pmbufs[i];
tap_tx_l4_cksum(l4_cksum, l4_phdr_cksum, l4_raw_cksum);
/* copy the tx frame data */
tap_tx_l4_cksum(l4_cksum, l4_phdr_cksum, l4_raw_cksum);
/* copy the tx frame data */
- n = writev(txq->fd, iovecs, j);
+ n = writev(process_private->txq_fds[txq->queue_id], iovecs, j);
if (n <= 0)
break;
(*num_packets)++;
if (n <= 0)
break;
(*num_packets)++;
{
int i;
struct pmd_internals *internals = dev->data->dev_private;
{
int i;
struct pmd_internals *internals = dev->data->dev_private;
+ struct pmd_process_private *process_private = dev->process_private;
tap_link_set_down(dev);
tap_flow_flush(dev, NULL);
tap_flow_implicit_flush(internals, NULL);
for (i = 0; i < RTE_PMD_TAP_MAX_QUEUES; i++) {
tap_link_set_down(dev);
tap_flow_flush(dev, NULL);
tap_flow_implicit_flush(internals, NULL);
for (i = 0; i < RTE_PMD_TAP_MAX_QUEUES; i++) {
- if (internals->rxq[i].fd != -1) {
- close(internals->rxq[i].fd);
- internals->rxq[i].fd = -1;
+ if (process_private->rxq_fds[i] != -1) {
+ close(process_private->rxq_fds[i]);
+ process_private->rxq_fds[i] = -1;
- if (internals->txq[i].fd != -1) {
- close(internals->txq[i].fd);
- internals->txq[i].fd = -1;
+ if (process_private->txq_fds[i] != -1) {
+ close(process_private->txq_fds[i]);
+ process_private->txq_fds[i] = -1;
tap_rx_queue_release(void *queue)
{
struct rx_queue *rxq = queue;
tap_rx_queue_release(void *queue)
{
struct rx_queue *rxq = queue;
+ struct pmd_process_private *process_private;
- if (rxq && (rxq->fd > 0)) {
- close(rxq->fd);
- rxq->fd = -1;
+ if (!rxq)
+ return;
+ process_private = rte_eth_devices[rxq->in_port].process_private;
+ if (process_private->rxq_fds[rxq->queue_id] > 0) {
+ close(process_private->rxq_fds[rxq->queue_id]);
+ process_private->rxq_fds[rxq->queue_id] = -1;
rte_pktmbuf_free(rxq->pool);
rte_free(rxq->iovecs);
rxq->pool = NULL;
rte_pktmbuf_free(rxq->pool);
rte_free(rxq->iovecs);
rxq->pool = NULL;
tap_tx_queue_release(void *queue)
{
struct tx_queue *txq = queue;
tap_tx_queue_release(void *queue)
{
struct tx_queue *txq = queue;
+ struct pmd_process_private *process_private;
+
+ if (!txq)
+ return;
+ process_private = rte_eth_devices[txq->out_port].process_private;
- if (txq && (txq->fd > 0)) {
- close(txq->fd);
- txq->fd = -1;
+ if (process_private->txq_fds[txq->queue_id] > 0) {
+ close(process_private->txq_fds[txq->queue_id]);
+ process_private->txq_fds[txq->queue_id] = -1;
int *other_fd;
const char *dir;
struct pmd_internals *pmd = dev->data->dev_private;
int *other_fd;
const char *dir;
struct pmd_internals *pmd = dev->data->dev_private;
+ struct pmd_process_private *process_private = dev->process_private;
struct rx_queue *rx = &internals->rxq[qid];
struct tx_queue *tx = &internals->txq[qid];
struct rte_gso_ctx *gso_ctx;
if (is_rx) {
struct rx_queue *rx = &internals->rxq[qid];
struct tx_queue *tx = &internals->txq[qid];
struct rte_gso_ctx *gso_ctx;
if (is_rx) {
- fd = &rx->fd;
- other_fd = &tx->fd;
+ fd = &process_private->rxq_fds[qid];
+ other_fd = &process_private->txq_fds[qid];
dir = "rx";
gso_ctx = NULL;
} else {
dir = "rx";
gso_ctx = NULL;
} else {
- fd = &tx->fd;
- other_fd = &rx->fd;
+ fd = &process_private->txq_fds[qid];
+ other_fd = &process_private->rxq_fds[qid];
dir = "tx";
gso_ctx = &tx->gso_ctx;
}
dir = "tx";
gso_ctx = &tx->gso_ctx;
}
struct rte_mempool *mp)
{
struct pmd_internals *internals = dev->data->dev_private;
struct rte_mempool *mp)
{
struct pmd_internals *internals = dev->data->dev_private;
+ struct pmd_process_private *process_private = dev->process_private;
struct rx_queue *rxq = &internals->rxq[rx_queue_id];
struct rte_mbuf **tmp = &rxq->pool;
long iov_max = sysconf(_SC_IOV_MAX);
struct rx_queue *rxq = &internals->rxq[rx_queue_id];
struct rte_mbuf **tmp = &rxq->pool;
long iov_max = sysconf(_SC_IOV_MAX);
}
TAP_LOG(DEBUG, " RX TUNTAP device name %s, qid %d on fd %d",
}
TAP_LOG(DEBUG, " RX TUNTAP device name %s, qid %d on fd %d",
- internals->name, rx_queue_id, internals->rxq[rx_queue_id].fd);
+ internals->name, rx_queue_id,
+ process_private->rxq_fds[rx_queue_id]);
const struct rte_eth_txconf *tx_conf)
{
struct pmd_internals *internals = dev->data->dev_private;
const struct rte_eth_txconf *tx_conf)
{
struct pmd_internals *internals = dev->data->dev_private;
+ struct pmd_process_private *process_private = dev->process_private;
struct tx_queue *txq;
int ret;
uint64_t offloads;
struct tx_queue *txq;
int ret;
uint64_t offloads;
return -1;
TAP_LOG(DEBUG,
" TX TUNTAP device name %s, qid %d on fd %d csum %s",
return -1;
TAP_LOG(DEBUG,
" TX TUNTAP device name %s, qid %d on fd %d csum %s",
- internals->name, tx_queue_id, internals->txq[tx_queue_id].fd,
+ internals->name, tx_queue_id,
+ process_private->txq_fds[tx_queue_id],
txq->csum ? "on" : "off");
return 0;
txq->csum ? "on" : "off");
return 0;
int numa_node = rte_socket_id();
struct rte_eth_dev *dev;
struct pmd_internals *pmd;
int numa_node = rte_socket_id();
struct rte_eth_dev *dev;
struct pmd_internals *pmd;
+ struct pmd_process_private *process_private;
struct rte_eth_dev_data *data;
struct ifreq ifr;
int i;
struct rte_eth_dev_data *data;
struct ifreq ifr;
int i;
+ process_private = (struct pmd_process_private *)
+ rte_zmalloc_socket(tap_name, sizeof(struct pmd_process_private),
+ RTE_CACHE_LINE_SIZE, dev->device->numa_node);
+
+ if (process_private == NULL) {
+ TAP_LOG(ERR, "Failed to alloc memory for process private");
+ return -1;
+ }
pmd = dev->data->dev_private;
pmd = dev->data->dev_private;
+ dev->process_private = process_private;
pmd->dev = dev;
snprintf(pmd->name, sizeof(pmd->name), "%s", tap_name);
pmd->type = type;
pmd->dev = dev;
snprintf(pmd->name, sizeof(pmd->name), "%s", tap_name);
pmd->type = type;
/* Presetup the fds to -1 as being not valid */
pmd->ka_fd = -1;
for (i = 0; i < RTE_PMD_TAP_MAX_QUEUES; i++) {
/* Presetup the fds to -1 as being not valid */
pmd->ka_fd = -1;
for (i = 0; i < RTE_PMD_TAP_MAX_QUEUES; i++) {
- pmd->rxq[i].fd = -1;
- pmd->txq[i].fd = -1;
+ process_private->rxq_fds[i] = -1;
+ process_private->txq_fds[i] = -1;
}
if (pmd->type == ETH_TUNTAP_TYPE_TAP) {
}
if (pmd->type == ETH_TUNTAP_TYPE_TAP) {
{
struct rte_eth_dev *eth_dev = NULL;
struct pmd_internals *internals;
{
struct rte_eth_dev *eth_dev = NULL;
struct pmd_internals *internals;
+ struct pmd_process_private *process_private;
int i;
/* find the ethdev entry */
int i;
/* find the ethdev entry */
return rte_eth_dev_release_port_secondary(eth_dev);
internals = eth_dev->data->dev_private;
return rte_eth_dev_release_port_secondary(eth_dev);
internals = eth_dev->data->dev_private;
+ process_private = eth_dev->process_private;
TAP_LOG(DEBUG, "Closing %s Ethernet device on numa %u",
(internals->type == ETH_TUNTAP_TYPE_TAP) ? "TAP" : "TUN",
TAP_LOG(DEBUG, "Closing %s Ethernet device on numa %u",
(internals->type == ETH_TUNTAP_TYPE_TAP) ? "TAP" : "TUN",
tap_nl_final(internals->nlsk_fd);
}
for (i = 0; i < RTE_PMD_TAP_MAX_QUEUES; i++) {
tap_nl_final(internals->nlsk_fd);
}
for (i = 0; i < RTE_PMD_TAP_MAX_QUEUES; i++) {
- if (internals->rxq[i].fd != -1) {
- close(internals->rxq[i].fd);
- internals->rxq[i].fd = -1;
+ if (process_private->rxq_fds[i] != -1) {
+ close(process_private->rxq_fds[i]);
+ process_private->rxq_fds[i] = -1;
- if (internals->txq[i].fd != -1) {
- close(internals->txq[i].fd);
- internals->txq[i].fd = -1;
+ if (process_private->txq_fds[i] != -1) {
+ close(process_private->txq_fds[i]);
+ process_private->txq_fds[i] = -1;
}
}
close(internals->ioctl_sock);
rte_free(eth_dev->data->dev_private);
}
}
close(internals->ioctl_sock);
rte_free(eth_dev->data->dev_private);
+ rte_free(eth_dev->process_private);
rte_eth_dev_release_port(eth_dev);
if (internals->ka_fd != -1) {
rte_eth_dev_release_port(eth_dev);
if (internals->ka_fd != -1) {
uint32_t trigger_seen; /* Last seen Rx trigger value */
uint16_t in_port; /* Port ID */
uint16_t queue_id; /* queue ID*/
uint32_t trigger_seen; /* Last seen Rx trigger value */
uint16_t in_port; /* Port ID */
uint16_t queue_id; /* queue ID*/
struct pkt_stats stats; /* Stats for this RX queue */
uint16_t nb_rx_desc; /* max number of mbufs available */
struct rte_eth_rxmode *rxmode; /* RX features */
struct pkt_stats stats; /* Stats for this RX queue */
uint16_t nb_rx_desc; /* max number of mbufs available */
struct rte_eth_rxmode *rxmode; /* RX features */
int type; /* Type field - TUN|TAP */
uint16_t *mtu; /* Pointer to MTU from dev_data */
uint16_t csum:1; /* Enable checksum offloading */
int type; /* Type field - TUN|TAP */
uint16_t *mtu; /* Pointer to MTU from dev_data */
uint16_t csum:1; /* Enable checksum offloading */
int ka_fd; /* keep-alive file descriptor */
};
int ka_fd; /* keep-alive file descriptor */
};
+struct pmd_process_private {
+	int rxq_fds[RTE_PMD_TAP_MAX_QUEUES]; /* per-process Rx queue fds; -1 when unused */
+	int txq_fds[RTE_PMD_TAP_MAX_QUEUES]; /* per-process Tx queue fds; -1 when unused */
+};
+
/* tap_intr.c */
int tap_rx_intr_vec_set(struct rte_eth_dev *dev, int set);
/* tap_intr.c */
int tap_rx_intr_vec_set(struct rte_eth_dev *dev, int set);
struct rte_flow_error *error __rte_unused)
{
struct pmd_internals *pmd = dev->data->dev_private;
struct rte_flow_error *error __rte_unused)
{
struct pmd_internals *pmd = dev->data->dev_private;
+ struct pmd_process_private *process_private = dev->process_private;
/* normalize 'set' variable to contain 0 or 1 values */
if (set)
/* normalize 'set' variable to contain 0 or 1 values */
if (set)
* If netdevice is there, setup appropriate flow rules immediately.
* Otherwise it will be set when bringing up the netdevice (tun_alloc).
*/
* If netdevice is there, setup appropriate flow rules immediately.
* Otherwise it will be set when bringing up the netdevice (tun_alloc).
*/
+ if (!process_private->rxq_fds[0])
return 0;
if (set) {
struct rte_flow *remote_flow;
return 0;
if (set) {
struct rte_flow *remote_flow;
tap_rx_intr_vec_install(struct rte_eth_dev *dev)
{
struct pmd_internals *pmd = dev->data->dev_private;
tap_rx_intr_vec_install(struct rte_eth_dev *dev)
{
struct pmd_internals *pmd = dev->data->dev_private;
+ struct pmd_process_private *process_private = dev->process_private;
unsigned int rxqs_n = pmd->dev->data->nb_rx_queues;
struct rte_intr_handle *intr_handle = &pmd->intr_handle;
unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
unsigned int rxqs_n = pmd->dev->data->nb_rx_queues;
struct rte_intr_handle *intr_handle = &pmd->intr_handle;
unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
struct rx_queue *rxq = pmd->dev->data->rx_queues[i];
/* Skip queues that cannot request interrupts. */
struct rx_queue *rxq = pmd->dev->data->rx_queues[i];
/* Skip queues that cannot request interrupts. */
- if (!rxq || rxq->fd <= 0) {
+ if (!rxq || process_private->rxq_fds[i] <= 0) {
/* Use invalid intr_vec[] index to disable entry. */
intr_handle->intr_vec[i] =
RTE_INTR_VEC_RXTX_OFFSET +
/* Use invalid intr_vec[] index to disable entry. */
intr_handle->intr_vec[i] =
RTE_INTR_VEC_RXTX_OFFSET +
continue;
}
intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count;
continue;
}
intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count;
- intr_handle->efds[count] = rxq->fd;
+ intr_handle->efds[count] = process_private->rxq_fds[i];