diff --git a/lib/librte_eventdev/rte_event_eth_rx_adapter.c b/lib/librte_eventdev/rte_event_eth_rx_adapter.c
index a97d1985b3..95dd478201 100644
--- a/lib/librte_eventdev/rte_event_eth_rx_adapter.c
+++ b/lib/librte_eventdev/rte_event_eth_rx_adapter.c
@@ -198,11 +198,8 @@ struct eth_rx_queue_info {
 	int queue_enabled;	/* True if added */
 	int intr_enabled;
 	uint16_t wt;		/* Polling weight */
-	uint8_t event_queue_id;	/* Event queue to enqueue packets to */
-	uint8_t sched_type;	/* Sched type for events */
-	uint8_t priority;	/* Event priority */
-	uint32_t flow_id;	/* App provided flow identifier */
 	uint32_t flow_id_mask;	/* Set to ~0 if app provides flow id else 0 */
+	uint64_t event;
 };
 
 static struct rte_event_eth_rx_adapter **event_eth_rx_adapter;
@@ -716,18 +713,6 @@ rxa_enq_block_end_ts(struct rte_event_eth_rx_adapter *rx_adapter,
 	}
 }
 
-/* Add event to buffer, free space check is done prior to calling
- * this function
- */
-static inline void
-rxa_buffer_event(struct rte_event_eth_rx_adapter *rx_adapter,
-		struct rte_event *ev)
-{
-	struct rte_eth_event_enqueue_buffer *buf =
-		&rx_adapter->event_enqueue_buffer;
-	rte_memcpy(&buf->events[buf->count++], ev, sizeof(struct rte_event));
-}
-
 /* Enqueue buffered events to event device */
 static inline uint16_t
 rxa_flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter)
@@ -770,18 +755,16 @@ rxa_buffer_mbufs(struct rte_event_eth_rx_adapter *rx_adapter,
 		&dev_info->rx_queue[rx_queue_id];
 	struct rte_eth_event_enqueue_buffer *buf =
 		&rx_adapter->event_enqueue_buffer;
-	int32_t qid = eth_rx_queue_info->event_queue_id;
-	uint8_t sched_type = eth_rx_queue_info->sched_type;
-	uint8_t priority = eth_rx_queue_info->priority;
-	uint32_t flow_id;
-	struct rte_event events[BATCH_SIZE];
+	struct rte_event *ev = &buf->events[buf->count];
+	uint64_t event = eth_rx_queue_info->event;
+	uint32_t flow_id_mask = eth_rx_queue_info->flow_id_mask;
 	struct rte_mbuf *m = mbufs[0];
 	uint32_t rss_mask;
 	uint32_t rss;
 	int do_rss;
 	uint64_t ts;
-	struct rte_mbuf *cb_mbufs[BATCH_SIZE];
 	uint16_t nb_cb;
+	uint16_t dropped;
 
 	/* 0xffff ffff if PKT_RX_RSS_HASH is set, otherwise 0 */
 	rss_mask = ~(((m->ol_flags & PKT_RX_RSS_HASH) != 0) - 1);
@@ -797,41 +780,35 @@ rxa_buffer_mbufs(struct rte_event_eth_rx_adapter *rx_adapter,
 		}
 	}
 
-
-	nb_cb = dev_info->cb_fn ? dev_info->cb_fn(eth_dev_id, rx_queue_id,
-					ETH_EVENT_BUFFER_SIZE,
-					buf->count, mbufs,
-					num,
-					dev_info->cb_arg,
-					cb_mbufs) :
-					num;
-	if (nb_cb < num) {
-		mbufs = cb_mbufs;
-		num = nb_cb;
-	}
-
 	for (i = 0; i < num; i++) {
 		m = mbufs[i];
-		struct rte_event *ev = &events[i];
 
 		rss = do_rss ?
			rxa_do_softrss(m, rx_adapter->rss_key_be) :
			m->hash.rss;
-		flow_id =
-			eth_rx_queue_info->flow_id &
-			eth_rx_queue_info->flow_id_mask;
-		flow_id |= rss & ~eth_rx_queue_info->flow_id_mask;
-		ev->flow_id = flow_id;
-		ev->op = RTE_EVENT_OP_NEW;
-		ev->sched_type = sched_type;
-		ev->queue_id = qid;
-		ev->event_type = RTE_EVENT_TYPE_ETH_RX_ADAPTER;
-		ev->sub_event_type = 0;
-		ev->priority = priority;
+		ev->event = event;
+		ev->flow_id = (rss & ~flow_id_mask) |
+				(ev->flow_id & flow_id_mask);
 		ev->mbuf = m;
+		ev++;
+	}
+
+	if (dev_info->cb_fn) {
 
-		rxa_buffer_event(rx_adapter, ev);
+		dropped = 0;
+		nb_cb = dev_info->cb_fn(eth_dev_id, rx_queue_id,
+					ETH_EVENT_BUFFER_SIZE, buf->count, ev,
+					num, dev_info->cb_arg, &dropped);
+		if (unlikely(nb_cb > num))
+			RTE_EDEV_LOG_ERR("Rx CB returned %d (> %d) events",
+				nb_cb, num);
+		else
+			num = nb_cb;
+		if (dropped)
+			rx_adapter->stats.rx_dropped += dropped;
 	}
+
+	buf->count += num;
 }
 
 /* Enqueue packets from  <port, q>  to event buffer */
@@ -1718,6 +1695,7 @@ rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter,
 	int pollq;
 	int intrq;
 	int sintrq;
+	struct rte_event *qi_ev;
 
 	if (rx_queue_id == -1) {
 		uint16_t nb_rx_queues;
@@ -1734,16 +1712,19 @@
 	sintrq = rxa_shared_intr(dev_info, rx_queue_id);
 
 	queue_info = &dev_info->rx_queue[rx_queue_id];
-	queue_info->event_queue_id = ev->queue_id;
-	queue_info->sched_type = ev->sched_type;
-	queue_info->priority = ev->priority;
 	queue_info->wt = conf->servicing_weight;
+
+	qi_ev = (struct rte_event *)&queue_info->event;
+	qi_ev->event = ev->event;
+	qi_ev->op = RTE_EVENT_OP_NEW;
+	qi_ev->event_type = RTE_EVENT_TYPE_ETH_RX_ADAPTER;
+	qi_ev->sub_event_type = 0;
+
 	if (conf->rx_queue_flags &
 			RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID) {
-		queue_info->flow_id = ev->flow_id;
 		queue_info->flow_id_mask = ~0;
-	}
+	} else
+		qi_ev->flow_id = 0;
 
 	rxa_update_queue(rx_adapter, dev_info, rx_queue_id, 1);
 	if (rxa_polled_queue(dev_info, rx_queue_id)) {
@@ -2297,7 +2278,7 @@ rte_event_eth_rx_adapter_stop(uint8_t id)
 	return rxa_ctrl(id, 0);
}
 
-int __rte_experimental
+int
 rte_event_eth_rx_adapter_stats_get(uint8_t id,
 			struct rte_event_eth_rx_adapter_stats *stats)
 {
@@ -2384,7 +2365,7 @@ rte_event_eth_rx_adapter_service_id_get(uint8_t id, uint32_t *service_id)
 	return rx_adapter->service_inited ? 0 : -ESRCH;
 }
 
-int __rte_experimental
+int
 rte_event_eth_rx_adapter_cb_register(uint8_t id,
 				uint16_t eth_dev_id,
 				rte_event_eth_rx_adapter_cb_fn cb_fn,
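
In short: struct eth_rx_queue_info now caches one pre-built 64-bit rte_event word, filled once in rxa_add_queue (op, event type, queue id, sched type, priority), instead of four separate fields. The per-packet loop in rxa_buffer_mbufs therefore shrinks to a single 64-bit store plus a masked flow-id merge, written straight into the enqueue buffer rather than a local events[BATCH_SIZE] array. The per-queue flow_id_mask picks the flow-id source: ~0 keeps the application-supplied id baked into the template, 0 substitutes the packet's RSS hash. Below is a minimal standalone sketch of that merge; the union layout is illustrative only (the real rte_event packs flow_id into 20 bits), and ev_word/fill_event are made-up names, not DPDK API.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the first 64-bit word of rte_event (illustrative layout). */
union ev_word {
	uint64_t event;
	struct {
		uint32_t flow_id;	/* low bits, like rte_event's flow_id */
		uint32_t rest;		/* queue id, sched type, priority, ... */
	};
};

/* flow_id_mask is ~0 when the app supplied a flow id at queue-add time,
 * and 0 otherwise, matching the per-queue rule the patch stores.
 */
static union ev_word
fill_event(uint64_t tmpl, uint32_t rss, uint32_t flow_id_mask)
{
	union ev_word ev;

	ev.event = tmpl;	/* one store covers every metadata field */
	ev.flow_id = (rss & ~flow_id_mask) |	  /* RSS hash if no app id */
		     (ev.flow_id & flow_id_mask); /* else keep the app id */
	return ev;
}

int main(void)
{
	union ev_word tmpl = { 0 };
	union ev_word ev;

	tmpl.flow_id = 0x1234;	/* app-provided flow id in the template */
	tmpl.rest = 0xabcd;	/* pretend queue/sched/priority bits */

	ev = fill_event(tmpl.event, 0xdeadbeef, ~0u);	/* app id valid */
	printf("app id kept: 0x%x\n", ev.flow_id);	/* prints 0x1234 */

	ev = fill_event(tmpl.event, 0xdeadbeef, 0);	/* no app id */
	printf("rss used:    0x%x\n", ev.flow_id);	/* prints 0xdeadbeef */
	return 0;
}

This is the same expression the patched loop uses: ev->event = event; then ev->flow_id = (rss & ~flow_id_mask) | (ev->flow_id & flow_id_mask);.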
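The callback contract changes too: instead of pre-filtering the raw mbuf burst into a separate cb_mbufs[] array, dev_info->cb_fn now runs once per burst on events already written at the tail of the enqueue buffer, returns how many survive (a return value above num is logged and ignored), and reports drops through an out parameter that the adapter folds into stats.rx_dropped. A generic, self-contained sketch of that in-place filter pattern follows; filter_cb and struct ev are hypothetical, not the real rte_event_eth_rx_adapter_cb_fn prototype (see rte_event_eth_rx_adapter.h for that).

#include <stdint.h>
#include <stdio.h>

struct ev { uint64_t word; int pkt_id; };	/* stand-in for rte_event */

/* Compact the burst in place (here: keep even pkt_ids), report drops
 * via an out parameter, and return the surviving count, mirroring the
 * new callback contract.
 */
static uint16_t
filter_cb(struct ev *ev, uint16_t nb_event, uint16_t *dropped)
{
	uint16_t kept = 0;

	for (uint16_t i = 0; i < nb_event; i++)
		if (ev[i].pkt_id % 2 == 0)
			ev[kept++] = ev[i];
	*dropped = nb_event - kept;
	return kept;	/* caller advances its buffer count by this many */
}

int main(void)
{
	struct ev burst[4] = { {0, 1}, {0, 2}, {0, 3}, {0, 4} };
	uint16_t dropped;
	uint16_t kept = filter_cb(burst, 4, &dropped);

	printf("kept %d, dropped %d\n", kept, dropped);	/* kept 2, dropped 2 */
	return 0;
}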