diff --git a/lib/librte_eventdev/rte_event_eth_rx_adapter.c b/lib/librte_eventdev/rte_event_eth_rx_adapter.c
index b6ff57b429..95dd478201 100644
--- a/lib/librte_eventdev/rte_event_eth_rx_adapter.c
+++ b/lib/librte_eventdev/rte_event_eth_rx_adapter.c
@@ -198,11 +198,8 @@ struct eth_rx_queue_info {
 	int queue_enabled;	/* True if added */
 	int intr_enabled;
 	uint16_t wt;		/* Polling weight */
-	uint8_t event_queue_id;	/* Event queue to enqueue packets to */
-	uint8_t sched_type;	/* Sched type for events */
-	uint8_t priority;	/* Event priority */
-	uint32_t flow_id;	/* App provided flow identifier */
 	uint32_t flow_id_mask;	/* Set to ~0 if app provides flow id else 0 */
+	uint64_t event;
 };
 
 static struct rte_event_eth_rx_adapter **event_eth_rx_adapter;
@@ -611,8 +608,8 @@ rxa_calc_wrr_sequence(struct rte_event_eth_rx_adapter *rx_adapter,
 }
 
 static inline void
-rxa_mtoip(struct rte_mbuf *m, struct ipv4_hdr **ipv4_hdr,
-	struct ipv6_hdr **ipv6_hdr)
+rxa_mtoip(struct rte_mbuf *m, struct rte_ipv4_hdr **ipv4_hdr,
+	struct rte_ipv6_hdr **ipv6_hdr)
 {
 	struct rte_ether_hdr *eth_hdr = rte_pktmbuf_mtod(m,
 						struct rte_ether_hdr *);
@@ -622,22 +619,22 @@ rxa_mtoip(struct rte_mbuf *m, struct ipv4_hdr **ipv4_hdr,
 	*ipv6_hdr = NULL;
 
 	switch (eth_hdr->ether_type) {
-	case RTE_BE16(ETHER_TYPE_IPv4):
-		*ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
+	case RTE_BE16(RTE_ETHER_TYPE_IPV4):
+		*ipv4_hdr = (struct rte_ipv4_hdr *)(eth_hdr + 1);
 		break;
 
-	case RTE_BE16(ETHER_TYPE_IPv6):
-		*ipv6_hdr = (struct ipv6_hdr *)(eth_hdr + 1);
+	case RTE_BE16(RTE_ETHER_TYPE_IPV6):
+		*ipv6_hdr = (struct rte_ipv6_hdr *)(eth_hdr + 1);
 		break;
 
-	case RTE_BE16(ETHER_TYPE_VLAN):
+	case RTE_BE16(RTE_ETHER_TYPE_VLAN):
 		vlan_hdr = (struct rte_vlan_hdr *)(eth_hdr + 1);
 		switch (vlan_hdr->eth_proto) {
-		case RTE_BE16(ETHER_TYPE_IPv4):
-			*ipv4_hdr = (struct ipv4_hdr *)(vlan_hdr + 1);
+		case RTE_BE16(RTE_ETHER_TYPE_IPV4):
+			*ipv4_hdr = (struct rte_ipv4_hdr *)(vlan_hdr + 1);
 			break;
-		case RTE_BE16(ETHER_TYPE_IPv6):
-			*ipv6_hdr = (struct ipv6_hdr *)(vlan_hdr + 1);
+		case RTE_BE16(RTE_ETHER_TYPE_IPV6):
+			*ipv6_hdr = (struct rte_ipv6_hdr *)(vlan_hdr + 1);
 			break;
 		default:
 			break;
@@ -657,8 +654,8 @@ rxa_do_softrss(struct rte_mbuf *m, const uint8_t *rss_key_be)
 	void *tuple;
 	struct rte_ipv4_tuple ipv4_tuple;
 	struct rte_ipv6_tuple ipv6_tuple;
-	struct ipv4_hdr *ipv4_hdr;
-	struct ipv6_hdr *ipv6_hdr;
+	struct rte_ipv4_hdr *ipv4_hdr;
+	struct rte_ipv6_hdr *ipv6_hdr;
 
 	rxa_mtoip(m, &ipv4_hdr, &ipv6_hdr);
 
@@ -716,18 +713,6 @@ rxa_enq_block_end_ts(struct rte_event_eth_rx_adapter *rx_adapter,
 	}
 }
 
-/* Add event to buffer, free space check is done prior to calling
- * this function
- */
-static inline void
-rxa_buffer_event(struct rte_event_eth_rx_adapter *rx_adapter,
-		struct rte_event *ev)
-{
-	struct rte_eth_event_enqueue_buffer *buf =
-		&rx_adapter->event_enqueue_buffer;
-	rte_memcpy(&buf->events[buf->count++], ev, sizeof(struct rte_event));
-}
-
 /* Enqueue buffered events to event device */
 static inline uint16_t
 rxa_flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter)
@@ -770,18 +755,16 @@ rxa_buffer_mbufs(struct rte_event_eth_rx_adapter *rx_adapter,
 					&dev_info->rx_queue[rx_queue_id];
 	struct rte_eth_event_enqueue_buffer *buf =
 					&rx_adapter->event_enqueue_buffer;
-	int32_t qid = eth_rx_queue_info->event_queue_id;
-	uint8_t sched_type = eth_rx_queue_info->sched_type;
-	uint8_t priority = eth_rx_queue_info->priority;
-	uint32_t flow_id;
-	struct rte_event events[BATCH_SIZE];
+	struct rte_event *ev = &buf->events[buf->count];
+	uint64_t event = eth_rx_queue_info->event;
+	uint32_t flow_id_mask = eth_rx_queue_info->flow_id_mask;
 	struct rte_mbuf *m = mbufs[0];
 	uint32_t rss_mask;
 	uint32_t rss;
 	int do_rss;
 	uint64_t ts;
-	struct rte_mbuf *cb_mbufs[BATCH_SIZE];
 	uint16_t nb_cb;
+	uint16_t dropped;
 
 	/* 0xffff ffff if PKT_RX_RSS_HASH is set, otherwise 0 */
 	rss_mask = ~(((m->ol_flags & PKT_RX_RSS_HASH) != 0) - 1);
@@ -797,41 +780,35 @@ rxa_buffer_mbufs(struct rte_event_eth_rx_adapter *rx_adapter,
 		}
 	}
 
-
-	nb_cb = dev_info->cb_fn ? dev_info->cb_fn(eth_dev_id, rx_queue_id,
-						ETH_EVENT_BUFFER_SIZE,
-						buf->count, mbufs,
-						num,
-						dev_info->cb_arg,
-						cb_mbufs) :
-						num;
-	if (nb_cb < num) {
-		mbufs = cb_mbufs;
-		num = nb_cb;
-	}
-
 	for (i = 0; i < num; i++) {
 		m = mbufs[i];
-		struct rte_event *ev = &events[i];
 
 		rss = do_rss ?
 			rxa_do_softrss(m, rx_adapter->rss_key_be) :
 			m->hash.rss;
-		flow_id =
-			eth_rx_queue_info->flow_id &
-			eth_rx_queue_info->flow_id_mask;
-		flow_id |= rss & ~eth_rx_queue_info->flow_id_mask;
-		ev->flow_id = flow_id;
-		ev->op = RTE_EVENT_OP_NEW;
-		ev->sched_type = sched_type;
-		ev->queue_id = qid;
-		ev->event_type = RTE_EVENT_TYPE_ETH_RX_ADAPTER;
-		ev->sub_event_type = 0;
-		ev->priority = priority;
+		ev->event = event;
+		ev->flow_id = (rss & ~flow_id_mask) |
+				(ev->flow_id & flow_id_mask);
 		ev->mbuf = m;
+		ev++;
+	}
+
+	if (dev_info->cb_fn) {
 
-		rxa_buffer_event(rx_adapter, ev);
+		dropped = 0;
+		nb_cb = dev_info->cb_fn(eth_dev_id, rx_queue_id,
+				       ETH_EVENT_BUFFER_SIZE, buf->count, ev,
+				       num, dev_info->cb_arg, &dropped);
+		if (unlikely(nb_cb > num))
+			RTE_EDEV_LOG_ERR("Rx CB returned %d (> %d) events",
+				nb_cb, num);
+		else
+			num = nb_cb;
+		if (dropped)
+			rx_adapter->stats.rx_dropped += dropped;
 	}
+
+	buf->count += num;
 }
 
 /* Enqueue packets from  <port, q>  to event buffer */
@@ -1718,6 +1695,7 @@ rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter,
 	int pollq;
 	int intrq;
 	int sintrq;
+	struct rte_event *qi_ev;
 
 	if (rx_queue_id == -1) {
 		uint16_t nb_rx_queues;
@@ -1734,16 +1712,19 @@ rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter,
 	sintrq = rxa_shared_intr(dev_info, rx_queue_id);
 
 	queue_info = &dev_info->rx_queue[rx_queue_id];
-	queue_info->event_queue_id = ev->queue_id;
-	queue_info->sched_type = ev->sched_type;
-	queue_info->priority = ev->priority;
 	queue_info->wt = conf->servicing_weight;
 
+	qi_ev = (struct rte_event *)&queue_info->event;
+	qi_ev->event = ev->event;
+	qi_ev->op = RTE_EVENT_OP_NEW;
+	qi_ev->event_type = RTE_EVENT_TYPE_ETH_RX_ADAPTER;
+	qi_ev->sub_event_type = 0;
+
 	if (conf->rx_queue_flags &
 			RTE_EVENT_ETH_RX_ADAPTER_QUEUE_FLOW_ID_VALID) {
-		queue_info->flow_id = ev->flow_id;
 		queue_info->flow_id_mask = ~0;
-	}
+	} else
+		qi_ev->flow_id = 0;
 
 	rxa_update_queue(rx_adapter, dev_info, rx_queue_id, 1);
 	if (rxa_polled_queue(dev_info, rx_queue_id)) {
@@ -2297,7 +2278,7 @@ rte_event_eth_rx_adapter_stop(uint8_t id)
 	return rxa_ctrl(id, 0);
 }
 
-int __rte_experimental
+int
 rte_event_eth_rx_adapter_stats_get(uint8_t id,
 			       struct rte_event_eth_rx_adapter_stats *stats)
 {
@@ -2384,7 +2365,7 @@ rte_event_eth_rx_adapter_service_id_get(uint8_t id, uint32_t *service_id)
 	return rx_adapter->service_inited ? 0 : -ESRCH;
 }
 
-int __rte_experimental
+int
 rte_event_eth_rx_adapter_cb_register(uint8_t id,
 				uint16_t eth_dev_id,
 				rte_event_eth_rx_adapter_cb_fn cb_fn,
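
The substance of this diff is the fast path in rxa_buffer_mbufs(): rather than keeping event_queue_id, sched_type and priority as separate eth_rx_queue_info fields and assembling each struct rte_event field by field per packet, rxa_add_queue() now prebuilds the entire 64-bit event metadata word once per Rx queue (queue_info->event). The per-mbuf loop then reduces to one 64-bit store plus a masked merge that takes the flow id from the application when flow_id_mask is ~0, or from the (possibly software-computed) RSS hash when it is 0. The sketch below shows this template trick in isolation; struct mock_event is a simplified stand-in modeled on struct rte_event's documented bitfield layout, and the numeric values stand in for DPDK constants such as RTE_EVENT_OP_NEW, so treat it as an illustration rather than the library code.

/* Standalone sketch of the prebuilt-event technique (simplified stand-in
 * for struct rte_event; values are illustrative, not DPDK constants).
 * Build with: cc -std=c11 -Wall sketch.c
 */
#include <stdint.h>
#include <stdio.h>

struct mock_event {
	union {
		uint64_t event;		/* all metadata as one word */
		struct {
			uint32_t flow_id:20;
			uint32_t sub_event_type:8;
			uint32_t event_type:4;
			uint8_t op:2;
			uint8_t rsvd:4;
			uint8_t sched_type:2;
			uint8_t queue_id;
			uint8_t priority;
			uint8_t impl_opaque;
		};
	};
	void *mbuf;			/* stand-in for the payload union */
};

int main(void)
{
	/* Queue-add time: fold op/type/queue/sched/priority into one
	 * template word, exactly once (cf. rxa_add_queue()). */
	struct mock_event tmpl = { .event = 0 };
	tmpl.op = 0;			/* e.g. RTE_EVENT_OP_NEW */
	tmpl.event_type = 4;		/* e.g. RTE_EVENT_TYPE_ETH_RX_ADAPTER */
	tmpl.queue_id = 5;
	tmpl.sched_type = 1;		/* e.g. RTE_SCHED_TYPE_ATOMIC */
	tmpl.priority = 128;
	const uint64_t event = tmpl.event;
	const uint32_t flow_id_mask = 0; /* 0: flow id comes from RSS hash */

	/* Fast path: one store plus a masked merge per packet
	 * (cf. the loop in rxa_buffer_mbufs()). */
	uint32_t rss = 0xabcde;		/* pretend RSS hash */
	struct mock_event ev;
	ev.event = event;
	ev.flow_id = (rss & ~flow_id_mask) | (ev.flow_id & flow_id_mask);
	ev.mbuf = NULL;

	printf("queue=%u prio=%u flow=0x%x\n", (unsigned)ev.queue_id,
	       (unsigned)ev.priority, (unsigned)ev.flow_id);
	return 0;
}

Because flow_id occupies the low 20 bits of the metadata word, it is the only field that must be recomputed per packet; op, event type, sched type, queue id and priority all ride along in the prebuilt template. That is also why rxa_add_queue() clears qi_ev->flow_id when the application does not supply one: the template's flow id bits must be zero for the RSS merge to work.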