diff --git a/lib/librte_eventdev/rte_event_eth_rx_adapter.c b/lib/librte_eventdev/rte_event_eth_rx_adapter.c
index 42dd7f80ef..e01d61b24d 100644
--- a/lib/librte_eventdev/rte_event_eth_rx_adapter.c
+++ b/lib/librte_eventdev/rte_event_eth_rx_adapter.c
@@ -147,6 +147,10 @@ struct rte_event_eth_rx_adapter {
 struct eth_device_info {
 	struct rte_eth_dev *dev;
 	struct eth_rx_queue_info *rx_queue;
+	/* Rx callback */
+	rte_event_eth_rx_adapter_cb_fn cb_fn;
+	/* Rx callback argument */
+	void *cb_arg;
 	/* Set if ethdev->eventdev packet transfer uses a
 	 * hardware mechanism
 	 */
@@ -610,28 +614,29 @@ static inline void
 rxa_mtoip(struct rte_mbuf *m, struct ipv4_hdr **ipv4_hdr,
 	struct ipv6_hdr **ipv6_hdr)
 {
-	struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
-	struct vlan_hdr *vlan_hdr;
+	struct rte_ether_hdr *eth_hdr =
+		rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
+	struct rte_vlan_hdr *vlan_hdr;
 
 	*ipv4_hdr = NULL;
 	*ipv6_hdr = NULL;
 
 	switch (eth_hdr->ether_type) {
-	case RTE_BE16(ETHER_TYPE_IPv4):
+	case RTE_BE16(RTE_ETHER_TYPE_IPv4):
 		*ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
 		break;
 
-	case RTE_BE16(ETHER_TYPE_IPv6):
+	case RTE_BE16(RTE_ETHER_TYPE_IPv6):
 		*ipv6_hdr = (struct ipv6_hdr *)(eth_hdr + 1);
 		break;
 
-	case RTE_BE16(ETHER_TYPE_VLAN):
-		vlan_hdr = (struct vlan_hdr *)(eth_hdr + 1);
+	case RTE_BE16(RTE_ETHER_TYPE_VLAN):
+		vlan_hdr = (struct rte_vlan_hdr *)(eth_hdr + 1);
 		switch (vlan_hdr->eth_proto) {
-		case RTE_BE16(ETHER_TYPE_IPv4):
+		case RTE_BE16(RTE_ETHER_TYPE_IPv4):
 			*ipv4_hdr = (struct ipv4_hdr *)(vlan_hdr + 1);
 			break;
-		case RTE_BE16(ETHER_TYPE_IPv6):
+		case RTE_BE16(RTE_ETHER_TYPE_IPv6):
 			*ipv6_hdr = (struct ipv6_hdr *)(vlan_hdr + 1);
 			break;
 		default:
@@ -759,11 +764,12 @@ rxa_buffer_mbufs(struct rte_event_eth_rx_adapter *rx_adapter,
 	uint16_t num)
 {
 	uint32_t i;
-	struct eth_device_info *eth_device_info =
+	struct eth_device_info *dev_info =
 		&rx_adapter->eth_devices[eth_dev_id];
 	struct eth_rx_queue_info *eth_rx_queue_info =
-		&eth_device_info->rx_queue[rx_queue_id];
-
+		&dev_info->rx_queue[rx_queue_id];
+	struct rte_eth_event_enqueue_buffer *buf =
+		&rx_adapter->event_enqueue_buffer;
 	int32_t qid = eth_rx_queue_info->event_queue_id;
 	uint8_t sched_type = eth_rx_queue_info->sched_type;
 	uint8_t priority = eth_rx_queue_info->priority;
@@ -774,6 +780,8 @@ rxa_buffer_mbufs(struct rte_event_eth_rx_adapter *rx_adapter,
 	uint32_t rss;
 	int do_rss;
 	uint64_t ts;
+	struct rte_mbuf *cb_mbufs[BATCH_SIZE];
+	uint16_t nb_cb;
 
 	/* 0xffff ffff if PKT_RX_RSS_HASH is set, otherwise 0 */
 	rss_mask = ~(((m->ol_flags & PKT_RX_RSS_HASH) != 0) - 1);
@@ -789,6 +797,19 @@ rxa_buffer_mbufs(struct rte_event_eth_rx_adapter *rx_adapter,
 		}
 	}
 
+
+	nb_cb = dev_info->cb_fn ? dev_info->cb_fn(eth_dev_id, rx_queue_id,
+						ETH_EVENT_BUFFER_SIZE,
+						buf->count, mbufs,
+						num,
+						dev_info->cb_arg,
+						cb_mbufs) :
+						num;
+	if (nb_cb < num) {
+		mbufs = cb_mbufs;
+		num = nb_cb;
+	}
+
 	for (i = 0; i < num; i++) {
 		m = mbufs[i];
 		struct rte_event *ev = &events[i];
@@ -852,7 +873,7 @@ rxa_eth_rx(struct rte_event_eth_rx_adapter *rx_adapter,
 			break;
 	}
 
-	if (buf->count >= BATCH_SIZE)
+	if (buf->count > 0)
 		rxa_flush_event_buffer(rx_adapter);
 
 	return nb_rx;
@@ -892,7 +913,7 @@ rxa_intr_ring_enqueue(struct rte_event_eth_rx_adapter *rx_adapter,
 	 */
 	if (err)
 		RTE_EDEV_LOG_ERR("Failed to enqueue interrupt"
-				" to ring: %s", strerror(err));
+				" to ring: %s", strerror(-err));
 	else
 		rte_eth_dev_rx_intr_disable(port_id, queue);
 }
@@ -1105,7 +1126,6 @@ rxa_poll(struct rte_event_eth_rx_adapter *rx_adapter)
 	wrr_pos = rx_adapter->wrr_pos;
 	max_nb_rx = rx_adapter->max_nb_rx;
 	buf = &rx_adapter->event_enqueue_buffer;
-	stats = &rx_adapter->stats;
 
 	/* Iterate through a WRR sequence */
 	for (num_queue = 0; num_queue < rx_adapter->wrr_len; num_queue++) {
@@ -1146,8 +1166,8 @@ rxa_service_func(void *args)
 	if (rte_spinlock_trylock(&rx_adapter->rx_lock) == 0)
 		return 0;
 	if (!rx_adapter->rxa_started) {
-		return 0;
 		rte_spinlock_unlock(&rx_adapter->rx_lock);
+		return 0;
 	}
 	stats = &rx_adapter->stats;
@@ -1978,8 +1998,7 @@ rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id,
 	rx_adapter->id = id;
 	strcpy(rx_adapter->mem_name, mem_name);
 	rx_adapter->eth_devices = rte_zmalloc_socket(rx_adapter->mem_name,
-					/* FIXME: incompatible with hotplug */
-					rte_eth_dev_count_total() *
+					RTE_MAX_ETHPORTS *
 					sizeof(struct eth_device_info), 0,
 					socket_id);
 	rte_convert_rss_key((const uint32_t *)default_rss_key,
@@ -1992,7 +2011,7 @@ rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id,
 		return -ENOMEM;
 	}
 	rte_spinlock_init(&rx_adapter->rx_lock);
-	RTE_ETH_FOREACH_DEV(i)
+	for (i = 0; i < RTE_MAX_ETHPORTS; i++)
 		rx_adapter->eth_devices[i].dev = &rte_eth_devices[i];
 
 	event_eth_rx_adapter[id] = rx_adapter;
@@ -2278,7 +2297,7 @@ rte_event_eth_rx_adapter_stop(uint8_t id)
 	return rxa_ctrl(id, 0);
 }
 
-int
+int __rte_experimental
 rte_event_eth_rx_adapter_stats_get(uint8_t id,
 			struct rte_event_eth_rx_adapter_stats *stats)
 {
@@ -2364,3 +2383,48 @@ rte_event_eth_rx_adapter_service_id_get(uint8_t id, uint32_t *service_id)
 
 	return rx_adapter->service_inited ? 0 : -ESRCH;
 }
+
+int __rte_experimental
+rte_event_eth_rx_adapter_cb_register(uint8_t id,
+					uint16_t eth_dev_id,
+					rte_event_eth_rx_adapter_cb_fn cb_fn,
+					void *cb_arg)
+{
+	struct rte_event_eth_rx_adapter *rx_adapter;
+	struct eth_device_info *dev_info;
+	uint32_t cap;
+	int ret;
+
+	RTE_EVENT_ETH_RX_ADAPTER_ID_VALID_OR_ERR_RET(id, -EINVAL);
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_dev_id, -EINVAL);
+
+	rx_adapter = rxa_id_to_adapter(id);
+	if (rx_adapter == NULL)
+		return -EINVAL;
+
+	dev_info = &rx_adapter->eth_devices[eth_dev_id];
+	if (dev_info->rx_queue == NULL)
+		return -EINVAL;
+
+	ret = rte_event_eth_rx_adapter_caps_get(rx_adapter->eventdev_id,
+						eth_dev_id,
+						&cap);
+	if (ret) {
+		RTE_EDEV_LOG_ERR("Failed to get adapter caps edev %" PRIu8
+			" eth port %" PRIu16, id, eth_dev_id);
+		return ret;
+	}
+
+	if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) {
+		RTE_EDEV_LOG_ERR("Rx callback not supported for eth port %"
+				PRIu16, eth_dev_id);
+		return -EINVAL;
+	}
+
+	rte_spinlock_lock(&rx_adapter->rx_lock);
+	dev_info->cb_fn = cb_fn;
+	dev_info->cb_arg = cb_arg;
+	rte_spinlock_unlock(&rx_adapter->rx_lock);
+
+	return 0;
+}
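
The rte_event_eth_rx_adapter_cb_register() API added above lets an application install a per-port callback that can inspect, filter, or replace received mbufs before the adapter enqueues them to the event device; rxa_buffer_mbufs() switches to the callback's output array whenever the callback returns fewer packets than it was given. The sketch below is illustrative and not part of the patch: ipv4_filter_cb's parameter list mirrors the call site in the rxa_buffer_mbufs() hunk (port, queue, enqueue buffer size and fill level, input mbuf array and count, user argument, output array), but the authoritative typedef is rte_event_eth_rx_adapter_cb_fn in rte_event_eth_rx_adapter.h at this revision. The identifiers ipv4_filter_cb, install_ipv4_filter, id, and eth_dev_id are hypothetical.

	#include <rte_common.h>
	#include <rte_byteorder.h>
	#include <rte_mbuf.h>
	#include <rte_ether.h>
	#include <rte_event_eth_rx_adapter.h>

	/* Hypothetical callback: keep IPv4 packets, free everything else.
	 * The adapter passes up to nb_mbuf packets; the packets to enqueue
	 * are written to filter_mbufs and their count is returned.
	 */
	static uint16_t
	ipv4_filter_cb(uint16_t eth_dev_id, uint16_t queue_id,
			uint32_t enqueue_buf_size, uint32_t enqueue_buf_count,
			struct rte_mbuf **mbufs, uint16_t nb_mbuf,
			void *cb_arg, struct rte_mbuf **filter_mbufs)
	{
		uint16_t kept = 0;
		uint16_t i;

		RTE_SET_USED(eth_dev_id);
		RTE_SET_USED(queue_id);
		RTE_SET_USED(enqueue_buf_size);
		RTE_SET_USED(enqueue_buf_count);
		RTE_SET_USED(cb_arg);

		for (i = 0; i < nb_mbuf; i++) {
			struct rte_ether_hdr *eth_hdr =
				rte_pktmbuf_mtod(mbufs[i],
						struct rte_ether_hdr *);

			/* Same ether-type constant the patch uses in
			 * rxa_mtoip()
			 */
			if (eth_hdr->ether_type ==
					RTE_BE16(RTE_ETHER_TYPE_IPv4))
				filter_mbufs[kept++] = mbufs[i];
			else
				rte_pktmbuf_free(mbufs[i]);
		}

		return kept;
	}

	/* Register the callback on adapter "id" for port "eth_dev_id"
	 * (both placeholders). Returns -EINVAL if the port delivers
	 * packets through an internal (hardware) event port, per the
	 * capability check added in the last hunk.
	 */
	static int
	install_ipv4_filter(uint8_t id, uint16_t eth_dev_id)
	{
		return rte_event_eth_rx_adapter_cb_register(id, eth_dev_id,
							ipv4_filter_cb, NULL);
	}

Since the register function is tagged __rte_experimental at this revision, callers must be built with ALLOW_EXPERIMENTAL_API. Note also that registration takes rx_adapter->rx_lock while writing cb_fn and cb_arg, so the polling service never observes a half-updated callback/argument pair.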