X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_kni%2Frte_kni.c;h=4b51fb4fedc1811a4d225e686ec44325f8c9660b;hb=47caefc163b884ed1911c6eec74d329d63ee46c5;hp=492e207a31a28aca88d9ffa35af573d38be94446;hpb=742bde12f3bd008c2f06d744330436d8b1a56720;p=dpdk.git

diff --git a/lib/librte_kni/rte_kni.c b/lib/librte_kni/rte_kni.c
index 492e207a31..4b51fb4fed 100644
--- a/lib/librte_kni/rte_kni.c
+++ b/lib/librte_kni/rte_kni.c
@@ -21,7 +21,7 @@
 #include <rte_tailq.h>
 #include <rte_rwlock.h>
 #include <rte_eal_memconfig.h>
-#include <exec-env/rte_kni_common.h>
+#include <rte_kni_common.h>
 #include "rte_kni_fifo.h"
 
 #define MAX_MBUF_BURST_NUM 32
@@ -59,7 +59,7 @@ struct rte_kni {
 	uint16_t group_id;                /**< Group ID of KNI devices */
 	uint32_t slot_id;                 /**< KNI pool slot ID */
 	struct rte_mempool *pktmbuf_pool; /**< pkt mbuf mempool */
-	unsigned mbuf_size;               /**< mbuf size */
+	unsigned int mbuf_size;           /**< mbuf size */
 
 	const struct rte_memzone *m_tx_q; /**< TX queue memzone */
 	const struct rte_memzone *m_rx_q; /**< RX queue memzone */
@@ -78,7 +78,7 @@ struct rte_kni {
 	/* For request & response */
 	struct rte_kni_fifo *req_q;       /**< Request queue */
 	struct rte_kni_fifo *resp_q;      /**< Response queue */
-	void * sync_addr;                 /**< Req/Resp Mem address */
+	void *sync_addr;                  /**< Req/Resp Mem address */
 
 	struct rte_kni_ops ops;           /**< operations for request */
 };
@@ -97,6 +97,11 @@ static volatile int kni_fd = -1;
 int
 rte_kni_init(unsigned int max_kni_ifaces __rte_unused)
 {
+	if (rte_eal_iova_mode() != RTE_IOVA_PA) {
+		RTE_LOG(ERR, KNI, "KNI requires IOVA as PA\n");
+		return -1;
+	}
+
 	/* Check FD and open */
 	if (kni_fd < 0) {
 		kni_fd = open("/dev/" KNI_DEVICE, O_RDWR);
@@ -214,7 +219,7 @@ rte_kni_alloc(struct rte_mempool *pktmbuf_pool,
 		return NULL;
 	}
 
-	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
+	rte_mcfg_tailq_write_lock();
 
 	kni = __rte_kni_get(conf->name);
 	if (kni != NULL) {
@@ -234,7 +239,7 @@ rte_kni_alloc(struct rte_mempool *pktmbuf_pool,
 		goto kni_fail;
 	}
 
-	snprintf(kni->name, RTE_KNI_NAMESIZE, "%s", conf->name);
+	strlcpy(kni->name, conf->name, RTE_KNI_NAMESIZE);
 
 	if (ops)
 		memcpy(&kni->ops, ops, sizeof(struct rte_kni_ops));
@@ -242,24 +247,15 @@ rte_kni_alloc(struct rte_mempool *pktmbuf_pool,
 		kni->ops.port_id = UINT16_MAX;
 
 	memset(&dev_info, 0, sizeof(dev_info));
-	dev_info.bus = conf->addr.bus;
-	dev_info.devid = conf->addr.devid;
-	dev_info.function = conf->addr.function;
-	dev_info.vendor_id = conf->id.vendor_id;
-	dev_info.device_id = conf->id.device_id;
 	dev_info.core_id = conf->core_id;
 	dev_info.force_bind = conf->force_bind;
 	dev_info.group_id = conf->group_id;
 	dev_info.mbuf_size = conf->mbuf_size;
 	dev_info.mtu = conf->mtu;
-	memcpy(dev_info.mac_addr, conf->mac_addr, ETHER_ADDR_LEN);
-
-	snprintf(dev_info.name, RTE_KNI_NAMESIZE, "%s", conf->name);
+	memcpy(dev_info.mac_addr, conf->mac_addr, RTE_ETHER_ADDR_LEN);
 
-	RTE_LOG(INFO, KNI, "pci: %02x:%02x:%02x \t %02x:%02x\n",
-		dev_info.bus, dev_info.devid, dev_info.function,
-			dev_info.vendor_id, dev_info.device_id);
+	strlcpy(dev_info.name, conf->name, RTE_KNI_NAMESIZE);
 
 	ret = kni_reserve_mz(kni);
 	if (ret < 0)
@@ -313,7 +309,7 @@ rte_kni_alloc(struct rte_mempool *pktmbuf_pool,
 	kni_list = RTE_TAILQ_CAST(rte_kni_tailq.head, rte_kni_list);
 	TAILQ_INSERT_TAIL(kni_list, te, next);
 
-	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+	rte_mcfg_tailq_write_unlock();
 
 	/* Allocate mbufs and then put them into alloc_q */
 	kni_allocate_mbufs(kni);
@@ -327,7 +323,7 @@ mz_fail:
 kni_fail:
	rte_free(te);
 unlock:
-	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+	rte_mcfg_tailq_write_unlock();
 
 	return NULL;
 }
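
Context for the rte_kni_alloc() hunks above: the PCI bus/devid/function and vendor/device id fields are no longer copied into struct rte_kni_device_info, the MAC length macro is the renamed RTE_ETHER_ADDR_LEN, and rte_kni_init() now fails up front when the EAL is not running in IOVA-as-PA mode. A minimal caller-side sketch follows; it is not part of this patch, kni_alloc_example and the literal mbuf_size/MTU values are illustrative, and the conf field names assume a 19.08-era rte_kni.h.

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#include <rte_kni.h>
	#include <rte_ether.h>
	#include <rte_mempool.h>

	static struct rte_kni *
	kni_alloc_example(struct rte_mempool *mb_pool, uint16_t port_id,
			const struct rte_ether_addr *mac)
	{
		struct rte_kni_conf conf;
		struct rte_kni_ops ops;

		/* Fails immediately unless the EAL runs with IOVA as PA,
		 * since the KNI fifos carry physical addresses. */
		if (rte_kni_init(1) < 0)
			return NULL;

		memset(&conf, 0, sizeof(conf));
		snprintf(conf.name, RTE_KNI_NAMESIZE, "vEth%u", port_id);
		conf.group_id = port_id;
		conf.mbuf_size = 2048;	/* illustrative value */
		conf.mtu = 1500;	/* illustrative value */
		/* MAC goes through conf; the PCI id fields are no longer
		 * consumed by the library. */
		memcpy(conf.mac_addr, mac->addr_bytes, RTE_ETHER_ADDR_LEN);

		memset(&ops, 0, sizeof(ops));
		ops.port_id = port_id;

		return rte_kni_alloc(mb_pool, &conf, &ops);
	}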
@@ -353,6 +349,19 @@ va2pa(struct rte_mbuf *m)
 			 (unsigned long)m->buf_iova));
 }
 
+static void *
+va2pa_all(struct rte_mbuf *mbuf)
+{
+	void *phy_mbuf = va2pa(mbuf);
+	struct rte_mbuf *next = mbuf->next;
+	while (next) {
+		mbuf->next = va2pa(next);
+		mbuf = next;
+		next = mbuf->next;
+	}
+	return phy_mbuf;
+}
+
 static void
 obj_free(struct rte_mempool *mp __rte_unused, void *opaque, void *obj,
 		unsigned obj_idx __rte_unused)
@@ -390,7 +399,7 @@ rte_kni_release(struct rte_kni *kni)
 
 	kni_list = RTE_TAILQ_CAST(rte_kni_tailq.head, rte_kni_list);
 
-	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
+	rte_mcfg_tailq_write_lock();
 
 	TAILQ_FOREACH(te, kni_list, next) {
 		if (te->data == kni)
@@ -400,7 +409,7 @@ rte_kni_release(struct rte_kni *kni)
 	if (te == NULL)
 		goto unlock;
 
-	snprintf(dev_info.name, sizeof(dev_info.name), "%s", kni->name);
+	strlcpy(dev_info.name, kni->name, sizeof(dev_info.name));
 	if (ioctl(kni_fd, RTE_KNI_IOCTL_RELEASE, &dev_info) < 0) {
 		RTE_LOG(ERR, KNI, "Fail to release kni device\n");
 		goto unlock;
@@ -408,7 +417,7 @@ rte_kni_release(struct rte_kni *kni)
 
 	TAILQ_REMOVE(kni_list, te, next);
 
-	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+	rte_mcfg_tailq_write_unlock();
 
 	/* mbufs in all fifo should be released, except request/response */
 
@@ -432,7 +441,7 @@ rte_kni_release(struct rte_kni *kni)
 	return 0;
 
 unlock:
-	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+	rte_mcfg_tailq_write_unlock();
 
 	return -1;
 }
@@ -451,7 +460,7 @@ kni_config_mac_address(uint16_t port_id, uint8_t mac_addr[])
 	RTE_LOG(INFO, KNI, "Configure mac address of %d", port_id);
 
 	ret = rte_eth_dev_default_mac_addr_set(port_id,
-					(struct ether_addr *)mac_addr);
+					(struct rte_ether_addr *)mac_addr);
 	if (ret < 0)
 		RTE_LOG(ERR, KNI, "Failed to config mac_addr for port %d\n",
 			port_id);
@@ -482,7 +491,7 @@ kni_config_promiscusity(uint16_t port_id, uint8_t to_on)
 int
 rte_kni_handle_request(struct rte_kni *kni)
 {
-	unsigned ret;
+	unsigned int ret;
 	struct rte_kni_request *req = NULL;
 
 	if (kni == NULL)
@@ -507,8 +516,8 @@ rte_kni_handle_request(struct rte_kni *kni)
 		break;
 	case RTE_KNI_REQ_CFG_NETWORK_IF: /* Set network interface up/down */
 		if (kni->ops.config_network_if)
-			req->result = kni->ops.config_network_if(\
-					kni->ops.port_id, req->if_up);
+			req->result = kni->ops.config_network_if(kni->ops.port_id,
+					req->if_up);
 		break;
 	case RTE_KNI_REQ_CHANGE_MAC_ADDR: /* Change MAC Address */
 		if (kni->ops.config_mac_address)
@@ -543,14 +552,15 @@ rte_kni_handle_request(struct rte_kni *kni)
 }
 
 unsigned
-rte_kni_tx_burst(struct rte_kni *kni, struct rte_mbuf **mbufs, unsigned num)
+rte_kni_tx_burst(struct rte_kni *kni, struct rte_mbuf **mbufs, unsigned int num)
 {
+	num = RTE_MIN(kni_fifo_free_count(kni->rx_q), num);
 	void *phy_mbufs[num];
 	unsigned int ret;
 	unsigned int i;
 
 	for (i = 0; i < num; i++)
-		phy_mbufs[i] = va2pa(mbufs[i]);
+		phy_mbufs[i] = va2pa_all(mbufs[i]);
 
 	ret = kni_fifo_put(kni->rx_q, phy_mbufs, num);
 
@@ -561,9 +571,9 @@ rte_kni_tx_burst(struct rte_kni *kni, struct rte_mbuf **mbufs, unsigned num)
 }
 
 unsigned
-rte_kni_rx_burst(struct rte_kni *kni, struct rte_mbuf **mbufs, unsigned num)
+rte_kni_rx_burst(struct rte_kni *kni, struct rte_mbuf **mbufs, unsigned int num)
 {
-	unsigned ret = kni_fifo_get(kni->tx_q, (void **)mbufs, num);
+	unsigned int ret = kni_fifo_get(kni->tx_q, (void **)mbufs, num);
 
 	/* If buffers removed, allocate mbufs and then put them into alloc_q */
 	if (ret)
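
The va2pa_all() addition and the RTE_MIN() clamp above change the TX path in two ways: chained (multi-segment) mbufs can now cross to the kernel, because every m->next link is rewritten to its physical address before the chain is posted to rx_q, and the burst is clamped to the fifo's free slots first, so a chain is never converted and then rejected by a full fifo, which would leak mbufs whose next pointers had already been rewritten. The caller's side keeps the same shape as before; a typical polling iteration is sketched below (not from this patch; the port/queue numbers and burst size are assumptions).

	#include <stdint.h>

	#include <rte_ethdev.h>
	#include <rte_kni.h>
	#include <rte_mbuf.h>

	static void
	kni_datapath_iter(struct rte_kni *kni, uint16_t port_id)
	{
		struct rte_mbuf *pkts[32];
		unsigned int nb, sent, i;

		/* NIC -> kernel: multi-segment mbufs are handled now that
		 * va2pa_all() converts the whole chain. */
		nb = rte_eth_rx_burst(port_id, 0, pkts, 32);
		sent = rte_kni_tx_burst(kni, pkts, nb);
		for (i = sent; i < nb; i++)	/* unsent mbufs stay ours */
			rte_pktmbuf_free(pkts[i]);

		/* kernel -> NIC */
		nb = rte_kni_rx_burst(kni, pkts, 32);
		sent = rte_eth_tx_burst(port_id, 0, pkts, nb);
		for (i = sent; i < nb; i++)
			rte_pktmbuf_free(pkts[i]);

		/* service control requests coming from the kernel side */
		rte_kni_handle_request(kni);
	}

As before, rte_kni_tx_burst() may return fewer packets than requested; the rejected mbufs still belong to the caller and must be freed or retried.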
@@ -614,7 +624,7 @@ kni_allocate_mbufs(struct rte_kni *kni)
 		return;
 	}
 
-	allocq_free = (kni->alloc_q->read - kni->alloc_q->write - 1) \
+	allocq_free = (kni->alloc_q->read - kni->alloc_q->write - 1) &
 			(MAX_MBUF_BURST_NUM - 1);
 	for (i = 0; i < allocq_free; i++) {
 		pkts[i] = rte_pktmbuf_alloc(kni->pktmbuf_pool);
@@ -649,11 +659,11 @@ rte_kni_get(const char *name)
 	if (name == NULL || name[0] == '\0')
 		return NULL;
 
-	rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
+	rte_mcfg_tailq_read_lock();
 
 	kni = __rte_kni_get(name);
 
-	rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);
+	rte_mcfg_tailq_read_unlock();
 
 	return kni;
 }
@@ -668,35 +678,35 @@ static enum kni_ops_status
 kni_check_request_register(struct rte_kni_ops *ops)
 {
 	/* check if KNI request ops has been registered*/
-	if( NULL == ops )
+	if (ops == NULL)
 		return KNI_REQ_NO_REGISTER;
 
-	if ((ops->change_mtu == NULL)
-		&& (ops->config_network_if == NULL)
-		&& (ops->config_mac_address == NULL)
-		&& (ops->config_promiscusity == NULL))
+	if (ops->change_mtu == NULL
+		&& ops->config_network_if == NULL
+		&& ops->config_mac_address == NULL
+		&& ops->config_promiscusity == NULL)
 		return KNI_REQ_NO_REGISTER;
 
 	return KNI_REQ_REGISTERED;
 }
 
 int
-rte_kni_register_handlers(struct rte_kni *kni,struct rte_kni_ops *ops)
+rte_kni_register_handlers(struct rte_kni *kni, struct rte_kni_ops *ops)
 {
 	enum kni_ops_status req_status;
 
-	if (NULL == ops) {
+	if (ops == NULL) {
 		RTE_LOG(ERR, KNI, "Invalid KNI request operation.\n");
 		return -1;
 	}
 
-	if (NULL == kni) {
+	if (kni == NULL) {
 		RTE_LOG(ERR, KNI, "Invalid kni info.\n");
 		return -1;
 	}
 
 	req_status = kni_check_request_register(&kni->ops);
-	if ( KNI_REQ_REGISTERED == req_status) {
+	if (req_status == KNI_REQ_REGISTERED) {
 		RTE_LOG(ERR, KNI, "The KNI request operation has already registered.\n");
 		return -1;
 	}
@@ -708,7 +718,7 @@ rte_kni_register_handlers(struct rte_kni *kni,struct rte_kni_ops *ops)
 int
 rte_kni_unregister_handlers(struct rte_kni *kni)
 {
-	if (NULL == kni) {
+	if (kni == NULL) {
 		RTE_LOG(ERR, KNI, "Invalid kni info.\n");
 		return -1;
 	}
@@ -718,7 +728,7 @@ rte_kni_unregister_handlers(struct rte_kni *kni)
 	return 0;
 }
 
-int __rte_experimental
+int
 rte_kni_update_link(struct rte_kni *kni, unsigned int linkup)
 {
 	char path[64];
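
On the handler-registration hunks: behavior is unchanged by the style cleanup, so registration still fails when the kni instance already has any non-NULL callback installed, and succeeds as long as at least one callback is set. A hypothetical application-side sketch, not part of this diff; example_change_mtu is illustrative and the callback signatures assume a 19.08-era rte_kni.h:

	#include <stdint.h>

	#include <rte_kni.h>
	#include <rte_log.h>

	static int
	example_change_mtu(uint16_t port_id, unsigned int new_mtu)
	{
		RTE_LOG(INFO, KNI, "port %u: MTU -> %u\n", port_id, new_mtu);
		return 0;	/* a real handler would reconfigure the port */
	}

	static int
	kni_install_handlers(struct rte_kni *kni, uint16_t port_id)
	{
		struct rte_kni_ops ops = {
			.port_id = port_id,
			.change_mtu = example_change_mtu,
			/* remaining callbacks may stay NULL; registration
			 * only fails when all of them are NULL */
		};

		/* returns -1 if handlers were already registered */
		return rte_kni_register_handlers(kni, &ops);
	}

rte_kni_update_link(), which becomes a stable symbol in the last hunk above by dropping __rte_experimental, can be called from the same control path to mirror the port's link state into the kernel interface.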